gfs2: Protect gl->gl_object by spin lock
Put all remaining accesses to gl->gl_object under the
gl->gl_lockref.lock spinlock to prevent races.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent 4fd1a57952
commit 6f6597baae
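Note on the setter side: every removed direct assignment to gl->gl_object below is replaced by a call to glock_set_object(). That helper's definition is not part of the hunks shown on this page; judging from the open-coded sequence it replaces in gfs2_clear_rgrpd() (spin_lock, assign, spin_unlock), a minimal sketch would look like the following, assuming it lives alongside the other glock helpers (the exact header it sits in is an assumption here):

	/*
	 * Sketch of the setter implied by this diff: update gl->gl_object
	 * under gl->gl_lockref.lock so stores cannot race with readers
	 * such as gfs2_glock2rgrp(), which takes the same spinlock.
	 */
	static inline void glock_set_object(struct gfs2_glock *gl, void *object)
	{
		spin_lock(&gl->gl_lockref.lock);
		gl->gl_object = object;
		spin_unlock(&gl->gl_lockref.lock);
	}

The matching reader, gfs2_glock2rgrp() (added in the glops.c hunk below), holds the lock only long enough to load the pointer; using the returned rgd afterwards is presumably safe because resource group descriptors are not freed until gfs2_clear_rgrpd() runs at unmount, which clears gl_object first.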
fs/gfs2/bmap.c
@@ -970,7 +970,7 @@ more_rgrps:
 			continue;
 		bn = be64_to_cpu(*p);
 		if (gfs2_holder_initialized(rd_gh)) {
-			rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+			rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
 			gfs2_assert_withdraw(sdp,
 				     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
 		} else {
fs/gfs2/dir.c
@@ -2032,8 +2032,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
 		rg_blocks += rgd->rd_length;
 	}
 
fs/gfs2/glops.c
@@ -137,7 +137,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
  *
  * Called when demoting or unlocking an EX glock. We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
  */
 
 static void rgrp_go_sync(struct gfs2_glock *gl)
@@ -184,7 +184,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 
 	if (rgd)
 		gfs2_rgrp_brelse(rgd);
@@ -209,6 +209,17 @@ static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
 	return ip;
 }
 
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+	struct gfs2_rgrpd *rgd;
+
+	spin_lock(&gl->gl_lockref.lock);
+	rgd = gl->gl_object;
+	spin_unlock(&gl->gl_lockref.lock);
+
+	return rgd;
+}
+
 static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
 {
 	if (!ip)
@@ -566,7 +577,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
fs/gfs2/incore.h
@@ -857,5 +857,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
 	preempt_enable();
 }
 
+extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
 #endif /* __INCORE_DOT_H__ */
 
fs/gfs2/inode.c
@@ -202,14 +202,14 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
 fail_refresh:
 	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-	ip->i_iopen_gh.gh_gl->gl_object = NULL;
+	glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_put:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
 	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
-	ip->i_gl->gl_object = NULL;
+	glock_set_object(ip->i_gl, NULL);
 fail:
 	iget_failed(inode);
 	return ERR_PTR(error);
@@ -706,7 +706,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_free_inode;
 
-	ip->i_gl->gl_object = ip;
+	glock_set_object(ip->i_gl, ip);
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
 	if (error)
 		goto fail_free_inode;
@@ -732,7 +732,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
 
-	ip->i_iopen_gh.gh_gl->gl_object = ip;
+	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 	gfs2_glock_put(io_gl);
 	gfs2_set_iop(inode);
 	insert_inode_hash(inode);
fs/gfs2/lops.c
@@ -71,7 +71,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 {
 	struct gfs2_glock *gl = bd->bd_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
fs/gfs2/rgrp.c
@@ -705,9 +705,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 		rb_erase(n, &sdp->sd_rindex_tree);
 
 		if (gl) {
-			spin_lock(&gl->gl_lockref.lock);
-			gl->gl_object = NULL;
-			spin_unlock(&gl->gl_lockref.lock);
+			glock_set_object(gl, NULL);
 			gfs2_glock_add_to_lru(gl);
 			gfs2_glock_put(gl);
 		}
@@ -917,7 +915,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	error = rgd_insert(rgd);
 	spin_unlock(&sdp->sd_rindex_spin);
 	if (!error) {
-		rgd->rd_gl->gl_object = rgd;
+		glock_set_object(rgd->rd_gl, rgd);
 		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
 		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
 						    rgd->rd_length) * bsize) - 1;
fs/gfs2/super.c
@@ -1105,9 +1105,12 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
 				gfs2_holder_uninit(gh);
 				error = err;
 			} else {
-				if (!error)
-					error = statfs_slow_fill(
-						gh->gh_gl->gl_object, sc);
+				if (!error) {
+					struct gfs2_rgrpd *rgd =
+						gfs2_glock2rgrp(gh->gh_gl);
+
+					error = statfs_slow_fill(rgd, sc);
+				}
 				gfs2_glock_dq_uninit(gh);
 			}
 		}
@@ -1637,7 +1640,7 @@ out:
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		ip->i_iopen_gh.gh_gl->gl_object = NULL;
+		glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 	}
fs/gfs2/xattr.c
@@ -1327,8 +1327,8 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
 		rg_blocks += rgd->rd_length;
 	}
 