From mboxrd@z Thu Jan  1 00:00:00 1970
From: Bob Peterson <rpeterso@redhat.com>
Date: Thu, 19 Nov 2015 12:42:40 -0600
Subject: [Cluster-devel] [GFS2 PATCH 1/2] GFS2: Make gfs2_clear_inode() queue the final put
In-Reply-To: <1447958561-2584-1-git-send-email-rpeterso@redhat.com>
References: <1447958561-2584-1-git-send-email-rpeterso@redhat.com>
Message-ID: <1447958561-2584-2-git-send-email-rpeterso@redhat.com>
List-Id: <cluster-devel.redhat.com>
To: cluster-devel.redhat.com
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit

This patch changes function gfs2_clear_inode() so that, instead of
calling gfs2_glock_put() directly most of the time, it queues the glock
to the delayed work queue. This avoids a possible deadlock when dlm is
called during a fence operation: dlm waits for the fence operation to
complete, the fence operation waits for memory, the shrinker waits for
gfs2 to free an inode from memory, but gfs2 waits for dlm.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
---
 fs/gfs2/glock.c | 34 +++++++++++++++++-----------------
 fs/gfs2/glock.h |  1 +
 fs/gfs2/super.c |  5 ++++-
 3 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 68484ef..53fedbb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -62,7 +62,7 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
 
 static struct dentry *gfs2_root;
-static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_glock_workqueue;
 struct workqueue_struct *gfs2_delete_workqueue;
 static LIST_HEAD(lru_list);
 static atomic_t lru_count = ATOMIC_INIT(0);
@@ -481,7 +481,7 @@ __acquires(&gl->gl_lockref.lock)
                 }
         } else { /* lock_nolock */
                 finish_xmote(gl, target);
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+                if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
                         gfs2_glock_put(gl);
         }
 
@@ -554,7 +554,7 @@ out_sched:
         clear_bit(GLF_LOCK, &gl->gl_flags);
         smp_mb__after_atomic();
         gl->gl_lockref.count++;
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+        if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
                 gl->gl_lockref.count--;
         return;
 
@@ -618,7 +618,7 @@ static void glock_work_func(struct work_struct *work)
         else {
                 if (gl->gl_name.ln_type != LM_TYPE_INODE)
                         delay = 0;
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+                if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, delay) == 0)
                         gfs2_glock_put(gl);
         }
         if (drop_ref)
@@ -973,7 +973,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
                      test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
                 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                 gl->gl_lockref.count++;
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+                if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
                         gl->gl_lockref.count--;
         }
         run_queue(gl, 1);
@@ -1042,7 +1042,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
             !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
             gl->gl_name.ln_type == LM_TYPE_INODE)
                 delay = gl->gl_hold_time;
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+        if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, delay) == 0)
                 gfs2_glock_put(gl);
 }
 
@@ -1220,7 +1220,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
         spin_lock(&gl->gl_lockref.lock);
         handle_callback(gl, state, delay, true);
         spin_unlock(&gl->gl_lockref.lock);
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+        if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, delay) == 0)
                 gfs2_glock_put(gl);
 }
 
@@ -1282,7 +1282,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
         spin_unlock(&gl->gl_lockref.lock);
 
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+        if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
 }
 
@@ -1341,7 +1341,7 @@ add_back_to_lru:
                 if (demote_ok(gl))
                         handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
-                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+                if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
                         gl->gl_lockref.count--;
                 spin_unlock(&gl->gl_lockref.lock);
                 cond_resched_lock(&lru_lock);
@@ -1445,7 +1445,7 @@ static void thaw_glock(struct gfs2_glock *gl)
         if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
                 goto out;
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
+        if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0) {
 out:
                 gfs2_glock_put(gl);
         }
@@ -1465,7 +1465,7 @@ static void clear_glock(struct gfs2_glock *gl)
         if (gl->gl_state != LM_ST_UNLOCKED)
                 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
         spin_unlock(&gl->gl_lockref.lock);
-        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+        if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
 }
 
@@ -1503,9 +1503,9 @@ static void dump_glock_func(struct gfs2_glock *gl)
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
         set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
-        flush_workqueue(glock_workqueue);
+        flush_workqueue(gfs2_glock_workqueue);
         glock_hash_walk(clear_glock, sdp);
-        flush_workqueue(glock_workqueue);
+        flush_workqueue(gfs2_glock_workqueue);
         wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
         glock_hash_walk(dump_glock_func, sdp);
 }
@@ -1756,9 +1756,9 @@ int __init gfs2_glock_init(void)
         if (ret < 0)
                 return ret;
 
-        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
+        gfs2_glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                           WQ_HIGHPRI | WQ_FREEZABLE, 0);
-        if (!glock_workqueue) {
+        if (!gfs2_glock_workqueue) {
                 rhashtable_destroy(&gl_hash_table);
                 return -ENOMEM;
         }
@@ -1766,7 +1766,7 @@ int __init gfs2_glock_init(void)
                                                 WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                 0);
         if (!gfs2_delete_workqueue) {
-                destroy_workqueue(glock_workqueue);
+                destroy_workqueue(gfs2_glock_workqueue);
                 rhashtable_destroy(&gl_hash_table);
                 return -ENOMEM;
         }
@@ -1780,7 +1780,7 @@ void gfs2_glock_exit(void)
 {
         unregister_shrinker(&glock_shrinker);
         rhashtable_destroy(&gl_hash_table);
-        destroy_workqueue(glock_workqueue);
+        destroy_workqueue(gfs2_glock_workqueue);
         destroy_workqueue(gfs2_delete_workqueue);
 }
 
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 46ab67f..e2f80ca 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -134,6 +134,7 @@ struct lm_lockops {
         const match_table_t *lm_tokens;
 };
 
+extern struct workqueue_struct *gfs2_glock_workqueue;
 extern struct workqueue_struct *gfs2_delete_workqueue;
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9d5c3f7..46e5004 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -1614,7 +1615,9 @@ out:
         ip->i_gl->gl_object = NULL;
         flush_delayed_work(&ip->i_gl->gl_work);
         gfs2_glock_add_to_lru(ip->i_gl);
-        gfs2_glock_put(ip->i_gl);
+        if (queue_delayed_work(gfs2_glock_workqueue,
+                               &ip->i_gl->gl_work, 0) == 0)
+                gfs2_glock_put(ip->i_gl);
         ip->i_gl = NULL;
         if (ip->i_iopen_gh.gh_gl) {
                 ip->i_iopen_gh.gh_gl->gl_object = NULL;
-- 
2.5.0
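
A note on the idiom this patch uses throughout: queue_delayed_work()
returns 0 (false) when the work item is already pending. The pending work
only accounts for the one reference it already owns, so in that case the
caller must drop its own reference directly; otherwise the queued work
function performs the final put later, from the workqueue, outside the
shrinker/reclaim call chain that the commit message describes. A minimal
sketch of the pattern follows; my_obj, my_wq and my_put() are hypothetical
stand-ins for the glock structures, not part of the patch:

#include <linux/workqueue.h>
#include <linux/lockref.h>

/* Stand-in for a refcounted object with delayed work, like a gfs2_glock. */
struct my_obj {
        struct lockref ref;             /* reference count + lock */
        struct delayed_work work;       /* deferred state-machine work */
};

extern struct workqueue_struct *my_wq;  /* assumed WQ_MEM_RECLAIM queue */
extern void my_put(struct my_obj *obj); /* drops one reference */

/*
 * Queue the final reference drop instead of putting it directly.
 * If queue_delayed_work() returns 0, the work was already pending
 * and will consume only the reference it already holds, so we drop
 * ours here. If the queueing succeeds, the work function runs later
 * and is expected to do the put on our behalf.
 */
static void queue_final_put(struct my_obj *obj)
{
        if (queue_delayed_work(my_wq, &obj->work, 0) == 0)
                my_put(obj);
}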