From: Bob Peterson <rpeterso@redhat.com>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] [GFS2 PATCH 1/2] GFS2: Make gfs2_clear_inode() queue the final put
Date: Thu, 19 Nov 2015 12:42:40 -0600 [thread overview]
Message-ID: <1447958561-2584-2-git-send-email-rpeterso@redhat.com> (raw)
In-Reply-To: <1447958561-2584-1-git-send-email-rpeterso@redhat.com>
This patch changes function gfs2_clear_inode() so that instead
of calling gfs2_glock_put() directly most of the time, it queues
the glock to the delayed work queue. That avoids a possible
deadlock where it calls dlm during a fence operation:
dlm waits for a fence operation to complete, the fence operation
waits for memory, memory reclaim invokes the shrinker, the shrinker
waits for gfs2 to free an inode from memory,
but gfs2 waits for dlm.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
---
fs/gfs2/glock.c | 34 +++++++++++++++++-----------------
fs/gfs2/glock.h | 1 +
fs/gfs2/super.c | 5 ++++-
3 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 68484ef..53fedbb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -62,7 +62,7 @@ typedef void (*glock_examiner) (struct gfs2_glock * gl);
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static struct dentry *gfs2_root;
-static struct workqueue_struct *glock_workqueue;
+struct workqueue_struct *gfs2_glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
@@ -481,7 +481,7 @@ __acquires(&gl->gl_lockref.lock)
}
} else { /* lock_nolock */
finish_xmote(gl, target);
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
@@ -554,7 +554,7 @@ out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_atomic();
gl->gl_lockref.count++;
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
gl->gl_lockref.count--;
return;
@@ -618,7 +618,7 @@ static void glock_work_func(struct work_struct *work)
else {
if (gl->gl_name.ln_type != LM_TYPE_INODE)
delay = 0;
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
}
if (drop_ref)
@@ -973,7 +973,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gl->gl_lockref.count++;
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
gl->gl_lockref.count--;
}
run_queue(gl, 1);
@@ -1042,7 +1042,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
!test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl->gl_name.ln_type == LM_TYPE_INODE)
delay = gl->gl_hold_time;
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
}
@@ -1220,7 +1220,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
spin_lock(&gl->gl_lockref.lock);
handle_callback(gl, state, delay, true);
spin_unlock(&gl->gl_lockref.lock);
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
}
@@ -1282,7 +1282,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
@@ -1341,7 +1341,7 @@ add_back_to_lru:
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
gl->gl_lockref.count--;
spin_unlock(&gl->gl_lockref.lock);
cond_resched_lock(&lru_lock);
@@ -1445,7 +1445,7 @@ static void thaw_glock(struct gfs2_glock *gl)
if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
goto out;
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0) {
out:
gfs2_glock_put(gl);
}
@@ -1465,7 +1465,7 @@ static void clear_glock(struct gfs2_glock *gl)
if (gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
spin_unlock(&gl->gl_lockref.lock);
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ if (queue_delayed_work(gfs2_glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
}
@@ -1503,9 +1503,9 @@ static void dump_glock_func(struct gfs2_glock *gl)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
- flush_workqueue(glock_workqueue);
+ flush_workqueue(gfs2_glock_workqueue);
glock_hash_walk(clear_glock, sdp);
- flush_workqueue(glock_workqueue);
+ flush_workqueue(gfs2_glock_workqueue);
wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
glock_hash_walk(dump_glock_func, sdp);
}
@@ -1756,9 +1756,9 @@ int __init gfs2_glock_init(void)
if (ret < 0)
return ret;
- glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
+ gfs2_glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (!glock_workqueue) {
+ if (!gfs2_glock_workqueue) {
rhashtable_destroy(&gl_hash_table);
return -ENOMEM;
}
@@ -1766,7 +1766,7 @@ int __init gfs2_glock_init(void)
WQ_MEM_RECLAIM | WQ_FREEZABLE,
0);
if (!gfs2_delete_workqueue) {
- destroy_workqueue(glock_workqueue);
+ destroy_workqueue(gfs2_glock_workqueue);
rhashtable_destroy(&gl_hash_table);
return -ENOMEM;
}
@@ -1780,7 +1780,7 @@ void gfs2_glock_exit(void)
{
unregister_shrinker(&glock_shrinker);
rhashtable_destroy(&gl_hash_table);
- destroy_workqueue(glock_workqueue);
+ destroy_workqueue(gfs2_glock_workqueue);
destroy_workqueue(gfs2_delete_workqueue);
}
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 46ab67f..e2f80ca 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -134,6 +134,7 @@ struct lm_lockops {
const match_table_t *lm_tokens;
};
+extern struct workqueue_struct *gfs2_glock_workqueue;
extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9d5c3f7..46e5004 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -24,6 +24,7 @@
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>
@@ -1614,7 +1615,9 @@ out:
ip->i_gl->gl_object = NULL;
flush_delayed_work(&ip->i_gl->gl_work);
gfs2_glock_add_to_lru(ip->i_gl);
- gfs2_glock_put(ip->i_gl);
+ if (queue_delayed_work(gfs2_glock_workqueue,
+ &ip->i_gl->gl_work, 0) == 0)
+ gfs2_glock_put(ip->i_gl);
ip->i_gl = NULL;
if (ip->i_iopen_gh.gh_gl) {
ip->i_iopen_gh.gh_gl->gl_object = NULL;
--
2.5.0
next prev parent reply other threads:[~2015-11-19 18:42 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-11-19 18:42 [Cluster-devel] [GFS2 PATCH 0/2] GFS2: Avoid inode shrinker-related deadlocks Bob Peterson
2015-11-19 18:42 ` Bob Peterson [this message]
2015-11-20 13:33 ` [Cluster-devel] [GFS2 PATCH 1/2] GFS2: Make gfs2_clear_inode() queue the final put Steven Whitehouse
2015-11-25 14:22 ` Bob Peterson
2015-11-25 14:26 ` Steven Whitehouse
2015-12-01 15:42 ` Bob Peterson
2015-12-02 10:23 ` Steven Whitehouse
2015-12-02 16:42 ` Bob Peterson
2015-12-02 17:41 ` Bob Peterson
2015-12-03 11:18 ` Steven Whitehouse
2015-12-04 14:51 ` Bob Peterson
2015-12-04 15:51 ` David Teigland
2015-12-04 17:38 ` Bob Peterson
2015-12-08 7:57 ` Dave Chinner
2015-12-08 9:03 ` Steven Whitehouse
2015-11-19 18:42 ` [Cluster-devel] [GFS2 PATCH 2/2] GFS2: Revert 35e478f Flush pending glock work when evicting an inode Bob Peterson
2015-11-20 13:47 ` Steven Whitehouse
2015-11-25 14:36 ` Bob Peterson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1447958561-2584-2-git-send-email-rpeterso@redhat.com \
--to=rpeterso@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).