* [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock
@ 2017-08-01 0:00 Andreas Gruenbacher
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 1/4] gfs2: gfs2_glock_get: Wait on freeing glocks Andreas Gruenbacher
` (4 more replies)
0 siblings, 5 replies; 7+ messages in thread
From: Andreas Gruenbacher @ 2017-08-01 0:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
With the recent gl_object fixes and an additional reference counting bug
fixed in this patch queue, these four remaining shrinker deadlock
avoidance patches now seem ready for mainline.
As explained in the previous posting of this patch queue, when inodes
are evicted, GFS2 currently calls into DLM. Inode eviction can be
triggered by memory pressure, in the context of a random user-space
process. If DLM happens to block in the process in question (for
example, if that process is a fence agent), GFS2 and DLM will deadlock.
This patch queue stops GFS2 from calling into DLM on the inode evict
path under memory pressure. It does so by first decoupling destroying
inodes from putting their associated glocks, which is what ends up
calling into DLM. Second, when under memory pressure, it moves putting
glocks into work queue context, where it cannot block DLM. Third, when
gfs2_drop_inode determines that an inode's link count has hit zero under
memory pressure, it puts that inode on the delete workqueue (and keeps
the inode in the icache) instead of causing gfs2_evict_inode to delete
the inode immediately. The delete workqueue will not be processed under
memory pressure, so deleting inodes from there is safe.
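To make the flow concrete, here is a rough sketch of the memory-pressure
path after this series (condensed and simplified from the diffs in
patches 3 and 4 below; illustrative only, reference counting and locking
details are omitted):

        /* gfs2_drop_inode(): under memory pressure, hand unlinked inodes
         * to the delete workqueue instead of letting gfs2_evict_inode
         * delete them right away (patch 4). */
        if (!inode->i_nlink &&
            unlikely(current->flags & PF_MEMALLOC) &&
            gfs2_holder_initialized(&ip->i_iopen_gh)) {
                queue_work(gfs2_delete_workqueue,
                           &ip->i_iopen_gh.gh_gl->gl_delete);
                return false;   /* keep the inode in the icache */
        }

        /* gfs2_evict_inode(): put the glock asynchronously so that the
         * final call into DLM happens in work queue context (patch 3). */
        if (current->flags & PF_MEMALLOC)
                gfs2_glock_queue_put(ip->i_gl);
        else
                gfs2_glock_put(ip->i_gl);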
Thanks,
Andreas
Andreas Gruenbacher (4):
gfs2: gfs2_glock_get: Wait on freeing glocks
gfs2: Get rid of gfs2_set_nlink
gfs2: gfs2_evict_inode: Put glocks asynchronously
gfs2: Defer deleting inodes under memory pressure
fs/gfs2/glock.c | 135 +++++++++++++++++++++++++++++++++++++++++++++++---------
fs/gfs2/glock.h | 2 +
fs/gfs2/glops.c | 28 +-----------
fs/gfs2/super.c | 43 +++++++++++++++++-
4 files changed, 157 insertions(+), 51 deletions(-)
--
2.13.3
* [Cluster-devel] [PATCH v3 1/4] gfs2: gfs2_glock_get: Wait on freeing glocks
2017-08-01 0:00 [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Andreas Gruenbacher
@ 2017-08-01 0:00 ` Andreas Gruenbacher
2017-08-03 12:04 ` Andrew Price
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 2/4] gfs2: Get rid of gfs2_set_nlink Andreas Gruenbacher
` (3 subsequent siblings)
4 siblings, 1 reply; 7+ messages in thread
From: Andreas Gruenbacher @ 2017-08-01 0:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
Keep glocks in their hash table until they are freed instead of removing
them when their last reference is dropped. This allows gfs2_glock_get
to wait for any previous instance of a glock to go away before creating
a new glock.
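In other words, glock lookup becomes a wait-and-retry loop roughly along
these lines (condensed from find_insert_glock() in the diff below; the
insert path and error handling are omitted):

        struct wait_glock_queue wait;
        wait_queue_head_t *wq;
        struct gfs2_glock *gl;

again:
        prepare_to_wait_on_glock(&wq, &wait, name);
        rcu_read_lock();
        gl = rhashtable_lookup_fast(&gl_hash_table, name, ht_parms);
        if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
                /* A dying glock with this name is still in the hash
                 * table; wait for gfs2_glock_free() to remove it and
                 * wake us up, then look again. */
                rcu_read_unlock();
                schedule();
                goto again;
        }
        rcu_read_unlock();
        finish_wait_on_glock(wq, &wait);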
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/glock.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 104 insertions(+), 21 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1029340fc8ba..5b1731198e70 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -15,6 +15,7 @@
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
+#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
@@ -80,6 +81,66 @@ static struct rhashtable_params ht_parms = {
static struct rhashtable gl_hash_table;
+#define GLOCK_WAIT_TABLE_BITS 12
+#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
+static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
+
+struct wait_glock_queue {
+ struct lm_lockname *name;
+ wait_queue_entry_t wait;
+};
+
+static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ struct wait_glock_queue *wait_glock =
+ container_of(wait, struct wait_glock_queue, wait);
+ struct lm_lockname *wait_name = wait_glock->name;
+ struct lm_lockname *wake_name = key;
+
+ if (wake_name->ln_sbd != wait_name->ln_sbd ||
+ wake_name->ln_number != wait_name->ln_number ||
+ wake_name->ln_type != wait_name->ln_type)
+ return 0;
+ return autoremove_wake_function(wait, mode, sync, key);
+}
+
+static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
+{
+ u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
+
+ return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
+}
+
+static void prepare_to_wait_on_glock(wait_queue_head_t **wq,
+ struct wait_glock_queue *wait,
+ struct lm_lockname *name)
+{
+ wait->name = name;
+ init_wait(&wait->wait);
+ wait->wait.func = glock_wake_function;
+ *wq = glock_waitqueue(name);
+ prepare_to_wait(*wq, &wait->wait, TASK_UNINTERRUPTIBLE);
+}
+
+static void finish_wait_on_glock(wait_queue_head_t *wq,
+ struct wait_glock_queue *wait)
+{
+ finish_wait(wq, &wait->wait);
+}
+
+/**
+ * wake_up_glock - Wake up waiters on a glock
+ * @gl: the glock
+ */
+static void wake_up_glock(struct gfs2_glock *gl)
+{
+ wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
+
+ if (waitqueue_active(wq))
+ __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
+}
+
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
@@ -96,6 +157,9 @@ void gfs2_glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
+ smp_mb();
+ wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
@@ -194,7 +258,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
gfs2_glock_remove_from_lru(gl);
spin_unlock(&gl->gl_lockref.lock);
- rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
@@ -679,6 +742,36 @@ static void glock_work_func(struct work_struct *work)
spin_unlock(&gl->gl_lockref.lock);
}
+static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
+ struct gfs2_glock *new)
+{
+ struct wait_glock_queue wait;
+ wait_queue_head_t *wq;
+ struct gfs2_glock *gl;
+
+again:
+ prepare_to_wait_on_glock(&wq, &wait, name);
+ rcu_read_lock();
+ if (new) {
+ gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
+ &new->gl_node, ht_parms);
+ if (IS_ERR(gl))
+ goto out;
+ } else {
+ gl = rhashtable_lookup_fast(&gl_hash_table,
+ name, ht_parms);
+ }
+ if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
+ rcu_read_unlock();
+ schedule();
+ goto again;
+ }
+out:
+ rcu_read_unlock();
+ finish_wait_on_glock(wq, &wait);
+ return gl;
+}
+
/**
* gfs2_glock_get() - Get a glock, or create one if one doesn't exist
* @sdp: The GFS2 superblock
@@ -705,15 +798,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
struct kmem_cache *cachep;
int ret = 0;
- rcu_read_lock();
- gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
- if (gl && !lockref_get_not_dead(&gl->gl_lockref))
- gl = NULL;
- rcu_read_unlock();
-
- *glp = gl;
- if (gl)
+ gl = find_insert_glock(&name, NULL);
+ if (gl) {
+ *glp = gl;
return 0;
+ }
if (!create)
return -ENOENT;
@@ -767,10 +856,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
mapping->writeback_index = 0;
}
-again:
- rcu_read_lock();
- tmp = rhashtable_lookup_get_insert_fast(&gl_hash_table, &gl->gl_node,
- ht_parms);
+ tmp = find_insert_glock(&name, gl);
if (!tmp) {
*glp = gl;
goto out;
@@ -779,13 +865,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
ret = PTR_ERR(tmp);
goto out_free;
}
- if (lockref_get_not_dead(&tmp->gl_lockref)) {
- *glp = tmp;
- goto out_free;
- }
- rcu_read_unlock();
- cond_resched();
- goto again;
+ *glp = tmp;
out_free:
kfree(gl->gl_lksb.sb_lvbptr);
@@ -1806,7 +1886,7 @@ static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
int __init gfs2_glock_init(void)
{
- int ret;
+ int i, ret;
ret = rhashtable_init(&gl_hash_table, &ht_parms);
if (ret < 0)
@@ -1835,6 +1915,9 @@ int __init gfs2_glock_init(void)
return ret;
}
+ for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
+ init_waitqueue_head(glock_wait_table + i);
+
return 0;
}
--
2.13.3
* [Cluster-devel] [PATCH v3 2/4] gfs2: Get rid of gfs2_set_nlink
2017-08-01 0:00 [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Andreas Gruenbacher
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 1/4] gfs2: gfs2_glock_get: Wait on freeing glocks Andreas Gruenbacher
@ 2017-08-01 0:00 ` Andreas Gruenbacher
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 3/4] gfs2: gfs2_evict_inode: Put glocks asynchronously Andreas Gruenbacher
` (2 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Andreas Gruenbacher @ 2017-08-01 0:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
Remove gfs2_set_nlink, which prevents the link count of an inode from
becoming non-zero again once it has reached zero. The next commit reduces
the amount of waiting on glocks when an inode is evicted from memory. With
that, an inode can be reallocated before all the remote-unlink callbacks
from a previous delete have been processed, which causes the link count to
change from zero to non-zero.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/glops.c | 28 +---------------------------
1 file changed, 1 insertion(+), 27 deletions(-)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 28c203a02960..dac6559e2195 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -329,32 +329,6 @@ static int inode_go_demote_ok(const struct gfs2_glock *gl)
return 1;
}
-/**
- * gfs2_set_nlink - Set the inode's link count based on on-disk info
- * @inode: The inode in question
- * @nlink: The link count
- *
- * If the link count has hit zero, it must never be raised, whatever the
- * on-disk inode might say. When new struct inodes are created the link
- * count is set to 1, so that we can safely use this test even when reading
- * in on disk information for the first time.
- */
-
-static void gfs2_set_nlink(struct inode *inode, u32 nlink)
-{
- /*
- * We will need to review setting the nlink count here in the
- * light of the forthcoming ro bind mount work. This is a reminder
- * to do that.
- */
- if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
- if (nlink == 0)
- clear_nlink(inode);
- else
- set_nlink(inode, nlink);
- }
-}
-
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
const struct gfs2_dinode *str = buf;
@@ -376,7 +350,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
- gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
+ set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
atime.tv_sec = be64_to_cpu(str->di_atime);
--
2.13.3
* [Cluster-devel] [PATCH v3 3/4] gfs2: gfs2_evict_inode: Put glocks asynchronously
2017-08-01 0:00 [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Andreas Gruenbacher
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 1/4] gfs2: gfs2_glock_get: Wait on freeing glocks Andreas Gruenbacher
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 2/4] gfs2: Get rid of gfs2_set_nlink Andreas Gruenbacher
@ 2017-08-01 0:00 ` Andreas Gruenbacher
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 4/4] gfs2: Defer deleting inodes under memory pressure Andreas Gruenbacher
2017-08-01 17:00 ` [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Bob Peterson
4 siblings, 0 replies; 7+ messages in thread
From: Andreas Gruenbacher @ 2017-08-01 0:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
gfs2_evict_inode is called to free inodes under memory pressure. The
function calls into DLM when an inode's last cluster-wide reference goes
away (remote unlink), and again to release the glock and the associated
DLM lock before finally destroying the inode. However, if DLM is blocked
waiting for memory to become available, calling into DLM again will
deadlock.
Avoid that by decoupling releasing glocks from destroying inodes in that
case: with gfs2_glock_queue_put, glocks will be put asynchronously
in work queue context, when the associated inodes have likely already
been destroyed.
With this change, inodes can end up being unlinked, remote-unlink can be
triggered, and then the inode can be reallocated before all
remote-unlink callbacks are processed. To detect that, revalidate the
link count in gfs2_evict_inode to make sure we're not deleting an
allocated, referenced inode.
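For the iopen glock this means taking an extra reference across
gfs2_glock_dq_uninit, so that the final reference drop (which may call
into DLM) happens afterwards and can be deferred to work queue context
under memory pressure (condensed from the gfs2_evict_inode hunk below):

        struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

        glock_clear_object(gl, ip);
        ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_hold(gl);            /* keep gl alive past dq_uninit */
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
        if (current->flags & PF_MEMALLOC)
                gfs2_glock_queue_put(gl);  /* final put in work queue context */
        else
                gfs2_glock_put(gl);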
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/glock.c | 10 +++++++++-
fs/gfs2/glock.h | 2 ++
fs/gfs2/super.c | 20 ++++++++++++++++++--
3 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 5b1731198e70..fcbe478480f7 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -171,7 +171,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
*
*/
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
{
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
lockref_get(&gl->gl_lockref);
@@ -264,6 +264,14 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
+/*
+ * Cause the glock to be put in work queue context.
+ */
+void gfs2_glock_queue_put(struct gfs2_glock *gl)
+{
+ gfs2_glock_queue_work(gl, 0);
+}
+
/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 526d2123f758..5e12220cc0c2 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -182,7 +182,9 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
+extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
+extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh);
extern void gfs2_holder_reinit(unsigned int state, u16 flags,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 1918bb5fc943..027abb11337b 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1564,6 +1564,12 @@ static void gfs2_evict_inode(struct inode *inode)
goto out_truncate;
}
+ /*
+ * The inode may have been recreated in the meantime.
+ */
+ if (inode->i_nlink)
+ goto out_truncate;
+
alloc_failed:
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
@@ -1653,12 +1659,22 @@ static void gfs2_evict_inode(struct inode *inode)
glock_clear_object(ip->i_gl, ip);
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
gfs2_glock_add_to_lru(ip->i_gl);
- gfs2_glock_put(ip->i_gl);
+ if (current->flags & PF_MEMALLOC)
+ gfs2_glock_queue_put(ip->i_gl);
+ else
+ gfs2_glock_put(ip->i_gl);
ip->i_gl = NULL;
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
- glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+ struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+
+ glock_clear_object(gl, ip);
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+ gfs2_glock_hold(gl);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+ if (current->flags & PF_MEMALLOC)
+ gfs2_glock_queue_put(gl);
+ else
+ gfs2_glock_put(gl);
}
}
--
2.13.3
* [Cluster-devel] [PATCH v3 4/4] gfs2: Defer deleting inodes under memory pressure
2017-08-01 0:00 [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Andreas Gruenbacher
` (2 preceding siblings ...)
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 3/4] gfs2: gfs2_evict_inode: Put glocks asynchronously Andreas Gruenbacher
@ 2017-08-01 0:00 ` Andreas Gruenbacher
2017-08-01 17:00 ` [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Bob Peterson
4 siblings, 0 replies; 7+ messages in thread
From: Andreas Gruenbacher @ 2017-08-01 0:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
When under memory pressure and an inode's link count has dropped to
zero, defer deleting the inode to the delete workqueue. This avoids
calling into DLM under memory pressure, which can deadlock.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/super.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 027abb11337b..958a2c5eec2a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1318,6 +1318,25 @@ static int gfs2_drop_inode(struct inode *inode)
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
}
+
+ /*
+ * When under memory pressure and an inode's link count has dropped to
+ * zero, defer deleting the inode to the delete workqueue. This avoids
+ * calling into DLM under memory pressure, which can deadlock.
+ */
+ if (!inode->i_nlink &&
+ unlikely(current->flags & PF_MEMALLOC) &&
+ gfs2_holder_initialized(&ip->i_iopen_gh)) {
+ struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+
+ spin_lock(&gl->gl_lockref.lock);
+ gl->gl_lockref.count++;
+ if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ gl->gl_lockref.count--;
+ spin_unlock(&gl->gl_lockref.lock);
+ return false;
+ }
+
return generic_drop_inode(inode);
}
@@ -1545,6 +1564,10 @@ static void gfs2_evict_inode(struct inode *inode)
goto alloc_failed;
}
+ /* Deletes should never happen under memory pressure anymore. */
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+ goto out;
+
/* Must not read inode block until block type has been verified */
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (unlikely(error)) {
--
2.13.3
* [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock
2017-08-01 0:00 [Cluster-devel] [PATCH v3 0/4] GFS2 shrinker deadlock Andreas Gruenbacher
` (3 preceding siblings ...)
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 4/4] gfs2: Defer deleting inodes under memory pressure Andreas Gruenbacher
@ 2017-08-01 17:00 ` Bob Peterson
4 siblings, 0 replies; 7+ messages in thread
From: Bob Peterson @ 2017-08-01 17:00 UTC (permalink / raw)
To: cluster-devel.redhat.com
----- Original Message -----
| With the recent gl_object fixes and an additional reference counting bug
| fixed in this patch queue, these four remaining shrinker deadlock
| avoidance patches now seem ready for mainline.
|
| As explained in the previous posting of this patch queue, when inodes
| are evicted, GFS2 currently calls into DLM. Inode eviction can be
| triggered by memory pressure, in the context of a random user-space
| process. If DLM happens to block in the process in question (for
| example, if that process is a fence agent), GFS2 and DLM will deadlock.
|
| This patch queue stops GFS2 from calling into DLM on the inode evict
| path under memory pressure. It does so by first decoupling destroying
| inodes from putting their associated glocks, which is what ends up
| calling into DLM. Second, when under memory pressure, it moves putting
| glocks into work queue context, where it cannot block DLM. Third, when
| gfs2_drop_inode determines that an inode's link count has hit zero under
| memory pressure, it puts that inode on the delete workqueue (and keeps
| the inode in the icache) instead of causing gfs2_evict_inode to delete
| the inode immediately. The delete workqueue will not be processed under
| memory pressure, so deleting inodes from there is safe.
|
| Thanks,
| Andreas
|
| Andreas Gruenbacher (4):
| gfs2: gfs2_glock_get: Wait on freeing glocks
| gfs2: Get rid of gfs2_set_nlink
| gfs2: gfs2_evict_inode: Put glocks asynchronously
| gfs2: Defer deleting inodes under memory pressure
|
| fs/gfs2/glock.c | 135 +++++++++++++++++++++++++++++++++++++++++++++++---------
| fs/gfs2/glock.h | 2 +
| fs/gfs2/glops.c | 28 +-----------
| fs/gfs2/super.c | 43 +++++++++++++++++-
| 4 files changed, 157 insertions(+), 51 deletions(-)
|
| --
| 2.13.3
|
|
Hi,
These all look good. This was a major problem to get fixed.
Thanks for all your effort. All four patches are now pushed to the
for-next branch of the linux-gfs2 tree:
https://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git/commit/fs/gfs2?h=for-next&id=56a365beda9ef5121eab1d8c5dfe8742b4e69d48
https://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git/commit/fs/gfs2?h=for-next&id=325c8fe97257c68c90c68cc6bde61e9825de3361
https://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git/commit/fs/gfs2?h=for-next&id=6e036cbbd6909b8c8e53cd399051c699379e4818
https://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git/commit/fs/gfs2?h=for-next&id=0d0e409c22b6b53e1ed1a57b1551e144e0afae79
Regards,
Bob Peterson
Red Hat File Systems
* [Cluster-devel] [PATCH v3 1/4] gfs2: gfs2_glock_get: Wait on freeing glocks
2017-08-01 0:00 ` [Cluster-devel] [PATCH v3 1/4] gfs2: gfs2_glock_get: Wait on freeing glocks Andreas Gruenbacher
@ 2017-08-03 12:04 ` Andrew Price
0 siblings, 0 replies; 7+ messages in thread
From: Andrew Price @ 2017-08-03 12:04 UTC (permalink / raw)
To: cluster-devel.redhat.com
On Tue, Aug 01, 2017 at 02:00:19AM +0200, Andreas Gruenbacher wrote:
> Keep glocks in their hash table until they are freed instead of removing
> them when their last reference is dropped. This allows gfs2_glock_get
> to wait for any previous instance of a glock to go away before creating
> a new glock.
I hit a panic on mount with the for-next tree on top of the latest upstream and it bisected down to this patch. Trace is below.
Andy
[ 6503.415480] gfs2: fsid=rawhide:gfs2-1: Trying to join cluster "lock_dlm", "rawhide:gfs2-1"
[ 6503.418121] dlm: gfs2-1: joining the lockspace group...
[ 6503.428351] dlm: gfs2-1: group event done 0 0
[ 6503.429026] dlm: gfs2-1: dlm_recover 1
[ 6503.429629] dlm: gfs2-1: add member 1
[ 6503.430100] dlm: gfs2-1: dlm_recover_members 1 nodes
[ 6503.430524] dlm: gfs2-1: join complete
[ 6503.431343] dlm: gfs2-1: generation 1 slots 1 1:1
[ 6503.432073] dlm: gfs2-1: dlm_recover_directory
[ 6503.432732] dlm: gfs2-1: dlm_recover_directory 0 in 0 new
[ 6503.433566] dlm: gfs2-1: dlm_recover_directory 0 out 0 messages
[ 6503.434527] dlm: gfs2-1: dlm_recover 1 generation 1 done: 2 ms
[ 6503.443359] dlm: gfs2-1: dlm_recover 3
[ 6503.444444] dlm: gfs2-1: add member 3
[ 6503.446317] dlm: gfs2-1: add member 2
[ 6503.454397] dlm: gfs2-1: dlm_recover_members 3 nodes
[ 6503.459230] dlm: gfs2-1: generation 2 slots 3 1:1 2:2 3:3
[ 6503.460140] dlm: gfs2-1: dlm_recover_directory
[ 6503.461343] dlm: gfs2-1: dlm_recover_directory 0 in 0 new
[ 6503.529253] dlm: gfs2-1: dlm_recover_directory 2 out 2 messages
[ 6503.579899] dlm: gfs2-1: dlm_recover 3 generation 2 done: 120 ms
[ 6503.940007] gfs2: fsid=rawhide:gfs2-1: first mounter control generation 0
[ 6503.943329] gfs2: fsid=rawhide:gfs2-1: Joined cluster. Now mounting FS...
[ 6503.945791] BUG: sleeping function called from invalid context at kernel/locking/rwsem.c:22
[ 6503.949186] in_atomic(): 1, irqs_disabled(): 0, pid: 9120, name: mount
[ 6503.952380] INFO: lockdep is turned off.
[ 6503.953627] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6503.956543] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6503.959438] Call Trace:
[ 6503.960276] dump_stack+0x8e/0xcd
[ 6503.961411] ___might_sleep+0x164/0x250
[ 6503.962767] __might_sleep+0x4a/0x80
[ 6503.964089] down_read+0x20/0x70
[ 6503.965237] dlm_lock+0x80/0x1e0
[ 6503.966385] ? gdlm_recovery_result+0x120/0x120
[ 6503.967859] ? gdlm_cancel+0x30/0x30
[ 6503.969130] gdlm_lock+0x1e4/0x320
[ 6503.970316] ? gdlm_cancel+0x30/0x30
[ 6503.971737] ? gdlm_recovery_result+0x120/0x120
[ 6503.973271] do_xmote+0x104/0x1e0
[ 6503.974457] run_queue+0xfd/0x180
[ 6503.975731] gfs2_glock_nq+0x233/0x510
[ 6503.976983] gfs2_glock_nq_num+0x71/0xc0
[ 6503.978230] fill_super+0x770/0xcf0
[ 6503.979323] ? gfs2_glock_nq_num+0x69/0xc0
[ 6503.980681] ? snprintf+0x45/0x70
[ 6503.981821] gfs2_mount+0x248/0x290
[ 6503.983083] ? gfs2_mount+0x248/0x290
[ 6503.984278] mount_fs+0x14/0x80
[ 6503.985276] vfs_kern_mount.part.31+0x5d/0x160
[ 6503.986672] do_mount+0x1fc/0xd50
[ 6503.987720] ? refcount_dec_and_lock+0x31/0x50
[ 6503.989119] ? memdup_user+0x4f/0x80
[ 6503.990404] SyS_mount+0x98/0xe0
[ 6503.991608] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6503.993313] RIP: 0033:0x7f7a9583bcea
[ 6503.994573] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6503.997075] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6503.999288] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.001440] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.003660] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.005658] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.007943] BUG: scheduling while atomic: mount/9120/0x00000000
[ 6504.010957] INFO: lockdep is turned off.
[ 6504.012026] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.014484] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.018037] Call Trace:
[ 6504.019030] dump_stack+0x8e/0xcd
[ 6504.020628] __schedule_bug+0x61/0x90
[ 6504.021865] __schedule+0x685/0xa60
[ 6504.023114] ? trace_hardirqs_on+0xd/0x10
[ 6504.024575] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.026603] schedule+0x3d/0x90
[ 6504.027909] bit_wait+0x11/0x60
[ 6504.029197] __wait_on_bit+0x31/0x90
[ 6504.030701] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.032489] ? woken_wake_function+0x20/0x20
[ 6504.034242] gfs2_glock_wait+0x6b/0xb0
[ 6504.035782] gfs2_glock_nq+0x24e/0x510
[ 6504.037312] gfs2_glock_nq_num+0x71/0xc0
[ 6504.038929] fill_super+0x770/0xcf0
[ 6504.040225] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.041321] ? snprintf+0x45/0x70
[ 6504.042222] gfs2_mount+0x248/0x290
[ 6504.043210] ? gfs2_mount+0x248/0x290
[ 6504.044258] mount_fs+0x14/0x80
[ 6504.045176] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.046430] do_mount+0x1fc/0xd50
[ 6504.047148] ? refcount_dec_and_lock+0x31/0x50
[ 6504.048147] ? memdup_user+0x4f/0x80
[ 6504.049037] SyS_mount+0x98/0xe0
[ 6504.049640] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.050509] RIP: 0033:0x7f7a9583bcea
[ 6504.051218] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.052618] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.053908] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.055214] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.056484] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.057774] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.058855] BUG: scheduling while atomic: mount/9120/0x00000000
[ 6504.059924] INFO: lockdep is turned off.
[ 6504.060957] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.062323] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.063416] Call Trace:
[ 6504.063738] dump_stack+0x8e/0xcd
[ 6504.064160] __schedule_bug+0x61/0x90
[ 6504.064642] __schedule+0x685/0xa60
[ 6504.065130] ? trace_hardirqs_on+0xd/0x10
[ 6504.065678] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.066473] schedule+0x3d/0x90
[ 6504.066969] bit_wait+0x11/0x60
[ 6504.067359] __wait_on_bit+0x31/0x90
[ 6504.067816] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.068367] ? woken_wake_function+0x20/0x20
[ 6504.068924] gfs2_glock_wait+0x6b/0xb0
[ 6504.069400] gfs2_glock_nq+0x24e/0x510
[ 6504.069891] gfs2_glock_nq_num+0x71/0xc0
[ 6504.070394] fill_super+0x7ab/0xcf0
[ 6504.070853] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.071386] ? snprintf+0x45/0x70
[ 6504.071803] gfs2_mount+0x248/0x290
[ 6504.072234] ? gfs2_mount+0x248/0x290
[ 6504.072695] mount_fs+0x14/0x80
[ 6504.073077] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.073653] do_mount+0x1fc/0xd50
[ 6504.074046] ? refcount_dec_and_lock+0x31/0x50
[ 6504.074575] ? memdup_user+0x4f/0x80
[ 6504.075122] SyS_mount+0x98/0xe0
[ 6504.075625] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.076319] RIP: 0033:0x7f7a9583bcea
[ 6504.076897] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.077997] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.078839] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.079678] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.080548] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.081395] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.082262] BUG: scheduling while atomic: mount/9120/0x7ffffffe
[ 6504.083012] INFO: lockdep is turned off.
[ 6504.083513] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.084728] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.085892] Call Trace:
[ 6504.086268] dump_stack+0x8e/0xcd
[ 6504.086714] __schedule_bug+0x61/0x90
[ 6504.087191] __schedule+0x685/0xa60
[ 6504.087647] ? trace_hardirqs_on+0xd/0x10
[ 6504.088194] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.088891] schedule+0x3d/0x90
[ 6504.089299] bit_wait+0x11/0x60
[ 6504.089730] __wait_on_bit+0x31/0x90
[ 6504.090209] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.090776] ? woken_wake_function+0x20/0x20
[ 6504.091333] gfs2_glock_wait+0x6b/0xb0
[ 6504.091831] gfs2_glock_nq+0x24e/0x510
[ 6504.092316] gfs2_glock_nq_num+0x71/0xc0
[ 6504.092846] init_sb+0x57/0x4a0
[ 6504.093261] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.093800] fill_super+0x81a/0xcf0
[ 6504.094246] ? fill_super+0x81a/0xcf0
[ 6504.094662] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.095157] ? snprintf+0x45/0x70
[ 6504.095547] gfs2_mount+0x248/0x290
[ 6504.095970] ? gfs2_mount+0x248/0x290
[ 6504.096394] mount_fs+0x14/0x80
[ 6504.096779] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.097290] do_mount+0x1fc/0xd50
[ 6504.097673] ? refcount_dec_and_lock+0x31/0x50
[ 6504.098193] ? memdup_user+0x4f/0x80
[ 6504.098617] SyS_mount+0x98/0xe0
[ 6504.098984] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.099516] RIP: 0033:0x7f7a9583bcea
[ 6504.099937] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.100791] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.101605] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.102427] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.103287] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.104218] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.105240] NOHZ: local_softirq_pending 202
[ 6504.111547] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.112466] INFO: lockdep is turned off.
[ 6504.113024] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.114476] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.116260] Call Trace:
[ 6504.116782] dump_stack+0x8e/0xcd
[ 6504.117468] __schedule_bug+0x61/0x90
[ 6504.118219] __schedule+0x685/0xa60
[ 6504.118952] ? trace_hardirqs_on+0xd/0x10
[ 6504.119568] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.120382] schedule+0x3d/0x90
[ 6504.120800] bit_wait+0x11/0x60
[ 6504.121226] __wait_on_bit+0x31/0x90
[ 6504.121696] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.122301] ? woken_wake_function+0x20/0x20
[ 6504.122843] gfs2_glock_wait+0x6b/0xb0
[ 6504.123328] gfs2_glock_nq+0x24e/0x510
[ 6504.123827] gfs2_inode_lookup+0x1e2/0x420
[ 6504.124355] gfs2_lookup_root+0x2c/0xa0
[ 6504.124832] ? gfs2_lookup_root+0x2c/0xa0
[ 6504.125330] init_sb+0x343/0x4a0
[ 6504.125779] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.126279] fill_super+0x81a/0xcf0
[ 6504.126722] ? fill_super+0x81a/0xcf0
[ 6504.127174] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.127666] ? snprintf+0x45/0x70
[ 6504.128073] gfs2_mount+0x248/0x290
[ 6504.128515] ? gfs2_mount+0x248/0x290
[ 6504.128977] mount_fs+0x14/0x80
[ 6504.129373] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.129945] do_mount+0x1fc/0xd50
[ 6504.130399] ? refcount_dec_and_lock+0x31/0x50
[ 6504.130994] ? memdup_user+0x4f/0x80
[ 6504.131480] SyS_mount+0x98/0xe0
[ 6504.131894] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.132436] RIP: 0033:0x7f7a9583bcea
[ 6504.132869] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.133761] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.134587] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.135481] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.136332] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.137162] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.137989] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.138732] INFO: lockdep is turned off.
[ 6504.139372] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.140800] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.142037] Call Trace:
[ 6504.142377] dump_stack+0x8e/0xcd
[ 6504.142834] __schedule_bug+0x61/0x90
[ 6504.143330] __schedule+0x685/0xa60
[ 6504.143803] ? trace_hardirqs_on+0xd/0x10
[ 6504.144328] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.145020] schedule+0x3d/0x90
[ 6504.145438] bit_wait+0x11/0x60
[ 6504.145862] __wait_on_bit+0x31/0x90
[ 6504.146337] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.146929] ? woken_wake_function+0x20/0x20
[ 6504.147485] gfs2_glock_wait+0x6b/0xb0
[ 6504.147987] gfs2_glock_nq+0x24e/0x510
[ 6504.148484] gfs2_inode_lookup+0x1e2/0x420
[ 6504.149002] ? _raw_spin_unlock+0x27/0x40
[ 6504.149513] gfs2_lookup_root+0x2c/0xa0
[ 6504.150002] ? gfs2_lookup_root+0x2c/0xa0
[ 6504.150558] init_sb+0x36b/0x4a0
[ 6504.151019] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.151590] fill_super+0x81a/0xcf0
[ 6504.152083] ? fill_super+0x81a/0xcf0
[ 6504.152585] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.153161] ? snprintf+0x45/0x70
[ 6504.153630] gfs2_mount+0x248/0x290
[ 6504.154133] ? gfs2_mount+0x248/0x290
[ 6504.154644] mount_fs+0x14/0x80
[ 6504.155092] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.155714] do_mount+0x1fc/0xd50
[ 6504.156181] ? refcount_dec_and_lock+0x31/0x50
[ 6504.156801] ? memdup_user+0x4f/0x80
[ 6504.157329] SyS_mount+0x98/0xe0
[ 6504.157811] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.158487] RIP: 0033:0x7f7a9583bcea
[ 6504.159011] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.160100] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.161057] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.162409] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.163210] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.164006] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.164865] NOHZ: local_softirq_pending 02
[ 6504.165665] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.166593] INFO: lockdep is turned off.
[ 6504.167187] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.168490] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.169690] Call Trace:
[ 6504.170019] dump_stack+0x8e/0xcd
[ 6504.170465] __schedule_bug+0x61/0x90
[ 6504.170960] __schedule+0x685/0xa60
[ 6504.171418] ? trace_hardirqs_on+0xd/0x10
[ 6504.172188] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.173030] schedule+0x3d/0x90
[ 6504.173473] bit_wait+0x11/0x60
[ 6504.174031] __wait_on_bit+0x31/0x90
[ 6504.174586] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.175324] ? woken_wake_function+0x20/0x20
[ 6504.176003] gfs2_glock_wait+0x6b/0xb0
[ 6504.176494] gfs2_glock_nq+0x24e/0x510
[ 6504.177003] gfs2_inode_lookup+0x1e2/0x420
[ 6504.177556] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.178244] gfs2_dir_search+0xc1/0xf0
[ 6504.178740] ? gfs2_dir_search+0xc1/0xf0
[ 6504.179255] gfs2_lookupi+0x182/0x220
[ 6504.179755] ? gfs2_lookupi+0xf7/0x220
[ 6504.180233] gfs2_lookup_simple+0x5a/0x90
[ 6504.180764] ? gfs2_lookup_simple+0x5a/0x90
[ 6504.181335] init_inodes+0x63/0xa70
[ 6504.181816] ? vsnprintf+0x1ac/0x4d0
[ 6504.182299] ? snprintf+0x45/0x70
[ 6504.182776] fill_super+0x896/0xcf0
[ 6504.183264] ? fill_super+0x896/0xcf0
[ 6504.183771] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.184331] ? snprintf+0x45/0x70
[ 6504.184768] gfs2_mount+0x248/0x290
[ 6504.185217] ? gfs2_mount+0x248/0x290
[ 6504.185672] mount_fs+0x14/0x80
[ 6504.186075] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.186606] do_mount+0x1fc/0xd50
[ 6504.187026] ? refcount_dec_and_lock+0x31/0x50
[ 6504.187592] ? memdup_user+0x4f/0x80
[ 6504.188041] SyS_mount+0x98/0xe0
[ 6504.188453] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.189041] RIP: 0033:0x7f7a9583bcea
[ 6504.189488] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.190492] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.191391] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.192332] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.193217] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.194103] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.195027] NOHZ: local_softirq_pending 282
[ 6504.195882] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.196781] INFO: lockdep is turned off.
[ 6504.197347] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.198729] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.199989] Call Trace:
[ 6504.200349] dump_stack+0x8e/0xcd
[ 6504.200825] __schedule_bug+0x61/0x90
[ 6504.201338] __schedule+0x685/0xa60
[ 6504.201832] ? trace_hardirqs_on+0xd/0x10
[ 6504.202446] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.203147] schedule+0x3d/0x90
[ 6504.203630] bit_wait+0x11/0x60
[ 6504.204125] __wait_on_bit+0x31/0x90
[ 6504.204602] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.205308] ? woken_wake_function+0x20/0x20
[ 6504.205992] gfs2_glock_wait+0x6b/0xb0
[ 6504.206542] gfs2_glock_nq+0x24e/0x510
[ 6504.207079] gfs2_inode_lookup+0x1e2/0x420
[ 6504.207615] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.208258] gfs2_dir_search+0xc1/0xf0
[ 6504.208736] ? gfs2_dir_search+0xc1/0xf0
[ 6504.209308] gfs2_lookupi+0x182/0x220
[ 6504.209857] ? gfs2_lookupi+0xf7/0x220
[ 6504.210406] init_inodes+0x1ee/0xa70
[ 6504.210926] ? init_inodes+0x1ee/0xa70
[ 6504.211472] fill_super+0x896/0xcf0
[ 6504.211968] ? fill_super+0x896/0xcf0
[ 6504.212477] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.213045] ? snprintf+0x45/0x70
[ 6504.213510] gfs2_mount+0x248/0x290
[ 6504.214005] ? gfs2_mount+0x248/0x290
[ 6504.214568] mount_fs+0x14/0x80
[ 6504.215043] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.215712] do_mount+0x1fc/0xd50
[ 6504.216213] ? refcount_dec_and_lock+0x31/0x50
[ 6504.216893] ? memdup_user+0x4f/0x80
[ 6504.217440] SyS_mount+0x98/0xe0
[ 6504.217935] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.218634] RIP: 0033:0x7f7a9583bcea
[ 6504.219178] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.220311] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.221321] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.222343] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.223357] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.224378] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.225451] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.226241] INFO: lockdep is turned off.
[ 6504.226845] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.228186] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.229473] Call Trace:
[ 6504.229858] dump_stack+0x8e/0xcd
[ 6504.230305] __schedule_bug+0x61/0x90
[ 6504.230820] __schedule+0x685/0xa60
[ 6504.231321] ? trace_hardirqs_on+0xd/0x10
[ 6504.231866] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.232561] schedule+0x3d/0x90
[ 6504.232993] bit_wait+0x11/0x60
[ 6504.233458] __wait_on_bit+0x31/0x90
[ 6504.233895] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.234285] ? woken_wake_function+0x20/0x20
[ 6504.234645] gfs2_glock_wait+0x6b/0xb0
[ 6504.235127] gfs2_glock_nq+0x24e/0x510
[ 6504.235659] gfs2_inode_lookup+0x1e2/0x420
[ 6504.236266] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.236996] gfs2_dir_search+0xc1/0xf0
[ 6504.237537] ? gfs2_dir_search+0xc1/0xf0
[ 6504.238089] gfs2_lookupi+0x182/0x220
[ 6504.238638] ? trace_hardirqs_on+0xd/0x10
[ 6504.239245] ? gfs2_lookupi+0xf7/0x220
[ 6504.239767] init_inodes+0x1ee/0xa70
[ 6504.240224] ? init_inodes+0x1ee/0xa70
[ 6504.240712] fill_super+0x896/0xcf0
[ 6504.241179] ? fill_super+0x896/0xcf0
[ 6504.241673] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.242225] ? snprintf+0x45/0x70
[ 6504.242681] gfs2_mount+0x248/0x290
[ 6504.243183] ? gfs2_mount+0x248/0x290
[ 6504.243726] mount_fs+0x14/0x80
[ 6504.244183] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.244815] do_mount+0x1fc/0xd50
[ 6504.245290] ? refcount_dec_and_lock+0x31/0x50
[ 6504.245924] ? memdup_user+0x4f/0x80
[ 6504.246413] SyS_mount+0x98/0xe0
[ 6504.246885] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.247542] RIP: 0033:0x7f7a9583bcea
[ 6504.248062] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.249102] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.250114] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.251074] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.252043] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.253005] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.254061] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.254759] INFO: lockdep is turned off.
[ 6504.255258] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.256447] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.257649] Call Trace:
[ 6504.257997] dump_stack+0x8e/0xcd
[ 6504.258453] __schedule_bug+0x61/0x90
[ 6504.258969] __schedule+0x685/0xa60
[ 6504.259496] ? trace_hardirqs_on+0xd/0x10
[ 6504.260078] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.260827] schedule+0x3d/0x90
[ 6504.261290] bit_wait+0x11/0x60
[ 6504.261740] __wait_on_bit+0x31/0x90
[ 6504.262226] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.262817] ? woken_wake_function+0x20/0x20
[ 6504.263410] gfs2_glock_wait+0x6b/0xb0
[ 6504.263943] gfs2_glock_nq+0x24e/0x510
[ 6504.264466] gfs2_inode_lookup+0x1e2/0x420
[ 6504.265037] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.265758] gfs2_dir_search+0xc1/0xf0
[ 6504.266283] ? gfs2_dir_search+0xc1/0xf0
[ 6504.266818] gfs2_lookupi+0x182/0x220
[ 6504.267323] ? trace_hardirqs_on+0xd/0x10
[ 6504.267850] ? gfs2_lookupi+0xf7/0x220
[ 6504.268399] init_inodes+0x1ee/0xa70
[ 6504.268943] ? init_inodes+0x1ee/0xa70
[ 6504.269492] fill_super+0x896/0xcf0
[ 6504.270030] ? fill_super+0x896/0xcf0
[ 6504.270540] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.271121] ? snprintf+0x45/0x70
[ 6504.271561] gfs2_mount+0x248/0x290
[ 6504.272063] ? gfs2_mount+0x248/0x290
[ 6504.272547] mount_fs+0x14/0x80
[ 6504.272976] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.273560] do_mount+0x1fc/0xd50
[ 6504.274002] ? refcount_dec_and_lock+0x31/0x50
[ 6504.274581] ? memdup_user+0x4f/0x80
[ 6504.275066] SyS_mount+0x98/0xe0
[ 6504.275501] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.276141] RIP: 0033:0x7f7a9583bcea
[ 6504.276638] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.277654] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.278660] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.279665] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.280636] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.281580] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.282598] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.283332] INFO: lockdep is turned off.
[ 6504.283794] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.284829] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.285790] Call Trace:
[ 6504.286064] dump_stack+0x8e/0xcd
[ 6504.286429] __schedule_bug+0x61/0x90
[ 6504.286845] __schedule+0x685/0xa60
[ 6504.287227] ? trace_hardirqs_on+0xd/0x10
[ 6504.287658] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.288353] schedule+0x3d/0x90
[ 6504.288749] bit_wait+0x11/0x60
[ 6504.289177] __wait_on_bit+0x31/0x90
[ 6504.289675] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.290304] ? woken_wake_function+0x20/0x20
[ 6504.290912] gfs2_glock_wait+0x6b/0xb0
[ 6504.291442] gfs2_glock_nq+0x24e/0x510
[ 6504.291972] gfs2_inode_lookup+0x1e2/0x420
[ 6504.292480] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.293050] gfs2_dir_search+0xc1/0xf0
[ 6504.293586] ? gfs2_dir_search+0xc1/0xf0
[ 6504.294161] gfs2_lookupi+0x182/0x220
[ 6504.294680] ? trace_hardirqs_on+0xd/0x10
[ 6504.295232] ? gfs2_lookupi+0xf7/0x220
[ 6504.295748] init_inodes+0x1ee/0xa70
[ 6504.296246] ? init_inodes+0x1ee/0xa70
[ 6504.296746] fill_super+0x896/0xcf0
[ 6504.297196] ? fill_super+0x896/0xcf0
[ 6504.297688] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.298230] ? snprintf+0x45/0x70
[ 6504.298680] gfs2_mount+0x248/0x290
[ 6504.299078] ? gfs2_mount+0x248/0x290
[ 6504.299447] mount_fs+0x14/0x80
[ 6504.299926] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.300575] do_mount+0x1fc/0xd50
[ 6504.301075] ? refcount_dec_and_lock+0x31/0x50
[ 6504.301709] ? memdup_user+0x4f/0x80
[ 6504.302253] SyS_mount+0x98/0xe0
[ 6504.302781] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.303367] RIP: 0033:0x7f7a9583bcea
[ 6504.303807] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.304730] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.305625] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.306525] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.307470] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.308419] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.309428] BUG: scheduling while atomic: mount/9120/0x00000000
[ 6504.310106] INFO: lockdep is turned off.
[ 6504.310589] CPU: 1 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.311709] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.312746] Call Trace:
[ 6504.313050] dump_stack+0x8e/0xcd
[ 6504.313451] __schedule_bug+0x61/0x90
[ 6504.313903] __schedule+0x685/0xa60
[ 6504.314353] ? trace_hardirqs_on+0xd/0x10
[ 6504.314872] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.315517] schedule+0x3d/0x90
[ 6504.315921] bit_wait+0x11/0x60
[ 6504.316318] __wait_on_bit+0x31/0x90
[ 6504.316778] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.317334] ? woken_wake_function+0x20/0x20
[ 6504.317860] gfs2_glock_wait+0x6b/0xb0
[ 6504.318311] gfs2_glock_nq+0x24e/0x510
[ 6504.318732] gfs2_glock_nq_num+0x71/0xc0
[ 6504.319157] init_inodes+0x646/0xa70
[ 6504.319541] ? init_inodes+0xd3/0xa70
[ 6504.319956] fill_super+0x896/0xcf0
[ 6504.320332] ? fill_super+0x896/0xcf0
[ 6504.320756] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.321201] ? snprintf+0x45/0x70
[ 6504.321564] gfs2_mount+0x248/0x290
[ 6504.322009] ? gfs2_mount+0x248/0x290
[ 6504.322504] mount_fs+0x14/0x80
[ 6504.322927] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.323497] do_mount+0x1fc/0xd50
[ 6504.323917] ? refcount_dec_and_lock+0x31/0x50
[ 6504.324488] ? memdup_user+0x4f/0x80
[ 6504.324920] SyS_mount+0x98/0xe0
[ 6504.325330] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.325902] RIP: 0033:0x7f7a9583bcea
[ 6504.326347] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.327279] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.328178] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.329074] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.329978] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.330932] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.341821] gfs2: fsid=rawhide:gfs2-1.0: journal 0 mapped with 1 extents
[ 6504.342886] gfs2: fsid=rawhide:gfs2-1.0: jid=0, already locked for use
[ 6504.343871] gfs2: fsid=rawhide:gfs2-1.0: jid=0: Looking at journal...
[ 6504.348257] gfs2: fsid=rawhide:gfs2-1.0: jid=0: Done
[ 6504.349852] gfs2: fsid=rawhide:gfs2-1.0: jid=1: Trying to acquire journal lock...
[ 6504.351250] BUG: scheduling while atomic: kworker/0:2/9103/0x00000000
[ 6504.352419] INFO: lockdep is turned off.
[ 6504.352988] CPU: 0 PID: 9103 Comm: kworker/0:2 Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.355061] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.356962] Workqueue: gfs_recovery gfs2_recover_func
[ 6504.357668] Call Trace:
[ 6504.358028] dump_stack+0x8e/0xcd
[ 6504.358492] __schedule_bug+0x61/0x90
[ 6504.359019] __schedule+0x685/0xa60
[ 6504.359511] ? trace_hardirqs_on+0xd/0x10
[ 6504.360133] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.360834] schedule+0x3d/0x90
[ 6504.361264] bit_wait+0x11/0x60
[ 6504.361695] __wait_on_bit+0x31/0x90
[ 6504.362177] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.362782] ? woken_wake_function+0x20/0x20
[ 6504.363396] gfs2_glock_wait+0x6b/0xb0
[ 6504.364169] gfs2_glock_nq+0x24e/0x510
[ 6504.364736] gfs2_glock_nq_num+0x71/0xc0
[ 6504.365339] gfs2_recover_func+0xb8/0x8d0
[ 6504.365950] ? update_load_avg+0x44d/0x660
[ 6504.366536] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.367132] ? lock_acquire+0xa3/0x1f0
[ 6504.367636] ? process_one_work+0x19a/0x650
[ 6504.368208] process_one_work+0x212/0x650
[ 6504.368775] ? process_one_work+0x212/0x650
[ 6504.369324] worker_thread+0x4d/0x3b0
[ 6504.369844] kthread+0x130/0x150
[ 6504.370290] ? process_one_work+0x650/0x650
[ 6504.370863] ? __kthread_create_on_node+0x230/0x230
[ 6504.371509] ret_from_fork+0x2a/0x40
[ 6504.373371] gfs2: fsid=rawhide:gfs2-1.0: jid=1: Looking at journal...
[ 6504.386559] gfs2: fsid=rawhide:gfs2-1.0: jid=1: Done
[ 6504.387642] gfs2: fsid=rawhide:gfs2-1.0: jid=2: Trying to acquire journal lock...
[ 6504.388826] BUG: scheduling while atomic: kworker/0:2/9103/0x00000000
[ 6504.389914] INFO: lockdep is turned off.
[ 6504.390570] CPU: 0 PID: 9103 Comm: kworker/0:2 Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.391991] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.393279] Workqueue: gfs_recovery gfs2_recover_func
[ 6504.394195] Call Trace:
[ 6504.394653] dump_stack+0x8e/0xcd
[ 6504.395222] __schedule_bug+0x61/0x90
[ 6504.395850] __schedule+0x685/0xa60
[ 6504.396312] ? trace_hardirqs_on+0xd/0x10
[ 6504.396994] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.397864] schedule+0x3d/0x90
[ 6504.398392] bit_wait+0x11/0x60
[ 6504.398942] __wait_on_bit+0x31/0x90
[ 6504.399422] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.400035] ? woken_wake_function+0x20/0x20
[ 6504.400606] gfs2_glock_wait+0x6b/0xb0
[ 6504.401150] gfs2_glock_nq+0x24e/0x510
[ 6504.401668] gfs2_glock_nq_num+0x71/0xc0
[ 6504.402211] gfs2_recover_func+0xb8/0x8d0
[ 6504.402802] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.403369] ? lock_acquire+0xa3/0x1f0
[ 6504.403879] ? process_one_work+0x2a7/0x650
[ 6504.404418] ? process_one_work+0x19a/0x650
[ 6504.404993] process_one_work+0x212/0x650
[ 6504.405523] ? process_one_work+0x212/0x650
[ 6504.406113] worker_thread+0x4d/0x3b0
[ 6504.406603] kthread+0x130/0x150
[ 6504.407104] ? process_one_work+0x650/0x650
[ 6504.407630] ? __kthread_create_on_node+0x230/0x230
[ 6504.408272] ret_from_fork+0x2a/0x40
[ 6504.410194] gfs2: fsid=rawhide:gfs2-1.0: jid=2: Looking at journal...
[ 6504.422660] gfs2: fsid=rawhide:gfs2-1.0: jid=2: Done
[ 6504.423891] gfs2: fsid=rawhide:gfs2-1.0: jid=3: Trying to acquire journal lock...
[ 6504.425297] BUG: scheduling while atomic: kworker/0:2/9103/0x00000000
[ 6504.426234] INFO: lockdep is turned off.
[ 6504.427056] CPU: 0 PID: 9103 Comm: kworker/0:2 Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.428567] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.429998] Workqueue: gfs_recovery gfs2_recover_func
[ 6504.430782] Call Trace:
[ 6504.431186] dump_stack+0x8e/0xcd
[ 6504.431705] __schedule_bug+0x61/0x90
[ 6504.432272] __schedule+0x685/0xa60
[ 6504.432800] ? trace_hardirqs_on+0xd/0x10
[ 6504.433365] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.434132] schedule+0x3d/0x90
[ 6504.434571] bit_wait+0x11/0x60
[ 6504.435039] __wait_on_bit+0x31/0x90
[ 6504.435521] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.436136] ? woken_wake_function+0x20/0x20
[ 6504.436707] gfs2_glock_wait+0x6b/0xb0
[ 6504.437219] gfs2_glock_nq+0x24e/0x510
[ 6504.437721] gfs2_glock_nq_num+0x71/0xc0
[ 6504.438254] gfs2_recover_func+0xb8/0x8d0
[ 6504.438824] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.439395] ? lock_acquire+0xa3/0x1f0
[ 6504.439946] ? process_one_work+0x2a7/0x650
[ 6504.440536] ? process_one_work+0x19a/0x650
[ 6504.441127] process_one_work+0x212/0x650
[ 6504.441701] ? process_one_work+0x212/0x650
[ 6504.442289] worker_thread+0x4d/0x3b0
[ 6504.442812] kthread+0x130/0x150
[ 6504.443277] ? process_one_work+0x650/0x650
[ 6504.443864] ? __kthread_create_on_node+0x230/0x230
[ 6504.444515] ret_from_fork+0x2a/0x40
[ 6504.446568] gfs2: fsid=rawhide:gfs2-1.0: jid=3: Looking at journal...
[ 6504.459227] gfs2: fsid=rawhide:gfs2-1.0: jid=3: Done
[ 6504.460776] gfs2: fsid=rawhide:gfs2-1.0: first mount done, others may mount
[ 6504.462117] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.463149] INFO: lockdep is turned off.
[ 6504.463864] CPU: 0 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.465395] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.466478] Call Trace:
[ 6504.466792] dump_stack+0x8e/0xcd
[ 6504.467187] __schedule_bug+0x61/0x90
[ 6504.467609] __schedule+0x685/0xa60
[ 6504.468041] ? trace_hardirqs_on+0xd/0x10
[ 6504.468513] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.469144] schedule+0x3d/0x90
[ 6504.469520] bit_wait+0x11/0x60
[ 6504.469982] __wait_on_bit+0x31/0x90
[ 6504.470456] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.471073] ? woken_wake_function+0x20/0x20
[ 6504.471634] gfs2_glock_wait+0x6b/0xb0
[ 6504.472173] gfs2_glock_nq+0x24e/0x510
[ 6504.472685] gfs2_inode_lookup+0x1e2/0x420
[ 6504.473287] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.473971] gfs2_dir_search+0xc1/0xf0
[ 6504.474416] ? gfs2_dir_search+0xc1/0xf0
[ 6504.474912] gfs2_lookupi+0x182/0x220
[ 6504.475372] ? lockref_put_or_lock+0xd/0x30
[ 6504.475876] ? gfs2_lookupi+0xf7/0x220
[ 6504.476297] gfs2_lookup_simple+0x5a/0x90
[ 6504.476771] ? gfs2_lookup_simple+0x5a/0x90
[ 6504.477262] init_inodes+0x397/0xa70
[ 6504.477687] fill_super+0x896/0xcf0
[ 6504.478100] ? fill_super+0x896/0xcf0
[ 6504.478529] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.479019] ? snprintf+0x45/0x70
[ 6504.479409] gfs2_mount+0x248/0x290
[ 6504.479852] ? gfs2_mount+0x248/0x290
[ 6504.480385] mount_fs+0x14/0x80
[ 6504.480821] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.481452] do_mount+0x1fc/0xd50
[ 6504.481844] ? refcount_dec_and_lock+0x31/0x50
[ 6504.482342] ? memdup_user+0x4f/0x80
[ 6504.482767] SyS_mount+0x98/0xe0
[ 6504.483128] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.483645] RIP: 0033:0x7f7a9583bcea
[ 6504.484063] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.484896] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.485697] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.486605] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.487523] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.488483] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.489523] NOHZ: local_softirq_pending 28a
[ 6504.490600] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.491711] INFO: lockdep is turned off.
[ 6504.492068] CPU: 0 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.493709] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.495192] Call Trace:
[ 6504.495577] dump_stack+0x8e/0xcd
[ 6504.496096] __schedule_bug+0x61/0x90
[ 6504.496653] __schedule+0x685/0xa60
[ 6504.497185] ? trace_hardirqs_on+0xd/0x10
[ 6504.497773] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.498526] schedule+0x3d/0x90
[ 6504.498990] bit_wait+0x11/0x60
[ 6504.499449] __wait_on_bit+0x31/0x90
[ 6504.499994] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.500612] ? woken_wake_function+0x20/0x20
[ 6504.501261] gfs2_glock_wait+0x6b/0xb0
[ 6504.501773] gfs2_glock_nq+0x24e/0x510
[ 6504.502317] gfs2_inode_lookup+0x1e2/0x420
[ 6504.502921] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.503604] gfs2_dir_search+0xc1/0xf0
[ 6504.504163] ? gfs2_dir_search+0xc1/0xf0
[ 6504.504768] gfs2_lookupi+0x182/0x220
[ 6504.505315] ? lockref_put_or_lock+0xd/0x30
[ 6504.505943] ? gfs2_lookupi+0xf7/0x220
[ 6504.506495] gfs2_lookup_simple+0x5a/0x90
[ 6504.507082] ? gfs2_lookup_simple+0x5a/0x90
[ 6504.507678] init_inodes+0x3bd/0xa70
[ 6504.508209] fill_super+0x896/0xcf0
[ 6504.508702] ? fill_super+0x896/0xcf0
[ 6504.509217] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.509788] ? snprintf+0x45/0x70
[ 6504.510251] gfs2_mount+0x248/0x290
[ 6504.510748] ? gfs2_mount+0x248/0x290
[ 6504.511258] mount_fs+0x14/0x80
[ 6504.511706] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.512324] do_mount+0x1fc/0xd50
[ 6504.512799] ? refcount_dec_and_lock+0x31/0x50
[ 6504.513408] ? memdup_user+0x4f/0x80
[ 6504.513929] SyS_mount+0x98/0xe0
[ 6504.514389] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.515039] RIP: 0033:0x7f7a9583bcea
[ 6504.515551] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.516628] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.517644] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.518645] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.519670] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.520731] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.523650] BUG: scheduling while atomic: mount/9120/0x7fffffff
[ 6504.524404] INFO: lockdep is turned off.
[ 6504.524836] CPU: 0 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.525808] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.527746] Call Trace:
[ 6504.528206] dump_stack+0x8e/0xcd
[ 6504.528767] __schedule_bug+0x61/0x90
[ 6504.529346] __schedule+0x685/0xa60
[ 6504.529904] ? trace_hardirqs_on+0xd/0x10
[ 6504.530511] ? out_of_line_wait_on_atomic_t+0x120/0x120
[ 6504.531297] schedule+0x3d/0x90
[ 6504.531819] bit_wait+0x11/0x60
[ 6504.532242] __wait_on_bit+0x31/0x90
[ 6504.532715] out_of_line_wait_on_bit+0x90/0xb0
[ 6504.533322] ? woken_wake_function+0x20/0x20
[ 6504.533890] gfs2_glock_wait+0x6b/0xb0
[ 6504.534399] gfs2_glock_nq+0x24e/0x510
[ 6504.534926] gfs2_inode_lookup+0x1e2/0x420
[ 6504.535477] ? do_filldir_main.isra.22+0x170/0x170
[ 6504.536127] gfs2_dir_search+0xc1/0xf0
[ 6504.536620] ? gfs2_dir_search+0xc1/0xf0
[ 6504.537172] gfs2_lookupi+0x182/0x220
[ 6504.537655] ? lockref_put_or_lock+0xd/0x30
[ 6504.538205] ? gfs2_lookupi+0xf7/0x220
[ 6504.538687] gfs2_lookup_simple+0x5a/0x90
[ 6504.539245] ? gfs2_lookup_simple+0x5a/0x90
[ 6504.539854] init_inodes+0x3ed/0xa70
[ 6504.540360] fill_super+0x896/0xcf0
[ 6504.540864] ? fill_super+0x896/0xcf0
[ 6504.541366] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.541950] ? snprintf+0x45/0x70
[ 6504.542434] gfs2_mount+0x248/0x290
[ 6504.542957] ? gfs2_mount+0x248/0x290
[ 6504.543501] mount_fs+0x14/0x80
[ 6504.543957] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.544554] do_mount+0x1fc/0xd50
[ 6504.545049] ? refcount_dec_and_lock+0x31/0x50
[ 6504.545666] ? memdup_user+0x4f/0x80
[ 6504.546187] SyS_mount+0x98/0xe0
[ 6504.546661] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.547362] RIP: 0033:0x7f7a9583bcea
[ 6504.547887] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.548951] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.549938] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.550930] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.551975] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.553003] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.556461] ------------[ cut here ]------------
[ 6504.557024] kernel BUG at ./include/linux/pagemap.h:166!
[ 6504.557698] invalid opcode: 0000 [#1] SMP
[ 6504.558253] CPU: 0 PID: 9120 Comm: mount Tainted: G W 4.13.0-rc3-00137-g3c5bc5c05339 #124
[ 6504.559758] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 6504.560883] task: ffff880061aee000 task.stack: ffffc90004f84000
[ 6504.561846] RIP: 0010:find_get_entry+0x223/0x290
[ 6504.562584] RSP: 0018:ffffc90004f87838 EFLAGS: 00010206
[ 6504.563440] RAX: 000000007fffffff RBX: ffff880049ce8790 RCX: ffffc90004f87818
[ 6504.564509] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffea000197fe80
[ 6504.565548] RBP: ffffc90004f87860 R08: 0000000000000000 R09: ffffea000197fe80
[ 6504.566564] R10: 0000000000000040 R11: ffffc90004f87818 R12: 0000000000000000
[ 6504.567664] R13: ffffffff81bdc9d8 R14: ffff880049ce8798 R15: ffffea000197fe80
[ 6504.568699] FS: 00007f7a967f2500(0000) GS:ffff88007fc00000(0000) knlGS:0000000000000000
[ 6504.569853] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 6504.570583] CR2: 00007fa9de6cef58 CR3: 0000000055546000 CR4: 00000000003406f0
[ 6504.571552] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 6504.572389] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 6504.573265] Call Trace:
[ 6504.573586] pagecache_get_page+0x2c/0x290
[ 6504.574143] ? finish_wait+0x72/0x90
[ 6504.574621] do_read_cache_page+0x7a/0x5a0
[ 6504.575175] ? stuffed_readpage+0x1a0/0x1a0
[ 6504.575745] read_cache_page+0x15/0x20
[ 6504.576262] gfs2_internal_read+0x7a/0x150
[ 6504.576869] read_rindex_entry+0x92/0x4d0
[ 6504.577515] gfs2_ri_update+0x24/0xb0
[ 6504.578023] gfs2_rindex_update+0x14c/0x150
[ 6504.578578] ? gfs2_rindex_update+0xd6/0x150
[ 6504.579129] init_inodes+0x424/0xa70
[ 6504.579537] ? init_inodes+0x424/0xa70
[ 6504.580060] fill_super+0x896/0xcf0
[ 6504.580535] ? fill_super+0x896/0xcf0
[ 6504.581032] ? gfs2_glock_nq_num+0x69/0xc0
[ 6504.581584] ? snprintf+0x45/0x70
[ 6504.582057] gfs2_mount+0x248/0x290
[ 6504.582563] ? gfs2_mount+0x248/0x290
[ 6504.583068] mount_fs+0x14/0x80
[ 6504.583505] vfs_kern_mount.part.31+0x5d/0x160
[ 6504.584126] do_mount+0x1fc/0xd50
[ 6504.584505] ? refcount_dec_and_lock+0x31/0x50
[ 6504.585032] ? memdup_user+0x4f/0x80
[ 6504.585489] SyS_mount+0x98/0xe0
[ 6504.585935] entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 6504.586539] RIP: 0033:0x7f7a9583bcea
[ 6504.586918] RSP: 002b:00007ffc98642948 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
[ 6504.587540] RAX: ffffffffffffffda RBX: 00007f7a963c65d9 RCX: 00007f7a9583bcea
[ 6504.588134] RDX: 0000008d70864cd0 RSI: 0000008d70860730 RDI: 0000008d7085ea20
[ 6504.588656] RBP: 00007f7a965d8184 R08: 0000000000000000 R09: 0000008d70864d30
[ 6504.589214] R10: 00000000c0ed0000 R11: 0000000000000246 R12: 0000008d7085e900
[ 6504.589838] R13: 00007ffc98642c68 R14: 0000000000000000 R15: 00000000ffffffff
[ 6504.590921] Code: 8b 47 20 a8 01 0f 84 e8 fe ff ff 48 c7 c6 a0 51 c0 81 e8 f1 01 04 00 0f 0b 48 83 f8 01 0f 84 46 fe ff ff 4d 89 fe e9 4b ff ff ff <0f> 0b e8 c6 70 f5 ff 84 c0 0f 85 52 ff ff ff 48 c7 c2 70 bb bd
[ 6504.593497] RIP: find_get_entry+0x223/0x290 RSP: ffffc90004f87838
[ 6504.594578] ---[ end trace a4b6f9bfcf4d1504 ]---
[ 6504.595367] Kernel panic - not syncing: Fatal exception in interrupt
[ 6504.596569] Kernel Offset: disabled
[ 6504.597131] ---[ end Kernel panic - not syncing: Fatal exception in interrupt
^ permalink raw reply [flat|nested] 7+ messages in thread