* [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes
@ 2023-08-24 21:19 Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 1/9] gfs2: Use qd_sbd more consequently Andreas Gruenbacher
` (8 more replies)
0 siblings, 9 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Bob and I have been looking into the following syzbot-reported quota
bug:
https://lore.kernel.org/all/0000000000002b5e2405f14e860f@google.com
The following patches fix that bug. I've put those patches onto our
for-next branch; they will be submitted upstream in the upcoming merge
window.
https://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git/log/?h=for-next
Thanks,
Andreas
Andreas Gruenbacher (8):
gfs2: Rename sd_{ glock => kill }_wait
gfs2: Rename SDF_DEACTIVATING to SDF_KILL
gfs2: Fix wrong quota shrinker return value
gfs2: Use gfs2_qd_dispose in gfs2_quota_cleanup
gfs2: Factor out duplicate quota data disposal code
gfs2: No more quota complaints after withdraw
gfs2: Fix initial quota data refcount
gfs2: Fix quota data refcount after cleanup
Bob Peterson (1):
gfs2: Use qd_sbd more consequently
fs/gfs2/glock.c | 10 ++--
fs/gfs2/glops.c | 2 +-
fs/gfs2/incore.h | 4 +-
fs/gfs2/ops_fstype.c | 7 ++-
fs/gfs2/quota.c | 139 +++++++++++++++++++++++++------------------
fs/gfs2/quota.h | 1 +
fs/gfs2/super.c | 3 +-
fs/gfs2/sys.c | 2 +-
8 files changed, 98 insertions(+), 70 deletions(-)
--
2.40.1
^ permalink raw reply [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 1/9] gfs2: Use qd_sbd more consequently
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 2/9] gfs2: Rename sd_{ glock => kill }_wait Andreas Gruenbacher
` (7 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
From: Bob Peterson <rpeterso@redhat.com>
Before this patch many of the functions in quota.c got their superblock
pointer, sdp, from the quota_data's glock pointer. That's silly because
the qd already has its own pointer to the superblock (qd_sbd).
This patch changes references to use that instead, eliminating a level
of indirection.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/quota.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 96d41ee034d7..48b9fbffe260 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -119,7 +119,7 @@ static void gfs2_qd_dispose(struct list_head *list)
while (!list_empty(list)) {
qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
- sdp = qd->qd_gl->gl_name.ln_sbd;
+ sdp = qd->qd_sbd;
list_del(&qd->qd_lru);
@@ -302,7 +302,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
static void qd_hold(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
lockref_get(&qd->qd_lockref);
}
@@ -367,7 +367,7 @@ static void slot_put(struct gfs2_quota_data *qd)
static int bh_get(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct inode *inode = sdp->sd_qc_inode;
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int block, offset;
@@ -421,7 +421,7 @@ static int bh_get(struct gfs2_quota_data *qd)
static void bh_put(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
mutex_lock(&sdp->sd_quota_mutex);
gfs2_assert(sdp, qd->qd_bh_count);
@@ -489,8 +489,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
static void qd_unlock(struct gfs2_quota_data *qd)
{
- gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
- test_bit(QDF_LOCKED, &qd->qd_flags));
+ gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
clear_bit(QDF_LOCKED, &qd->qd_flags);
bh_put(qd);
slot_put(qd);
@@ -666,7 +665,7 @@ static int sort_qd(const void *a, const void *b)
static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
struct gfs2_quota_change *qc = qd->qd_bh_qc;
s64 x;
@@ -881,7 +880,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
- struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = (*qda)->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks, ind_blocks;
@@ -1009,11 +1008,12 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
struct gfs2_holder *q_gh)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh;
int error;
+ gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
restart:
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
if (error)
@@ -1091,7 +1091,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
static int need_sync(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_tune *gt = &sdp->sd_tune;
s64 value;
unsigned int num, den;
@@ -1178,7 +1178,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
static int print_message(struct gfs2_quota_data *qd, char *type)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
fs_info(sdp, "quota %s for %s %u\n",
type,
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 2/9] gfs2: Rename sd_{ glock => kill }_wait
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 1/9] gfs2: Use qd_sbd more consequently Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 3/9] gfs2: Rename SDF_DEACTIVATING to SDF_KILL Andreas Gruenbacher
` (6 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Rename sd_glock_wait to sd_kill_wait: we'll use it for other things
related to "killing" a filesystem on unmount soon (kill_sb).
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/glock.c | 6 +++---
fs/gfs2/incore.h | 2 +-
fs/gfs2/ops_fstype.c | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 675bfec77706..e242745cf40f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -176,7 +176,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ wake_up(&sdp->sd_kill_wait);
}
/**
@@ -1231,7 +1231,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
out_free:
gfs2_glock_dealloc(&gl->gl_rcu);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ wake_up(&sdp->sd_kill_wait);
out:
return ret;
@@ -2188,7 +2188,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
flush_workqueue(glock_workqueue);
glock_hash_walk(clear_glock, sdp);
flush_workqueue(glock_workqueue);
- wait_event_timeout(sdp->sd_glock_wait,
+ wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
glock_hash_walk(dump_glock_func, sdp);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 04f2d78e8658..7abb43bb8df0 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -716,7 +716,7 @@ struct gfs2_sbd {
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_freeze_gl;
struct work_struct sd_freeze_work;
- wait_queue_head_t sd_glock_wait;
+ wait_queue_head_t sd_kill_wait;
wait_queue_head_t sd_async_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8a27957dbfee..19d19491c0d1 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -87,7 +87,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
- init_waitqueue_head(&sdp->sd_glock_wait);
+ init_waitqueue_head(&sdp->sd_kill_wait);
init_waitqueue_head(&sdp->sd_async_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
init_completion(&sdp->sd_locking_init);
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 3/9] gfs2: Rename SDF_DEACTIVATING to SDF_KILL
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 1/9] gfs2: Use qd_sbd more consequently Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 2/9] gfs2: Rename sd_{ glock => kill }_wait Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 4/9] gfs2: Fix wrong quota shrinker return value Andreas Gruenbacher
` (5 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Rename the SDF_DEACTIVATING flag to SDF_KILL to make it more obvious
that this relates to the kill_sb filesystem operation.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/glock.c | 4 ++--
fs/gfs2/glops.c | 2 +-
fs/gfs2/incore.h | 2 +-
fs/gfs2/ops_fstype.c | 4 ++--
fs/gfs2/super.c | 2 +-
fs/gfs2/sys.c | 2 +-
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e242745cf40f..9cbf8d98489a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1022,7 +1022,7 @@ static void delete_work_func(struct work_struct *work)
* step entirely.
*/
if (gfs2_try_evict(gl)) {
- if (test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+ if (test_bit(SDF_KILL, &sdp->sd_flags))
goto out;
if (gfs2_queue_verify_evict(gl))
return;
@@ -1035,7 +1035,7 @@ static void delete_work_func(struct work_struct *work)
GFS2_BLKST_UNLINKED);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -EAGAIN &&
- !test_bit(SDF_DEACTIVATING, &sdp->sd_flags) &&
+ !test_bit(SDF_KILL, &sdp->sd_flags) &&
gfs2_queue_verify_evict(gl))
return;
} else {
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 54319328b16b..7d6cca467fa1 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -637,7 +637,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (!remote || sb_rdonly(sdp->sd_vfs) ||
- test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+ test_bit(SDF_KILL, &sdp->sd_flags))
return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 7abb43bb8df0..ab857431dfa4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -606,7 +606,7 @@ enum {
SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are
withdrawing */
- SDF_DEACTIVATING = 15,
+ SDF_KILL = 15,
SDF_EVICTING = 16,
SDF_FROZEN = 17,
};
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 19d19491c0d1..6ea295cee463 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1786,9 +1786,9 @@ static void gfs2_kill_sb(struct super_block *sb)
/*
* Flush and then drain the delete workqueue here (via
* destroy_workqueue()) to ensure that any delete work that
- * may be running will also see the SDF_DEACTIVATING flag.
+ * may be running will also see the SDF_KILL flag.
*/
- set_bit(SDF_DEACTIVATING, &sdp->sd_flags);
+ set_bit(SDF_KILL, &sdp->sd_flags);
gfs2_flush_delete_work(sdp);
destroy_workqueue(sdp->sd_delete_wq);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9f4d5d6549ee..e0dceef3c9cc 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -546,7 +546,7 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (!test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+ if (!test_bit(SDF_KILL, &sdp->sd_flags))
gfs2_flush_delete_work(sdp);
if (!log_write_allowed && current == sdp->sd_quotad_process)
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 2dfbe2f188dd..a85fe7b92911 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -118,7 +118,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
test_bit(SDF_WITHDRAW_IN_PROG, &f),
test_bit(SDF_REMOTE_WITHDRAW, &f),
test_bit(SDF_WITHDRAW_RECOVERY, &f),
- test_bit(SDF_DEACTIVATING, &f),
+ test_bit(SDF_KILL, &f),
sdp->sd_log_error,
rwsem_is_locked(&sdp->sd_log_flush_lock),
sdp->sd_log_num_revoke,
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 4/9] gfs2: Fix wrong quota shrinker return value
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
` (2 preceding siblings ...)
2023-08-24 21:19 ` [Cluster-devel] [PATCH 3/9] gfs2: Rename SDF_DEACTIVATING to SDF_KILL Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 5/9] gfs2: Use gfs2_qd_dispose in gfs2_quota_cleanup Andreas Gruenbacher
` (4 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Function gfs2_qd_isolate must only return LRU_REMOVED when removing the
item from the lru list; otherwise, the number of items on the list will
go wrong.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/quota.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 48b9fbffe260..f58072efafc9 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -149,18 +149,22 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *dispose = arg;
- struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+ struct gfs2_quota_data *qd =
+ list_entry(item, struct gfs2_quota_data, qd_lru);
+ enum lru_status status;
if (!spin_trylock(&qd->qd_lockref.lock))
return LRU_SKIP;
+ status = LRU_SKIP;
if (qd->qd_lockref.count == 0) {
lockref_mark_dead(&qd->qd_lockref);
list_lru_isolate_move(lru, &qd->qd_lru, dispose);
+ status = LRU_REMOVED;
}
spin_unlock(&qd->qd_lockref.lock);
- return LRU_REMOVED;
+ return status;
}
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 5/9] gfs2: Use gfs2_qd_dispose in gfs2_quota_cleanup
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
` (3 preceding siblings ...)
2023-08-24 21:19 ` [Cluster-devel] [PATCH 4/9] gfs2: Fix wrong quota shrinker return value Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 6/9] gfs2: Factor out duplicate quota data disposal code Andreas Gruenbacher
` (3 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Change gfs2_quota_cleanup() to move the quota data objects to dispose of
on a dispose list and call gfs2_qd_dispose() on that list, like
gfs2_qd_shrink_scan() does, instead of disposing of the quota data
objects directly.
This may look a bit pointless by itself, but it will make more sense in
combination with a fix that follows.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/quota.c | 26 ++++----------------------
1 file changed, 4 insertions(+), 22 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index f58072efafc9..976bf1097706 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -1459,35 +1459,17 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
- struct list_head *head = &sdp->sd_quota_list;
struct gfs2_quota_data *qd;
+ LIST_HEAD(dispose);
spin_lock(&qd_lock);
- while (!list_empty(head)) {
- qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
-
- list_del(&qd->qd_list);
-
- /* Also remove if this qd exists in the reclaim list */
+ list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
- atomic_dec(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
-
- spin_lock_bucket(qd->qd_hash);
- hlist_bl_del_rcu(&qd->qd_hlist);
- spin_unlock_bucket(qd->qd_hash);
-
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
- gfs2_glock_put(qd->qd_gl);
- call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
-
- spin_lock(&qd_lock);
+ list_add(&qd->qd_lru, &dispose);
}
spin_unlock(&qd_lock);
+ gfs2_qd_dispose(&dispose);
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
kvfree(sdp->sd_quota_bitmap);
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 6/9] gfs2: Factor out duplicate quota data disposal code
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
` (4 preceding siblings ...)
2023-08-24 21:19 ` [Cluster-devel] [PATCH 5/9] gfs2: Use gfs2_qd_dispose in gfs2_quota_cleanup Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 7/9] gfs2: No more quota complaints after withdraw Andreas Gruenbacher
` (2 subsequent siblings)
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Rename gfs2_qd_dispose() to gfs2_qd_dispose_list(). Move some code
duplicated in gfs2_qd_dispose_list() and gfs2_quota_cleanup() into a
new gfs2_qd_dispose() function.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/quota.c | 47 ++++++++++++++++++++++++-----------------------
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 976bf1097706..01fae6b030e9 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -112,35 +112,36 @@ static void gfs2_qd_dealloc(struct rcu_head *rcu)
kmem_cache_free(gfs2_quotad_cachep, qd);
}
-static void gfs2_qd_dispose(struct list_head *list)
+static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
{
- struct gfs2_quota_data *qd;
- struct gfs2_sbd *sdp;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- while (!list_empty(list)) {
- qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
- sdp = qd->qd_sbd;
+ spin_lock(&qd_lock);
+ list_del(&qd->qd_list);
+ spin_unlock(&qd_lock);
- list_del(&qd->qd_lru);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
- /* Free from the filesystem-specific list */
- spin_lock(&qd_lock);
- list_del(&qd->qd_list);
- spin_unlock(&qd_lock);
+ gfs2_assert_warn(sdp, !qd->qd_change);
+ gfs2_assert_warn(sdp, !qd->qd_slot_count);
+ gfs2_assert_warn(sdp, !qd->qd_bh_count);
- spin_lock_bucket(qd->qd_hash);
- hlist_bl_del_rcu(&qd->qd_hlist);
- spin_unlock_bucket(qd->qd_hash);
+ gfs2_glock_put(qd->qd_gl);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+}
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- gfs2_assert_warn(sdp, !qd->qd_bh_count);
+static void gfs2_qd_list_dispose(struct list_head *list)
+{
+ struct gfs2_quota_data *qd;
- gfs2_glock_put(qd->qd_gl);
- atomic_dec(&sdp->sd_quota_count);
+ while (!list_empty(list)) {
+ qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
+ list_del(&qd->qd_lru);
- /* Delete it from the common reclaim list */
- call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+ gfs2_qd_dispose(qd);
+ atomic_dec(&sdp->sd_quota_count);
}
}
@@ -179,7 +180,7 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
gfs2_qd_isolate, &dispose);
- gfs2_qd_dispose(&dispose);
+ gfs2_qd_list_dispose(&dispose);
return freed;
}
@@ -1469,7 +1470,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
}
spin_unlock(&qd_lock);
- gfs2_qd_dispose(&dispose);
+ gfs2_qd_list_dispose(&dispose);
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
kvfree(sdp->sd_quota_bitmap);
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 7/9] gfs2: No more quota complaints after withdraw
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
` (5 preceding siblings ...)
2023-08-24 21:19 ` [Cluster-devel] [PATCH 6/9] gfs2: Factor out duplicate quota data disposal code Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 8/9] gfs2: Fix initial quota data refcount Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 9/9] gfs2: Fix quota data refcount after cleanup Andreas Gruenbacher
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Once a filesystem is withdrawn, don't complain about quota changes
that can't be synced to the main quota file anymore.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/quota.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 01fae6b030e9..fccdb22980e8 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -124,9 +124,11 @@ static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
hlist_bl_del_rcu(&qd->qd_hlist);
spin_unlock_bucket(qd->qd_hash);
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- gfs2_assert_warn(sdp, !qd->qd_bh_count);
+ if (!gfs2_withdrawn(sdp)) {
+ gfs2_assert_warn(sdp, !qd->qd_change);
+ gfs2_assert_warn(sdp, !qd->qd_slot_count);
+ gfs2_assert_warn(sdp, !qd->qd_bh_count);
+ }
gfs2_glock_put(qd->qd_gl);
call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 8/9] gfs2: Fix initial quota data refcount
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
` (6 preceding siblings ...)
2023-08-24 21:19 ` [Cluster-devel] [PATCH 7/9] gfs2: No more quota complaints after withdraw Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 9/9] gfs2: Fix quota data refcount after cleanup Andreas Gruenbacher
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel
Fix the refcount of quota data objects created directly by
gfs2_quota_init(): those are placed into the in-memory quota "database"
for eventual syncing to the main quota file, but they are not actively
held and should thus have an initial refcount of 0.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/quota.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index fccdb22980e8..97fdf64148ba 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -228,7 +228,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
return NULL;
qd->qd_sbd = sdp;
- qd->qd_lockref.count = 1;
+ qd->qd_lockref.count = 0;
spin_lock_init(&qd->qd_lockref.lock);
qd->qd_id = qid;
qd->qd_slot = -1;
@@ -290,6 +290,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
spin_lock_bucket(hash);
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
if (qd == NULL) {
+ new_qd->qd_lockref.count++;
*qdp = new_qd;
list_add(&new_qd->qd_list, &sdp->sd_quota_list);
hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Cluster-devel] [PATCH 9/9] gfs2: Fix quota data refcount after cleanup
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
` (7 preceding siblings ...)
2023-08-24 21:19 ` [Cluster-devel] [PATCH 8/9] gfs2: Fix initial quota data refcount Andreas Gruenbacher
@ 2023-08-24 21:19 ` Andreas Gruenbacher
8 siblings, 0 replies; 10+ messages in thread
From: Andreas Gruenbacher @ 2023-08-24 21:19 UTC (permalink / raw)
To: cluster-devel; +Cc: syzbot+3f6a670108ce43356017
[ 81.372851][ T5532] CPU: 1 PID: 5532 Comm: syz-executor.0 Not tainted 6.2.0-rc1-syzkaller-dirty #0
[ 81.382080][ T5532] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/12/2023
[ 81.392343][ T5532] Call Trace:
[ 81.395654][ T5532] <TASK>
[ 81.398603][ T5532] dump_stack_lvl+0x1b1/0x290
[ 81.418421][ T5532] gfs2_assert_warn_i+0x19a/0x2e0
[ 81.423480][ T5532] gfs2_quota_cleanup+0x4c6/0x6b0
[ 81.428611][ T5532] gfs2_make_fs_ro+0x517/0x610
[ 81.457802][ T5532] gfs2_withdraw+0x609/0x1540
[ 81.481452][ T5532] gfs2_inode_refresh+0xb2d/0xf60
[ 81.506658][ T5532] gfs2_instantiate+0x15e/0x220
[ 81.511504][ T5532] gfs2_glock_wait+0x1d9/0x2a0
[ 81.516352][ T5532] do_sync+0x485/0xc80
[ 81.554943][ T5532] gfs2_quota_sync+0x3da/0x8b0
[ 81.559738][ T5532] gfs2_sync_fs+0x49/0xb0
[ 81.564063][ T5532] sync_filesystem+0xe8/0x220
[ 81.568740][ T5532] generic_shutdown_super+0x6b/0x310
[ 81.574112][ T5532] kill_block_super+0x79/0xd0
[ 81.578779][ T5532] deactivate_locked_super+0xa7/0xf0
[ 81.584064][ T5532] cleanup_mnt+0x494/0x520
[ 81.593753][ T5532] task_work_run+0x243/0x300
[ 81.608837][ T5532] exit_to_user_mode_loop+0x124/0x150
[ 81.614232][ T5532] exit_to_user_mode_prepare+0xb2/0x140
[ 81.619820][ T5532] syscall_exit_to_user_mode+0x26/0x60
[ 81.625287][ T5532] do_syscall_64+0x49/0xb0
[ 81.629710][ T5532] entry_SYSCALL_64_after_hwframe+0x63/0xcd
In this backtrace, gfs2_quota_sync() takes quota data references and
then calls do_sync(). Function do_sync() encounters filesystem
corruption and withdraws the filesystem, which (among other things) calls
gfs2_quota_cleanup(). Function gfs2_quota_cleanup() wrongly assumes
that nobody is holding any quota data references anymore, and destroys
all quota data objects. When gfs2_quota_sync() then resumes and
dereferences the quota data objects it is holding, those objects are no
longer there.
Fix that by changing gfs2_quota_cleanup() to only destroy quota data
objects that are no longer in use. This also means that qd_put() needs
to check if quotas are still active, and destroy quota data objects
itself otherwise. (We check for the presence of the SDF_JOURNAL_LIVE
flag to decide whether quotas are still active or not.)
Thanks to Edward Adam Davis <eadavis@sina.com> for the initial patches.
Link: https://lore.kernel.org/all/0000000000002b5e2405f14e860f@google.com
Reported-by: syzbot+3f6a670108ce43356017@syzkaller.appspotmail.com
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
fs/gfs2/ops_fstype.c | 1 +
fs/gfs2/quota.c | 41 ++++++++++++++++++++++++++++++++++++++---
fs/gfs2/quota.h | 1 +
fs/gfs2/super.c | 1 +
4 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 6ea295cee463..4035f61c294d 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1590,6 +1590,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
if (fc->sb_flags & SB_RDONLY) {
gfs2_make_fs_ro(sdp);
+ gfs2_quota_wait_cleanup(sdp);
} else {
error = gfs2_make_fs_rw(sdp);
if (error)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 97fdf64148ba..232211359f58 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -109,7 +109,11 @@ static inline void spin_unlock_bucket(unsigned int hash)
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+
kmem_cache_free(gfs2_quotad_cachep, qd);
+ if (atomic_dec_and_test(&sdp->sd_quota_count))
+ wake_up(&sdp->sd_kill_wait);
}
static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
@@ -143,7 +147,6 @@ static void gfs2_qd_list_dispose(struct list_head *list)
list_del(&qd->qd_lru);
gfs2_qd_dispose(qd);
- atomic_dec(&sdp->sd_quota_count);
}
}
@@ -317,13 +320,24 @@ static void qd_hold(struct gfs2_quota_data *qd)
static void qd_put(struct gfs2_quota_data *qd)
{
+ struct gfs2_sbd *sdp;
+
if (lockref_put_or_lock(&qd->qd_lockref))
return;
+ BUG_ON(__lockref_is_dead(&qd->qd_lockref));
+ sdp = qd->qd_sbd;
+ if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+ lockref_mark_dead(&qd->qd_lockref);
+ spin_unlock(&qd->qd_lockref.lock);
+
+ gfs2_qd_dispose(qd);
+ return;
+ }
+
qd->qd_lockref.count = 0;
list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
spin_unlock(&qd->qd_lockref.lock);
-
}
static int slot_get(struct gfs2_quota_data *qd)
@@ -1466,20 +1480,41 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
struct gfs2_quota_data *qd;
LIST_HEAD(dispose);
+ BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
+
spin_lock(&qd_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_lockref.count != 0) {
+ spin_unlock(&qd->qd_lockref.lock);
+ continue;
+ }
+ lockref_mark_dead(&qd->qd_lockref);
+ spin_unlock(&qd->qd_lockref.lock);
+
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
list_add(&qd->qd_lru, &dispose);
}
spin_unlock(&qd_lock);
gfs2_qd_list_dispose(&dispose);
- gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
kvfree(sdp->sd_quota_bitmap);
sdp->sd_quota_bitmap = NULL;
}
+void gfs2_quota_wait_cleanup(struct gfs2_sbd *sdp)
+{
+ int count;
+
+ wait_event_timeout(sdp->sd_kill_wait,
+ (count = atomic_read(&sdp->sd_quota_count)) == 0,
+ HZ * 60);
+
+ if (count != 0)
+ fs_err(sdp, "%d left-over quota data objects\n", count);
+}
+
static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
if (error == 0 || error == -EROFS)
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 21ada332d555..830c5116d628 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -33,6 +33,7 @@ extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
extern int gfs2_quota_init(struct gfs2_sbd *sdp);
extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
+extern void gfs2_quota_wait_cleanup(struct gfs2_sbd *sdp);
extern int gfs2_quotad(void *data);
extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e0dceef3c9cc..05a4adf320a6 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -622,6 +622,7 @@ static void gfs2_put_super(struct super_block *sb)
if (!sb_rdonly(sb)) {
gfs2_make_fs_ro(sdp);
}
+ gfs2_quota_wait_cleanup(sdp);
WARN_ON(gfs2_withdrawing(sdp));
/* At this point, we're through modifying the disk */
--
2.40.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
end of thread, other threads:[~2023-08-24 21:20 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-08-24 21:19 [Cluster-devel] [PATCH 0/9] gfs2: quota cleanups and fixes Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 1/9] gfs2: Use qd_sbd more consequently Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 2/9] gfs2: Rename sd_{ glock => kill }_wait Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 3/9] gfs2: Rename SDF_DEACTIVATING to SDF_KILL Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 4/9] gfs2: Fix wrong quota shrinker return value Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 5/9] gfs2: Use gfs2_qd_dispose in gfs2_quota_cleanup Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 6/9] gfs2: Factor out duplicate quota data disposal code Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 7/9] gfs2: No more quota complaints after withdraw Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 8/9] gfs2: Fix initial quota data refcount Andreas Gruenbacher
2023-08-24 21:19 ` [Cluster-devel] [PATCH 9/9] gfs2: Fix quota data refcount after cleanup Andreas Gruenbacher
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).