* [PATCH AUTOSEL 5.5 452/542] bcache: cached_dev_free needs to put the sb page
From: Sasha Levin @ 2020-02-14 15:47 UTC
To: linux-kernel, stable
Cc: Liang Chen, Christoph Hellwig, Coly Li, Jens Axboe, Sasha Levin,
linux-bcache
From: Liang Chen <liangchen.linux@gmail.com>
[ Upstream commit e8547d42095e58bee658f00fef8e33d2a185c927 ]
As with the cache device, the superblock buffer page needs to be put
while freeing cached_dev. Otherwise a page is leaked every time a
cached_dev is stopped.
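For illustration, a minimal sketch (with illustrative names, not the
driver's actual code) of the reference-counting contract being restored:
a page pinned for a superblock bio holds one reference from alloc_page(),
and the teardown path must drop it with put_page() or the page leaks.

	#include <linux/bio.h>
	#include <linux/gfp.h>

	static struct page *example_sb_page;

	static int example_sb_alloc(struct bio *bio)
	{
		example_sb_page = alloc_page(GFP_KERNEL); /* refcount == 1 */
		if (!example_sb_page)
			return -ENOMEM;
		bio_add_page(bio, example_sb_page, PAGE_SIZE, 0);
		return 0;
	}

	static void example_sb_free(void)
	{
		if (example_sb_page)
			put_page(example_sb_page); /* drop the last reference */
	}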
Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/md/bcache/super.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 77e9869345e70..a573ce1d85aae 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1275,6 +1275,9 @@ static void cached_dev_free(struct closure *cl)
mutex_unlock(&bch_register_lock);
+ if (dc->sb_bio.bi_inline_vecs[0].bv_page)
+ put_page(bio_first_page_all(&dc->sb_bio));
+
if (!IS_ERR_OR_NULL(dc->bdev))
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
--
2.20.1
* [PATCH AUTOSEL 5.5 453/542] bcache: rework error unwinding in register_bcache
From: Sasha Levin @ 2020-02-14 15:47 UTC
To: linux-kernel, stable
Cc: Christoph Hellwig, Coly Li, Jens Axboe, Sasha Levin, linux-bcache
From: Christoph Hellwig <hch@lst.de>
[ Upstream commit 50246693f81fe887f4db78bf7089051d7f1894cc ]
Split the success and error return paths, and use one goto label per
resource to unwind. This also fixes some small errors, such as leaking
the module reference count in the reboot case (which seems entirely
harmless) and printing the wrong warning message for early failures.
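As a side note, here is a minimal sketch of the "one goto label per
resource" unwind idiom this patch applies; the names and resources are
illustrative, not the driver's:

	#include <linux/slab.h>

	static int example_setup(void)
	{
		char *name, *data;
		int ret;

		ret = -ENOMEM;
		name = kmalloc(64, GFP_KERNEL);
		if (!name)
			goto out;

		data = kmalloc(4096, GFP_KERNEL);
		if (!data)
			goto out_free_name;

		/* real code would hand name/data to a long-lived object here */
		return 0;	/* the success path never runs the unwind labels */

	out_free_name:
		kfree(name);	/* each label releases exactly one resource */
	out:
		return ret;
	}

Each failure jumps to the label that unwinds everything acquired so far
and nothing more, so the cleanup order always mirrors the setup order.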
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/md/bcache/super.c | 75 +++++++++++++++++++++++----------------
1 file changed, 45 insertions(+), 30 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a573ce1d85aae..bd2ae1d78fe15 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2375,29 +2375,33 @@ static bool bch_is_open(struct block_device *bdev)
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
- ssize_t ret = -EINVAL;
- const char *err = "cannot allocate memory";
- char *path = NULL;
- struct cache_sb *sb = NULL;
+ const char *err;
+ char *path;
+ struct cache_sb *sb;
struct block_device *bdev = NULL;
- struct page *sb_page = NULL;
+ struct page *sb_page;
+ ssize_t ret;
+ ret = -EBUSY;
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ goto out;
/* For latest state of bcache_is_reboot */
smp_mb();
if (bcache_is_reboot)
- return -EBUSY;
+ goto out_module_put;
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
path = kstrndup(buffer, size, GFP_KERNEL);
if (!path)
- goto err;
+ goto out_module_put;
sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
if (!sb)
- goto err;
+ goto out_free_path;
+ ret = -EINVAL;
err = "failed to open device";
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
@@ -2414,57 +2418,68 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
- goto quiet_out;
+ goto done;
}
- goto err;
+ goto out_free_sb;
}
err = "failed to set blocksize";
if (set_blocksize(bdev, 4096))
- goto err_close;
+ goto out_blkdev_put;
err = read_super(sb, bdev, &sb_page);
if (err)
- goto err_close;
+ goto out_blkdev_put;
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
- goto err_close;
+ goto out_put_sb_page;
mutex_lock(&bch_register_lock);
ret = register_bdev(sb, sb_page, bdev, dc);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ bdev = NULL;
+ goto out_put_sb_page;
+ }
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
- goto err_close;
+ goto out_put_sb_page;
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_page, bdev, ca) != 0)
- goto err;
+ if (register_cache(sb, sb_page, bdev, ca) != 0) {
+ bdev = NULL;
+ goto out_put_sb_page;
+ }
}
-quiet_out:
- ret = size;
-out:
- if (sb_page)
- put_page(sb_page);
+
+ put_page(sb_page);
+done:
kfree(sb);
kfree(path);
module_put(THIS_MODULE);
- return ret;
-
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
+ return size;
+
+out_put_sb_page:
+ put_page(sb_page);
+out_blkdev_put:
+ if (bdev)
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+out_free_sb:
+ kfree(sb);
+out_free_path:
+ kfree(path);
+out_module_put:
+ module_put(THIS_MODULE);
+out:
pr_info("error %s: %s", path, err);
- goto out;
+ return ret;
}
--
2.20.1
* [PATCH AUTOSEL 5.5 454/542] bcache: fix use-after-free in register_bcache()
From: Sasha Levin @ 2020-02-14 15:47 UTC
To: linux-kernel, stable
Cc: Coly Li, Christoph Hellwig, Jens Axboe, Sasha Levin, linux-bcache
From: Coly Li <colyli@suse.de>
[ Upstream commit ae3cd299919af6eb670d5af0bc9d7ba14086bd8e ]
The patch "bcache: rework error unwinding in register_bcache" introduces
a use-after-free regression in register_bcache(). Here are current code,
2510 out_free_path:
2511 kfree(path);
2512 out_module_put:
2513 module_put(THIS_MODULE);
2514 out:
2515 pr_info("error %s: %s", path, err);
2516 return ret;
If an error happens and the above code path is executed, path is
released at line 2511 but still referenced at line 2515, so KASAN
reports a use-after-free error.
This patch fixes the problem by setting path to NULL after the kfree()
and changing line 2515 in the following way,
2515 pr_info("error %s: %s", path?path:"", err);
Signed-off-by: Coly Li <colyli@suse.de>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/md/bcache/super.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index bd2ae1d78fe15..05cb94664efee 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2475,10 +2475,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
kfree(sb);
out_free_path:
kfree(path);
+ path = NULL;
out_module_put:
module_put(THIS_MODULE);
out:
- pr_info("error %s: %s", path, err);
+ pr_info("error %s: %s", path?path:"", err);
return ret;
}
--
2.20.1
* [PATCH AUTOSEL 5.5 455/542] bcache: avoid unnecessary btree nodes flushing in btree_flush_write()
From: Sasha Levin @ 2020-02-14 15:47 UTC
To: linux-kernel, stable
Cc: Coly Li, Guoju Fang, Shuang Li, Jens Axboe, Sasha Levin,
linux-bcache
From: Coly Li <colyli@suse.de>
[ Upstream commit 2aa8c529387c25606fdc1484154b92f8bfbc5746 ]
Commit 91be66e1318f ("bcache: performance improvement for
btree_flush_write()") was an effort to flush the btree node holding the
oldest journal entry faster, by the following methods,
- Only iterate dirty btree nodes in c->btree_cache, avoiding a scan of
a lot of clean btree nodes.
- Take c->btree_cache as a LRU-like list, aggressively flushing all
dirty nodes from the tail of c->btree_cache until the btree node with
the oldest journal entry is flushed. This reduces the time of holding
c->bucket_lock.
Guoju Fang and Shuang Li reported that they observed unexpected extra
write I/Os on the cache device after applying the above patch. Guoju
Fang provided more detailed diagnostic information: the aggressive
btree node flushing may cause 10x more btree nodes to be flushed in his
workload. He points out that when system memory is large enough to hold
all btree nodes in memory, c->btree_cache is not a LRU-like list any
more. Then the btree node with the oldest journal entry is very
probably not close to the tail of the c->btree_cache list. In such a
situation many more dirty btree nodes are aggressively flushed before
the target node is flushed. When a slow SATA SSD is used as the cache
device, such over-aggressive flushing behavior causes a performance
regression.
After spending a lot of time on debugging and diagnosis, I find the
real situation is more complicated; aggressively flushing dirty btree
nodes from the tail of the c->btree_cache list is not a good solution.
- When all btree nodes are cached in memory, c->btree_cache is not a
LRU-like list, and the btree nodes with the oldest journal entry won't
be close to the tail of the list.
- There can be hundreds of dirty btree nodes referencing the oldest
journal entry; before all of them are flushed, the oldest journal entry
cannot be reclaimed.
When the above two conditions occur together, simply flushing from the
tail of the c->btree_cache list is really NOT a good idea.
Fortunately there is still a chance to make btree_flush_write() work
better. Here is how this patch avoids unnecessary btree node flushing,
- Only acquire c->journal.lock when getting the oldest journal entry of
fifo c->journal.pin. In the remaining locations the journal entries are
checked locklessly, so their values may be changed by other cores in
parallel.
- In the loop list_for_each_entry_safe_reverse(), check the latest front
point of fifo c->journal.pin. If it differs from the original point,
which we fetched while holding c->journal.lock, the oldest journal
entry has been reclaimed on another core. At this moment, all selected
dirty nodes recorded in array btree_nodes[] have already been flushed
and are clean on other CPU cores, so it is unnecessary to iterate
c->btree_cache any longer. Just quit the
list_for_each_entry_safe_reverse() loop, and the following for-loop
will skip all the selected clean nodes.
- Find a proper time to quit the list_for_each_entry_safe_reverse()
loop. Check the refcount value of the original fifo front point: if the
value is larger than the number of nodes selected into btree_nodes[],
more matching btree nodes should be scanned. Otherwise there are no
more matching btree nodes in the rest of the c->btree_cache list, and
the loop can quit. If the original oldest journal entry is reclaimed
and the fifo front point is updated, the refcount of the original fifo
front point will be 0, and the loop will quit too.
- Do not hold c->bucket_lock for too long. c->bucket_lock is also
required for space allocation for cached data, and holding it for too
long blocks regular I/O requests. When iterating list c->btree_cache,
even if there are a lot of matching btree nodes, only BTREE_FLUSH_NR
nodes are selected to flush in the following for-loop, in order not to
hold c->bucket_lock for too long.
With this patch, only btree nodes referencing the oldest journal entry
are flushed to the cache device; there is no more aggressive flushing
of unnecessary btree nodes. And in order to avoid blocking regular I/O
requests, each time btree_flush_write() is called, at most
BTREE_FLUSH_NR btree nodes are selected to flush, even if there are
more matching btree nodes in list c->btree_cache.
At last, one more thing to explain: why is it safe to read the front
point of c->journal.pin without holding c->journal.lock inside the
list_for_each_entry_safe_reverse() loop?
Here is my answer: when reading the front point of fifo c->journal.pin,
we don't need to know its exact value; we just want to check whether it
differs from the original front point (which is accurate because we
fetched it while c->journal.lock was held). For such a purpose, it
works as expected without holding c->journal.lock. Even if the front
point is changed on another CPU core and the change is not yet visible
on the local core, and the btree node currently being iterated
references a journal entry identical to the originally fetched fifo
front point, it is still safe. Because after taking mutex b->write_lock
(with its memory barrier) this btree node will be found clean and
skipped, the loop will quit later when iterating the next node of list
c->btree_cache.
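To make the masked fifo-distance arithmetic in the patch below
concrete, here is a minimal user-space sketch (simplified from the
kernel's fifo macros; the ring contents and positions are made up):

	#include <stdio.h>

	/* same expression as the patch's nr_to_fifo_front() macro */
	#define nr_to_fifo_front(p, front_p, mask) \
		(((p) - (front_p)) & (mask))

	int main(void)
	{
		int ring[8];		/* power-of-two ring, mask = 7 */
		int *front = &ring[6];	/* the fifo front may sit anywhere */
		int *pin = &ring[1];	/* an entry that wraps past the end */

		/* (1 - 6) & 7 == 3: pin sits three slots after the front */
		printf("%ld\n", (long)nr_to_fifo_front(pin, front, 7));

		/* a node references the oldest journal entry iff distance == 0 */
		printf("%ld\n", (long)nr_to_fifo_front(front, front, 7));
		return 0;
	}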
Fixes: 91be66e1318f ("bcache: performance improvement for btree_flush_write()")
Reported-by: Guoju Fang <fangguoju@gmail.com>
Reported-by: Shuang Li <psymon@bonuscloud.io>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/md/bcache/journal.c | 80 ++++++++++++++++++++++++++++++++++---
1 file changed, 75 insertions(+), 5 deletions(-)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index be2a2a2016032..33ddc5269e8dc 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -417,10 +417,14 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
/* Journalling */
+#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
+
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
- unsigned int i, n;
+ unsigned int i, nr, ref_nr;
+ atomic_t *fifo_front_p, *now_fifo_front_p;
+ size_t mask;
if (c->journal.btree_flushing)
return;
@@ -433,12 +437,50 @@ static void btree_flush_write(struct cache_set *c)
c->journal.btree_flushing = true;
spin_unlock(&c->journal.flush_write_lock);
+ /* get the oldest journal entry and check its refcount */
+ spin_lock(&c->journal.lock);
+ fifo_front_p = &fifo_front(&c->journal.pin);
+ ref_nr = atomic_read(fifo_front_p);
+ if (ref_nr <= 0) {
+ /*
+ * do nothing if no btree node references
+ * the oldest journal entry
+ */
+ spin_unlock(&c->journal.lock);
+ goto out;
+ }
+ spin_unlock(&c->journal.lock);
+
+ mask = c->journal.pin.mask;
+ nr = 0;
atomic_long_inc(&c->flush_write);
memset(btree_nodes, 0, sizeof(btree_nodes));
- n = 0;
mutex_lock(&c->bucket_lock);
list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
+ /*
+ * It is safe to get now_fifo_front_p without holding
+ * c->journal.lock here, because we don't need to know
+ * the exactly accurate value, just check whether the
+ * front pointer of c->journal.pin is changed.
+ */
+ now_fifo_front_p = &fifo_front(&c->journal.pin);
+ /*
+ * If the oldest journal entry is reclaimed and front
+ * pointer of c->journal.pin changes, it is unnecessary
+ * to scan c->btree_cache anymore, just quit the loop and
+ * flush out what we have already.
+ */
+ if (now_fifo_front_p != fifo_front_p)
+ break;
+ /*
+ * quit this loop if all matching btree nodes are
+ * scanned and record in btree_nodes[] already.
+ */
+ ref_nr = atomic_read(fifo_front_p);
+ if (nr >= ref_nr)
+ break;
+
if (btree_node_journal_flush(b))
pr_err("BUG: flush_write bit should not be set here!");
@@ -454,17 +496,44 @@ static void btree_flush_write(struct cache_set *c)
continue;
}
+ /*
+ * Only select the btree node which exactly references
+ * the oldest journal entry.
+ *
+ * If the journal entry pointed by fifo_front_p is
+ * reclaimed in parallel, don't worry:
+ * - the list_for_each_xxx loop will quit when checking
+ * next now_fifo_front_p.
+ * - If there are matched nodes recorded in btree_nodes[],
+ * they are clean now (this is why and how the oldest
+ * journal entry can be reclaimed). These selected nodes
+ * will be ignored and skipped in the folowing for-loop.
+ */
+ if (nr_to_fifo_front(btree_current_write(b)->journal,
+ fifo_front_p,
+ mask) != 0) {
+ mutex_unlock(&b->write_lock);
+ continue;
+ }
+
set_btree_node_journal_flush(b);
mutex_unlock(&b->write_lock);
- btree_nodes[n++] = b;
- if (n == BTREE_FLUSH_NR)
+ btree_nodes[nr++] = b;
+ /*
+ * To avoid holding c->bucket_lock too long time,
+ * only scan for BTREE_FLUSH_NR matched btree nodes
+ * at most. If there are more btree nodes reference
+ * the oldest journal entry, try to flush them next
+ * time when btree_flush_write() is called.
+ */
+ if (nr == BTREE_FLUSH_NR)
break;
}
mutex_unlock(&c->bucket_lock);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < nr; i++) {
b = btree_nodes[i];
if (!b) {
pr_err("BUG: btree_nodes[%d] is NULL", i);
@@ -497,6 +566,7 @@ static void btree_flush_write(struct cache_set *c)
mutex_unlock(&b->write_lock);
}
+out:
spin_lock(&c->journal.flush_write_lock);
c->journal.btree_flushing = false;
spin_unlock(&c->journal.flush_write_lock);
--
2.20.1
* [PATCH AUTOSEL 5.5 510/542] bcache: explicity type cast in bset_bkey_last()
From: Sasha Levin @ 2020-02-14 15:48 UTC
To: linux-kernel, stable
Cc: Coly Li, kbuild test robot, Jens Axboe, Sasha Levin, linux-bcache
From: Coly Li <colyli@suse.de>
[ Upstream commit 7c02b0055f774ed9afb6e1c7724f33bf148ffdc0 ]
In bset.h, macro bset_bkey_last() is defined as,
bkey_idx((struct bkey *) (i)->d, (i)->keys)
Parameter i can be of various data-structure types; the macro works as
long as the type of i has members 'd' and 'keys'.
bset_bkey_last() is also used in macro csum_set() to calculate the
checksum of an on-disk data structure. When csum_set() is used to
calculate the checksum of the on-disk bcache super block, the parameter
'i' has type struct cache_sb_disk. Inside struct cache_sb_disk (as in
struct cache_sb) the member keys is of __u16 type. But bkey_idx()
expects unsigned int (a 32-bit width), so there is a problem when
passing the parameters via the stack to call bkey_idx().
The sparse tool from the Intel 0day kbuild system reports this
incompatibility. bkey_idx() is part of the user-space API, so the
simplest fix is to cast (i)->keys to unsigned int type in macro
bset_bkey_last().
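A minimal user-space sketch of the mismatch; the struct and helper
below only mimic the kernel types and are not the real definitions:

	#include <stdint.h>
	#include <stdio.h>

	struct sb_like {
		uint16_t keys;		/* like cache_sb_disk.keys (__u16) */
		uint64_t d[4];
	};

	/* the helper takes a 32-bit unsigned int, like bkey_idx() */
	static uint64_t *idx_like(uint64_t *d, unsigned int idx)
	{
		return d + idx;
	}

	/* the explicit cast documents the __u16 -> unsigned int widening */
	#define last_like(i) idx_like((i)->d, (unsigned int)(i)->keys)

	int main(void)
	{
		struct sb_like sb = { .keys = 2 };

		printf("%td\n", last_like(&sb) - sb.d);	/* prints 2 */
		return 0;
	}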
Reported-by: kbuild test robot <lkp@intel.com>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/md/bcache/bset.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index c71365e7c1fac..a50dcfda656f5 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -397,7 +397,8 @@ void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
/* Bkey utility code */
-#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
+#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, \
+ (unsigned int)(i)->keys)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
--
2.20.1
* [PATCH AUTOSEL 5.5 511/542] bcache: fix incorrect data type usage in btree_flush_write()
From: Sasha Levin @ 2020-02-14 15:48 UTC
To: linux-kernel, stable
Cc: Coly Li, Dan Carpenter, Jens Axboe, Sasha Levin, linux-bcache
From: Coly Li <colyli@suse.de>
[ Upstream commit d1c3cc34f5a78b38d2b809b289d912c3560545df ]
Dan Carpenter points out that from commit 2aa8c529387c ("bcache: avoid
unnecessary btree nodes flushing in btree_flush_write()"), there is an
incorrect data type usage which leads to the following static checker
warning:
drivers/md/bcache/journal.c:444 btree_flush_write()
warn: 'ref_nr' unsigned <= 0
drivers/md/bcache/journal.c
422 static void btree_flush_write(struct cache_set *c)
423 {
424 struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
425 unsigned int i, nr, ref_nr;
^^^^^^
426 atomic_t *fifo_front_p, *now_fifo_front_p;
427 size_t mask;
428
429 if (c->journal.btree_flushing)
430 return;
431
432 spin_lock(&c->journal.flush_write_lock);
433 if (c->journal.btree_flushing) {
434 spin_unlock(&c->journal.flush_write_lock);
435 return;
436 }
437 c->journal.btree_flushing = true;
438 spin_unlock(&c->journal.flush_write_lock);
439
440 /* get the oldest journal entry and check its refcount */
441 spin_lock(&c->journal.lock);
442 fifo_front_p = &fifo_front(&c->journal.pin);
443 ref_nr = atomic_read(fifo_front_p);
444 if (ref_nr <= 0) {
^^^^^^^^^^^
Unsigned can't be less than zero.
445 /*
446 * do nothing if no btree node references
447 * the oldest journal entry
448 */
449 spin_unlock(&c->journal.lock);
450 goto out;
451 }
452 spin_unlock(&c->journal.lock);
As the warning indicates, declaring local variable ref_nr as unsigned
int is wrong: it does not match atomic_read() or the "<= 0" check.
This patch fixes the above error by defining local variable ref_nr as
int type.
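A short user-space sketch of why the original check could never fire
for a negative value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ref_nr = (unsigned int)-1; /* negative, wrapped */

		if (ref_nr <= 0)	/* only ever true for exactly 0 */
			puts("caught");
		else
			printf("missed: ref_nr wrapped to %u\n", ref_nr);
		return 0;
	}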
Fixes: 2aa8c529387c ("bcache: avoid unnecessary btree nodes flushing in btree_flush_write()")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/md/bcache/journal.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 33ddc5269e8dc..6730820780b06 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -422,7 +422,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
- unsigned int i, nr, ref_nr;
+ unsigned int i, nr;
+ int ref_nr;
atomic_t *fifo_front_p, *now_fifo_front_p;
size_t mask;
--
2.20.1