* [PATCH 00/10] Btrfs, minor scrub code cleanups
@ 2017-05-16 17:10 David Sterba
  2017-05-16 17:10 ` [PATCH 01/10] btrfs: scrub: inline helper scrub_setup_wr_ctx David Sterba
                   ` (9 more replies)
  0 siblings, 10 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

A bunch of minor cleanups in the scrub code. For 4.13.

David Sterba (10):
  btrfs: scrub: inline helper scrub_setup_wr_ctx
  btrfs: scrub: inline helper scrub_free_wr_ctx
  btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx
  btrfs: scrub: embed scrub_wr_ctx into scrub context
  btrfs: scrub: remove pages_per_wr_bio from scrub context
  btrfs: scrub: use bool for flush_all_writes
  btrfs: scrub: use fs_info::sectorsize and drop it from scrub context
  btrfs: scrub: clean up division in __scrub_mark_bitmap
  btrfs: scrub: clean up division in scrub_find_csum
  btrfs: scrub: simplify scrub worker initialization

 fs/btrfs/scrub.c | 181 +++++++++++++++++++++----------------------------------
 1 file changed, 70 insertions(+), 111 deletions(-)

-- 
2.12.0



* [PATCH 01/10] btrfs: scrub: inline helper scrub_setup_wr_ctx
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-16 17:10 ` [PATCH 02/10] btrfs: scrub: inline helper scrub_free_wr_ctx David Sterba
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

The helper scrub_setup_wr_ctx is used only once and fits into
scrub_setup_ctx, where it continues the initialization, so there is no
need to keep it separate.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 36 +++++++++---------------------------
 1 file changed, 9 insertions(+), 27 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c7b45eb2403d..e88edbeb7644 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -289,9 +289,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 			       u64 *extent_physical,
 			       struct btrfs_device **extent_dev,
 			       int *extent_mirror_num);
-static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
-			      struct btrfs_device *dev,
-			      int is_dev_replace);
 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_page *spage);
@@ -680,7 +677,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	struct scrub_ctx *sctx;
 	int		i;
 	struct btrfs_fs_info *fs_info = dev->fs_info;
-	int ret;
 
 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 	if (!sctx)
@@ -722,12 +718,16 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	spin_lock_init(&sctx->stat_lock);
 	init_waitqueue_head(&sctx->list_wait);
 
-	ret = scrub_setup_wr_ctx(&sctx->wr_ctx,
-				 fs_info->dev_replace.tgtdev, is_dev_replace);
-	if (ret) {
-		scrub_free_ctx(sctx);
-		return ERR_PTR(ret);
+	WARN_ON(sctx->wr_ctx.wr_curr_bio != NULL);
+	mutex_init(&sctx->wr_ctx.wr_lock);
+	sctx->wr_ctx.wr_curr_bio = NULL;
+	if (is_dev_replace) {
+		WARN_ON(!dev->bdev);
+		sctx->wr_ctx.pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
+		sctx->wr_ctx.tgtdev = dev;
+		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
 	}
+
 	return sctx;
 
 nomem:
@@ -4337,24 +4337,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 	btrfs_put_bbio(bbio);
 }
 
-static int scrub_setup_wr_ctx(struct scrub_wr_ctx *wr_ctx,
-			      struct btrfs_device *dev,
-			      int is_dev_replace)
-{
-	WARN_ON(wr_ctx->wr_curr_bio != NULL);
-
-	mutex_init(&wr_ctx->wr_lock);
-	wr_ctx->wr_curr_bio = NULL;
-	if (!is_dev_replace)
-		return 0;
-
-	WARN_ON(!dev->bdev);
-	wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
-	wr_ctx->tgtdev = dev;
-	atomic_set(&wr_ctx->flush_all_writes, 0);
-	return 0;
-}
-
 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
 {
 	mutex_lock(&wr_ctx->wr_lock);
-- 
2.12.0



* [PATCH 02/10] btrfs: scrub: inline helper scrub_free_wr_ctx
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
  2017-05-16 17:10 ` [PATCH 01/10] btrfs: scrub: inline helper scrub_setup_wr_ctx David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-16 17:10 ` [PATCH 03/10] btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx David Sterba
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

The helper scrub_free_wr_ctx is used only once and fits into
scrub_free_ctx, where it continues the sctx shutdown, so there is no
need to keep it separate.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index e88edbeb7644..43c208dff67f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -289,7 +289,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 			       u64 *extent_physical,
 			       struct btrfs_device **extent_dev,
 			       int *extent_mirror_num);
-static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_page *spage);
 static void scrub_wr_submit(struct scrub_ctx *sctx);
@@ -640,7 +639,10 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 	if (!sctx)
 		return;
 
-	scrub_free_wr_ctx(&sctx->wr_ctx);
+	mutex_lock(&sctx->wr_ctx.wr_lock);
+	kfree(sctx->wr_ctx.wr_curr_bio);
+	sctx->wr_ctx.wr_curr_bio = NULL;
+	mutex_unlock(&sctx->wr_ctx.wr_lock);
 
 	/* this can happen when scrub is cancelled */
 	if (sctx->curr != -1) {
@@ -4337,14 +4339,6 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 	btrfs_put_bbio(bbio);
 }
 
-static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
-{
-	mutex_lock(&wr_ctx->wr_lock);
-	kfree(wr_ctx->wr_curr_bio);
-	wr_ctx->wr_curr_bio = NULL;
-	mutex_unlock(&wr_ctx->wr_lock);
-}
-
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace)
 {
-- 
2.12.0



* [PATCH 03/10] btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
  2017-05-16 17:10 ` [PATCH 01/10] btrfs: scrub: inline helper scrub_setup_wr_ctx David Sterba
  2017-05-16 17:10 ` [PATCH 02/10] btrfs: scrub: inline helper scrub_free_wr_ctx David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-19  0:25   ` Liu Bo
  2017-05-16 17:10 ` [PATCH 04/10] btrfs: scrub: embed scrub_wr_ctx into scrub context David Sterba
                   ` (6 subsequent siblings)
  9 siblings, 1 reply; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

We don't need to take the mutex and zero out wr_curr_bio, as this is
called after the scrub has finished.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 43c208dff67f..333f63a59059 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -639,11 +639,6 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 	if (!sctx)
 		return;
 
-	mutex_lock(&sctx->wr_ctx.wr_lock);
-	kfree(sctx->wr_ctx.wr_curr_bio);
-	sctx->wr_ctx.wr_curr_bio = NULL;
-	mutex_unlock(&sctx->wr_ctx.wr_lock);
-
 	/* this can happen when scrub is cancelled */
 	if (sctx->curr != -1) {
 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
@@ -663,6 +658,7 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 		kfree(sbio);
 	}
 
+	kfree(sctx->wr_ctx.wr_curr_bio);
 	scrub_free_csums(sctx);
 	kfree(sctx);
 }
-- 
2.12.0



* [PATCH 04/10] btrfs: scrub: embed scrub_wr_ctx into scrub context
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (2 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 03/10] btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-16 17:10 ` [PATCH 05/10] btrfs: scrub: remove pages_per_wr_bio from " David Sterba
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

The structure scrub_wr_ctx is not used anywhere but in the scrub
context, so we can move its members there. The tgtdev member is renamed
so it's clearer that it belongs to the "wr" part.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 103 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 49 insertions(+), 54 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 333f63a59059..f0c7ec38e33c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -161,14 +161,6 @@ struct scrub_parity {
 	unsigned long		bitmap[0];
 };
 
-struct scrub_wr_ctx {
-	struct scrub_bio *wr_curr_bio;
-	struct btrfs_device *tgtdev;
-	int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
-	atomic_t flush_all_writes;
-	struct mutex wr_lock;
-};
-
 struct scrub_ctx {
 	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
 	struct btrfs_fs_info	*fs_info;
@@ -187,7 +179,12 @@ struct scrub_ctx {
 	u32			nodesize;
 
 	int			is_dev_replace;
-	struct scrub_wr_ctx	wr_ctx;
+
+	struct scrub_bio        *wr_curr_bio;
+	struct mutex            wr_lock;
+	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
+	atomic_t                flush_all_writes;
+	struct btrfs_device     *wr_tgtdev;
 
 	/*
 	 * statistics
@@ -658,7 +655,7 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 		kfree(sbio);
 	}
 
-	kfree(sctx->wr_ctx.wr_curr_bio);
+	kfree(sctx->wr_curr_bio);
 	scrub_free_csums(sctx);
 	kfree(sctx);
 }
@@ -716,14 +713,14 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	spin_lock_init(&sctx->stat_lock);
 	init_waitqueue_head(&sctx->list_wait);
 
-	WARN_ON(sctx->wr_ctx.wr_curr_bio != NULL);
-	mutex_init(&sctx->wr_ctx.wr_lock);
-	sctx->wr_ctx.wr_curr_bio = NULL;
+	WARN_ON(sctx->wr_curr_bio != NULL);
+	mutex_init(&sctx->wr_lock);
+	sctx->wr_curr_bio = NULL;
 	if (is_dev_replace) {
 		WARN_ON(!dev->bdev);
-		sctx->wr_ctx.pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
-		sctx->wr_ctx.tgtdev = dev;
-		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
+		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
+		sctx->wr_tgtdev = dev;
+		atomic_set(&sctx->flush_all_writes, 0);
 	}
 
 	return sctx;
@@ -1896,35 +1893,34 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_page *spage)
 {
-	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
 	struct scrub_bio *sbio;
 	int ret;
 
-	mutex_lock(&wr_ctx->wr_lock);
+	mutex_lock(&sctx->wr_lock);
 again:
-	if (!wr_ctx->wr_curr_bio) {
-		wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
+	if (!sctx->wr_curr_bio) {
+		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
 					      GFP_KERNEL);
-		if (!wr_ctx->wr_curr_bio) {
-			mutex_unlock(&wr_ctx->wr_lock);
+		if (!sctx->wr_curr_bio) {
+			mutex_unlock(&sctx->wr_lock);
 			return -ENOMEM;
 		}
-		wr_ctx->wr_curr_bio->sctx = sctx;
-		wr_ctx->wr_curr_bio->page_count = 0;
+		sctx->wr_curr_bio->sctx = sctx;
+		sctx->wr_curr_bio->page_count = 0;
 	}
-	sbio = wr_ctx->wr_curr_bio;
+	sbio = sctx->wr_curr_bio;
 	if (sbio->page_count == 0) {
 		struct bio *bio;
 
 		sbio->physical = spage->physical_for_dev_replace;
 		sbio->logical = spage->logical;
-		sbio->dev = wr_ctx->tgtdev;
+		sbio->dev = sctx->wr_tgtdev;
 		bio = sbio->bio;
 		if (!bio) {
 			bio = btrfs_io_bio_alloc(GFP_KERNEL,
-					wr_ctx->pages_per_wr_bio);
+					sctx->pages_per_wr_bio);
 			if (!bio) {
-				mutex_unlock(&wr_ctx->wr_lock);
+				mutex_unlock(&sctx->wr_lock);
 				return -ENOMEM;
 			}
 			sbio->bio = bio;
@@ -1949,7 +1945,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 		if (sbio->page_count < 1) {
 			bio_put(sbio->bio);
 			sbio->bio = NULL;
-			mutex_unlock(&wr_ctx->wr_lock);
+			mutex_unlock(&sctx->wr_lock);
 			return -EIO;
 		}
 		scrub_wr_submit(sctx);
@@ -1959,23 +1955,22 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 	sbio->pagev[sbio->page_count] = spage;
 	scrub_page_get(spage);
 	sbio->page_count++;
-	if (sbio->page_count == wr_ctx->pages_per_wr_bio)
+	if (sbio->page_count == sctx->pages_per_wr_bio)
 		scrub_wr_submit(sctx);
-	mutex_unlock(&wr_ctx->wr_lock);
+	mutex_unlock(&sctx->wr_lock);
 
 	return 0;
 }
 
 static void scrub_wr_submit(struct scrub_ctx *sctx)
 {
-	struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
 	struct scrub_bio *sbio;
 
-	if (!wr_ctx->wr_curr_bio)
+	if (!sctx->wr_curr_bio)
 		return;
 
-	sbio = wr_ctx->wr_curr_bio;
-	wr_ctx->wr_curr_bio = NULL;
+	sbio = sctx->wr_curr_bio;
+	sctx->wr_curr_bio = NULL;
 	WARN_ON(!sbio->bio->bi_bdev);
 	scrub_pending_bio_inc(sctx);
 	/* process all writes in a single worker thread. Then the block layer
@@ -2418,10 +2413,10 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 	scrub_block_put(sblock);
 
 	if (sctx->is_dev_replace &&
-	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
-		mutex_lock(&sctx->wr_ctx.wr_lock);
+	    atomic_read(&sctx->flush_all_writes)) {
+		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
-		mutex_unlock(&sctx->wr_ctx.wr_lock);
+		mutex_unlock(&sctx->wr_lock);
 	}
 
 	scrub_pending_bio_dec(sctx);
@@ -2626,10 +2621,10 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	spin_unlock(&sctx->list_lock);
 
 	if (sctx->is_dev_replace &&
-	    atomic_read(&sctx->wr_ctx.flush_all_writes)) {
-		mutex_lock(&sctx->wr_ctx.wr_lock);
+	    atomic_read(&sctx->flush_all_writes)) {
+		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
-		mutex_unlock(&sctx->wr_ctx.wr_lock);
+		mutex_unlock(&sctx->wr_lock);
 	}
 
 	scrub_pending_bio_dec(sctx);
@@ -3303,9 +3298,9 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 						logic_end - logic_start);
 	scrub_parity_put(sparity);
 	scrub_submit(sctx);
-	mutex_lock(&sctx->wr_ctx.wr_lock);
+	mutex_lock(&sctx->wr_lock);
 	scrub_wr_submit(sctx);
-	mutex_unlock(&sctx->wr_ctx.wr_lock);
+	mutex_unlock(&sctx->wr_lock);
 
 	btrfs_release_path(path);
 	return ret < 0 ? ret : 0;
@@ -3461,14 +3456,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 		 */
 		if (atomic_read(&fs_info->scrub_pause_req)) {
 			/* push queued extents */
-			atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
+			atomic_set(&sctx->flush_all_writes, 1);
 			scrub_submit(sctx);
-			mutex_lock(&sctx->wr_ctx.wr_lock);
+			mutex_lock(&sctx->wr_lock);
 			scrub_wr_submit(sctx);
-			mutex_unlock(&sctx->wr_ctx.wr_lock);
+			mutex_unlock(&sctx->wr_lock);
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
-			atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
+			atomic_set(&sctx->flush_all_writes, 0);
 			scrub_blocked_if_needed(fs_info);
 		}
 
@@ -3675,9 +3670,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 out:
 	/* push queued extents */
 	scrub_submit(sctx);
-	mutex_lock(&sctx->wr_ctx.wr_lock);
+	mutex_lock(&sctx->wr_lock);
 	scrub_wr_submit(sctx);
-	mutex_unlock(&sctx->wr_ctx.wr_lock);
+	mutex_unlock(&sctx->wr_lock);
 
 	blk_finish_plug(&plug);
 	btrfs_free_path(path);
@@ -3914,11 +3909,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * write requests are really completed when bios_in_flight
 		 * changes to 0.
 		 */
-		atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
+		atomic_set(&sctx->flush_all_writes, 1);
 		scrub_submit(sctx);
-		mutex_lock(&sctx->wr_ctx.wr_lock);
+		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
-		mutex_unlock(&sctx->wr_ctx.wr_lock);
+		mutex_unlock(&sctx->wr_lock);
 
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->bios_in_flight) == 0);
@@ -3932,7 +3927,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 */
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-		atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
+		atomic_set(&sctx->flush_all_writes, 0);
 
 		scrub_pause_off(fs_info);
 
@@ -4637,7 +4632,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	struct btrfs_device *dev;
 	int ret;
 
-	dev = sctx->wr_ctx.tgtdev;
+	dev = sctx->wr_tgtdev;
 	if (!dev)
 		return -EIO;
 	if (!dev->bdev) {
-- 
2.12.0



* [PATCH 05/10] btrfs: scrub: remove pages_per_wr_bio from scrub context
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (3 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 04/10] btrfs: scrub: embed scrub_wr_ctx into scrub context David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-16 17:10 ` [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes David Sterba
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

Its only purpose seems to be storing SCRUB_PAGES_PER_WR_BIO, so we can
use the constant directly.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f0c7ec38e33c..2c1d344c8edd 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -182,7 +182,6 @@ struct scrub_ctx {
 
 	struct scrub_bio        *wr_curr_bio;
 	struct mutex            wr_lock;
-	int                     pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
 	atomic_t                flush_all_writes;
 	struct btrfs_device     *wr_tgtdev;
 
@@ -718,7 +717,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	sctx->wr_curr_bio = NULL;
 	if (is_dev_replace) {
 		WARN_ON(!dev->bdev);
-		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
 		sctx->wr_tgtdev = dev;
 		atomic_set(&sctx->flush_all_writes, 0);
 	}
@@ -1918,7 +1916,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 		bio = sbio->bio;
 		if (!bio) {
 			bio = btrfs_io_bio_alloc(GFP_KERNEL,
-					sctx->pages_per_wr_bio);
+					SCRUB_PAGES_PER_WR_BIO);
 			if (!bio) {
 				mutex_unlock(&sctx->wr_lock);
 				return -ENOMEM;
@@ -1955,7 +1953,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 	sbio->pagev[sbio->page_count] = spage;
 	scrub_page_get(spage);
 	sbio->page_count++;
-	if (sbio->page_count == sctx->pages_per_wr_bio)
+	if (sbio->page_count == SCRUB_PAGES_PER_WR_BIO)
 		scrub_wr_submit(sctx);
 	mutex_unlock(&sctx->wr_lock);
 
-- 
2.12.0



* [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (4 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 05/10] btrfs: scrub: remove pages_per_wr_bio from " David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-19  0:42   ` Liu Bo
  2017-05-16 17:10 ` [PATCH 07/10] btrfs: scrub: use fs_info::sectorsize and drop it from scrub context David Sterba
                   ` (3 subsequent siblings)
  9 siblings, 1 reply; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

flush_all_writes is an atomic but does not use the atomic semantics at
all; it's just an on/off indicator, so we can use a bool.
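
For illustration, the resulting usage pattern, condensed from the hunks
below (every flush request is followed by submits and a wait, and the
flag is read from worker context):

	/* flush request side, e.g. scrub_stripe() on a pause request */
	sctx->flush_all_writes = true;
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);
	scrub_wr_submit(sctx);
	mutex_unlock(&sctx->wr_lock);
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	sctx->flush_all_writes = false;

	/* worker side, e.g. scrub_bio_end_io_worker() */
	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}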

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2c1d344c8edd..8caf775fee0a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -182,8 +182,8 @@ struct scrub_ctx {
 
 	struct scrub_bio        *wr_curr_bio;
 	struct mutex            wr_lock;
-	atomic_t                flush_all_writes;
 	struct btrfs_device     *wr_tgtdev;
+	bool                    flush_all_writes;
 
 	/*
 	 * statistics
@@ -718,7 +718,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	if (is_dev_replace) {
 		WARN_ON(!dev->bdev);
 		sctx->wr_tgtdev = dev;
-		atomic_set(&sctx->flush_all_writes, 0);
+		sctx->flush_all_writes = false;
 	}
 
 	return sctx;
@@ -2410,8 +2410,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 
 	scrub_block_put(sblock);
 
-	if (sctx->is_dev_replace &&
-	    atomic_read(&sctx->flush_all_writes)) {
+	if (sctx->is_dev_replace && sctx->flush_all_writes) {
 		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
 		mutex_unlock(&sctx->wr_lock);
@@ -2618,8 +2617,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	sctx->first_free = sbio->index;
 	spin_unlock(&sctx->list_lock);
 
-	if (sctx->is_dev_replace &&
-	    atomic_read(&sctx->flush_all_writes)) {
+	if (sctx->is_dev_replace && sctx->flush_all_writes) {
 		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
 		mutex_unlock(&sctx->wr_lock);
@@ -3454,14 +3452,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 		 */
 		if (atomic_read(&fs_info->scrub_pause_req)) {
 			/* push queued extents */
-			atomic_set(&sctx->flush_all_writes, 1);
+			sctx->flush_all_writes = true;
 			scrub_submit(sctx);
 			mutex_lock(&sctx->wr_lock);
 			scrub_wr_submit(sctx);
 			mutex_unlock(&sctx->wr_lock);
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
-			atomic_set(&sctx->flush_all_writes, 0);
+			sctx->flush_all_writes = false;
 			scrub_blocked_if_needed(fs_info);
 		}
 
@@ -3907,7 +3905,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * write requests are really completed when bios_in_flight
 		 * changes to 0.
 		 */
-		atomic_set(&sctx->flush_all_writes, 1);
+		sctx->flush_all_writes = true;
 		scrub_submit(sctx);
 		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
@@ -3925,7 +3923,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 */
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-		atomic_set(&sctx->flush_all_writes, 0);
+		sctx->flush_all_writes = false;
 
 		scrub_pause_off(fs_info);
 
-- 
2.12.0



* [PATCH 07/10] btrfs: scrub: use fs_info::sectorsize and drop it from scrub context
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (5 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-19  0:43   ` Liu Bo
  2017-05-16 17:10 ` [PATCH 08/10] btrfs: scrub: clean up division in __scrub_mark_bitmap David Sterba
                   ` (2 subsequent siblings)
  9 siblings, 1 reply; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

As we now have the node and sector sizes in fs_info, we can use them
and drop the local copies.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 8caf775fee0a..5b135df691fa 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -175,8 +175,6 @@ struct scrub_ctx {
 	atomic_t		cancel_req;
 	int			readonly;
 	int			pages_per_rd_bio;
-	u32			sectorsize;
-	u32			nodesize;
 
 	int			is_dev_replace;
 
@@ -700,8 +698,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 			sctx->bios[i]->next_free = -1;
 	}
 	sctx->first_free = 0;
-	sctx->nodesize = fs_info->nodesize;
-	sctx->sectorsize = fs_info->sectorsize;
 	atomic_set(&sctx->bios_in_flight, 0);
 	atomic_set(&sctx->workers_pending, 0);
 	atomic_set(&sctx->cancel_req, 0);
@@ -2072,7 +2068,7 @@ static int scrub_checksum_data(struct scrub_block *sblock)
 	page = sblock->pagev[0]->page;
 	buffer = kmap_atomic(page);
 
-	len = sctx->sectorsize;
+	len = sctx->fs_info->sectorsize;
 	index = 0;
 	for (;;) {
 		u64 l = min_t(u64, len, PAGE_SIZE);
@@ -2137,7 +2133,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 		   BTRFS_UUID_SIZE))
 		sblock->header_error = 1;
 
-	len = sctx->nodesize - BTRFS_CSUM_SIZE;
+	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
 	mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
 	p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
 	index = 0;
@@ -2715,8 +2711,8 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
 	if (!sum)
 		return 0;
 
-	index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
-	num_sectors = sum->len / sctx->sectorsize;
+	index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize;
+	num_sectors = sum->len / sctx->fs_info->sectorsize;
 	memcpy(csum, sum->sums + index, sctx->csum_size);
 	if (index == num_sectors - 1) {
 		list_del(&sum->list);
@@ -2735,19 +2731,19 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
 	u32 blocksize;
 
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
-		blocksize = sctx->sectorsize;
+		blocksize = sctx->fs_info->sectorsize;
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.data_extents_scrubbed++;
 		sctx->stat.data_bytes_scrubbed += len;
 		spin_unlock(&sctx->stat_lock);
 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
-		blocksize = sctx->nodesize;
+		blocksize = sctx->fs_info->nodesize;
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.tree_extents_scrubbed++;
 		sctx->stat.tree_bytes_scrubbed += len;
 		spin_unlock(&sctx->stat_lock);
 	} else {
-		blocksize = sctx->sectorsize;
+		blocksize = sctx->fs_info->sectorsize;
 		WARN_ON(1);
 	}
 
@@ -2881,11 +2877,11 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
 	}
 
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
-		blocksize = sctx->sectorsize;
+		blocksize = sctx->fs_info->sectorsize;
 	} else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
-		blocksize = sctx->nodesize;
+		blocksize = sctx->fs_info->nodesize;
 	} else {
-		blocksize = sctx->sectorsize;
+		blocksize = sctx->fs_info->sectorsize;
 		WARN_ON(1);
 	}
 
-- 
2.12.0



* [PATCH 08/10] btrfs: scrub: clean up division in __scrub_mark_bitmap
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (6 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 07/10] btrfs: scrub: use fs_info::sectorsize and drop it from scrub context David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-16 17:10 ` [PATCH 09/10] btrfs: scrub: clean up division in scrub_find_csum David Sterba
  2017-05-16 17:10 ` [PATCH 10/10] btrfs: scrub: simplify scrub worker initialization David Sterba
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

Use proper helpers for 64bit division and then cast to narrower type.
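
For background, a hedged sketch of why the helpers are needed (the
values below are examples, not from the patch): on 32-bit targets a
plain division of a u64 compiles into a call to the libgcc routine
__udivdi3, which the kernel does not provide, so the helpers from
<linux/math64.h> have to be used. The old code sidestepped this with a
truncating cast:

	u64 len = 8ULL * 1024 * 1024;	/* example length */
	u32 sectorsize = 4096;		/* example sector size */
	u64 nsectors64, offset;
	u32 nsectors;

	/* old: the cast forces a 32-bit division but truncates large len */
	nsectors = (int)len / sectorsize;

	/* new: 64-by-32 division via the helper, then checked narrowing */
	nsectors64 = div_u64(len, sectorsize);
	ASSERT(nsectors64 < UINT_MAX);
	nsectors = (u32)nsectors64;

	/* 64-by-64 division that also returns the remainder */
	len = div64_u64_rem(len, 65536ULL, &offset);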

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 5b135df691fa..a2366f041f47 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2627,7 +2627,8 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 				       u64 start, u64 len)
 {
 	u64 offset;
-	int nsectors;
+	u64 nsectors64;
+	u32 nsectors;
 	int sectorsize = sparity->sctx->fs_info->sectorsize;
 
 	if (len >= sparity->stripe_len) {
@@ -2638,7 +2639,10 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 	start -= sparity->logic_start;
 	start = div64_u64_rem(start, sparity->stripe_len, &offset);
 	offset = div_u64(offset, sectorsize);
-	nsectors = (int)len / sectorsize;
+	nsectors64 = div_u64(len, sectorsize);
+
+	ASSERT(nsectors64 < UINT_MAX);
+	nsectors = (u32)nsectors64;
 
 	if (offset + nsectors <= sparity->nsectors) {
 		bitmap_set(bitmap, offset, nsectors);
-- 
2.12.0



* [PATCH 09/10] btrfs: scrub: clean up division in scrub_find_csum
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (7 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 08/10] btrfs: scrub: clean up division in __scrub_mark_bitmap David Sterba
@ 2017-05-16 17:10 ` David Sterba
  2017-05-16 17:10 ` [PATCH 10/10] btrfs: scrub: simplify scrub worker initialization David Sterba
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

Use proper helpers for 64bit division.
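
For context, the before/after condensed from the hunk below: the u32
cast existed only to keep the division 32-bit and silently assumed the
difference fits into 32 bits, while div_u64 does the full 64-bit
division and the ASSERT makes the range expectation explicit:

	/* old: lossy cast, then 32-bit division */
	index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize;

	/* new: full 64-bit division via the helper */
	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
	ASSERT(index < UINT_MAX);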

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a2366f041f47..6c8c4535dc43 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2715,7 +2715,9 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
 	if (!sum)
 		return 0;
 
-	index = ((u32)(logical - sum->bytenr)) / sctx->fs_info->sectorsize;
+	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
+	ASSERT(index < UINT_MAX);
+
 	num_sectors = sum->len / sctx->fs_info->sectorsize;
 	memcpy(csum, sum->sums + index, sctx->csum_size);
 	if (index == num_sectors - 1) {
-- 
2.12.0



* [PATCH 10/10] btrfs: scrub: simplify scrub worker initialization
  2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
                   ` (8 preceding siblings ...)
  2017-05-16 17:10 ` [PATCH 09/10] btrfs: scrub: clean up division in scrub_find_csum David Sterba
@ 2017-05-16 17:10 ` David Sterba
  9 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-16 17:10 UTC (permalink / raw)
  To: linux-btrfs; +Cc: David Sterba

Minor simplification, merge the two calls into one.

Signed-off-by: David Sterba <dsterba@suse.com>
---
 fs/btrfs/scrub.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6c8c4535dc43..59b053feb42e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -4026,14 +4026,8 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	int max_active = fs_info->thread_pool_size;
 
 	if (fs_info->scrub_workers_refcnt == 0) {
-		if (is_dev_replace)
-			fs_info->scrub_workers =
-				btrfs_alloc_workqueue(fs_info, "scrub", flags,
-						      1, 4);
-		else
-			fs_info->scrub_workers =
-				btrfs_alloc_workqueue(fs_info, "scrub", flags,
-						      max_active, 4);
+		fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
+				flags, is_dev_replace ? 1 : max_active, 4);
 		if (!fs_info->scrub_workers)
 			goto fail_scrub_workers;
 
-- 
2.12.0



* Re: [PATCH 03/10] btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx
  2017-05-16 17:10 ` [PATCH 03/10] btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx David Sterba
@ 2017-05-19  0:25   ` Liu Bo
  0 siblings, 0 replies; 16+ messages in thread
From: Liu Bo @ 2017-05-19  0:25 UTC (permalink / raw)
  To: David Sterba; +Cc: linux-btrfs

On Tue, May 16, 2017 at 07:10:29PM +0200, David Sterba wrote:
> We don't need to take the mutex and zero out wr_curr_bio, as this is
> called after the scrub has finished.
> 
> Signed-off-by: David Sterba <dsterba@suse.com>

Looks good.

Reviewed-by: Liu Bo <bo.li.liu@oracle.com>

Thanks,

-liubo


* Re: [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes
  2017-05-16 17:10 ` [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes David Sterba
@ 2017-05-19  0:42   ` Liu Bo
  2017-05-19 13:08     ` David Sterba
  0 siblings, 1 reply; 16+ messages in thread
From: Liu Bo @ 2017-05-19  0:42 UTC (permalink / raw)
  To: David Sterba; +Cc: linux-btrfs

On Tue, May 16, 2017 at 07:10:38PM +0200, David Sterba wrote:
> flush_all_writes is an atomic but does not use the atomic semantics at
> all; it's just an on/off indicator, so we can use a bool.
> 

It might use the atomic to avoid reordering, but I'm not sure if
reordering could really happen here.

Thanks,

-liubo

> Signed-off-by: David Sterba <dsterba@suse.com>
> ---
>  fs/btrfs/scrub.c | 18 ++++++++----------
>  1 file changed, 8 insertions(+), 10 deletions(-)
> 
> diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
> index 2c1d344c8edd..8caf775fee0a 100644
> --- a/fs/btrfs/scrub.c
> +++ b/fs/btrfs/scrub.c
> @@ -182,8 +182,8 @@ struct scrub_ctx {
>  
>  	struct scrub_bio        *wr_curr_bio;
>  	struct mutex            wr_lock;
> -	atomic_t                flush_all_writes;
>  	struct btrfs_device     *wr_tgtdev;
> +	bool                    flush_all_writes;
>  
>  	/*
>  	 * statistics
> @@ -718,7 +718,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
>  	if (is_dev_replace) {
>  		WARN_ON(!dev->bdev);
>  		sctx->wr_tgtdev = dev;
> -		atomic_set(&sctx->flush_all_writes, 0);
> +		sctx->flush_all_writes = false;
>  	}
>  
>  	return sctx;
> @@ -2410,8 +2410,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
>  
>  	scrub_block_put(sblock);
>  
> -	if (sctx->is_dev_replace &&
> -	    atomic_read(&sctx->flush_all_writes)) {
> +	if (sctx->is_dev_replace && sctx->flush_all_writes) {
>  		mutex_lock(&sctx->wr_lock);
>  		scrub_wr_submit(sctx);
>  		mutex_unlock(&sctx->wr_lock);
> @@ -2618,8 +2617,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
>  	sctx->first_free = sbio->index;
>  	spin_unlock(&sctx->list_lock);
>  
> -	if (sctx->is_dev_replace &&
> -	    atomic_read(&sctx->flush_all_writes)) {
> +	if (sctx->is_dev_replace && sctx->flush_all_writes) {
>  		mutex_lock(&sctx->wr_lock);
>  		scrub_wr_submit(sctx);
>  		mutex_unlock(&sctx->wr_lock);
> @@ -3454,14 +3452,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
>  		 */
>  		if (atomic_read(&fs_info->scrub_pause_req)) {
>  			/* push queued extents */
> -			atomic_set(&sctx->flush_all_writes, 1);
> +			sctx->flush_all_writes = true;
>  			scrub_submit(sctx);
>  			mutex_lock(&sctx->wr_lock);
>  			scrub_wr_submit(sctx);
>  			mutex_unlock(&sctx->wr_lock);
>  			wait_event(sctx->list_wait,
>  				   atomic_read(&sctx->bios_in_flight) == 0);
> -			atomic_set(&sctx->flush_all_writes, 0);
> +			sctx->flush_all_writes = false;
>  			scrub_blocked_if_needed(fs_info);
>  		}
>  
> @@ -3907,7 +3905,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
>  		 * write requests are really completed when bios_in_flight
>  		 * changes to 0.
>  		 */
> -		atomic_set(&sctx->flush_all_writes, 1);
> +		sctx->flush_all_writes = true;
>  		scrub_submit(sctx);
>  		mutex_lock(&sctx->wr_lock);
>  		scrub_wr_submit(sctx);
> @@ -3925,7 +3923,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
>  		 */
>  		wait_event(sctx->list_wait,
>  			   atomic_read(&sctx->workers_pending) == 0);
> -		atomic_set(&sctx->flush_all_writes, 0);
> +		sctx->flush_all_writes = false;
>  
>  		scrub_pause_off(fs_info);
>  
> -- 
> 2.12.0
> 


* Re: [PATCH 07/10] btrfs: scrub: use fs_info::sectorsize and drop it from scrub context
  2017-05-16 17:10 ` [PATCH 07/10] btrfs: scrub: use fs_info::sectorsize and drop it from scrub context David Sterba
@ 2017-05-19  0:43   ` Liu Bo
  0 siblings, 0 replies; 16+ messages in thread
From: Liu Bo @ 2017-05-19  0:43 UTC (permalink / raw)
  To: David Sterba; +Cc: linux-btrfs

On Tue, May 16, 2017 at 07:10:41PM +0200, David Sterba wrote:
> As we now have the node and sector sizes in fs_info, we can use them
> and drop the local copies.
> 
> Signed-off-by: David Sterba <dsterba@suse.com>

Reviewed-by: Liu Bo <bo.li.liu@oracle.com>

Thanks,

-liubo


* Re: [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes
  2017-05-19  0:42   ` Liu Bo
@ 2017-05-19 13:08     ` David Sterba
  2017-05-29 14:59       ` David Sterba
  0 siblings, 1 reply; 16+ messages in thread
From: David Sterba @ 2017-05-19 13:08 UTC (permalink / raw)
  To: Liu Bo; +Cc: linux-btrfs

On Thu, May 18, 2017 at 05:42:24PM -0700, Liu Bo wrote:
> On Tue, May 16, 2017 at 07:10:38PM +0200, David Sterba wrote:
> > flush_all_writes is an atomic but does not use the atomic semantics at
> > all; it's just an on/off indicator, so we can use a bool.
> > 
> 
> It might use the atomic to avoid reordering, but I'm not sure if
> reordering could really happen here.

Ok, I'll have a look. It does not seem to rely on atomics/ordering though.


* Re: [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes
  2017-05-19 13:08     ` David Sterba
@ 2017-05-29 14:59       ` David Sterba
  0 siblings, 0 replies; 16+ messages in thread
From: David Sterba @ 2017-05-29 14:59 UTC (permalink / raw)
  To: dsterba, Liu Bo, linux-btrfs

On Fri, May 19, 2017 at 03:08:22PM +0200, David Sterba wrote:
> On Thu, May 18, 2017 at 05:42:24PM -0700, Liu Bo wrote:
> > On Tue, May 16, 2017 at 07:10:38PM +0200, David Sterba wrote:
> > > flush_all_writes is an atomic but does not use the atomic semantics at
> > > all; it's just an on/off indicator, so we can use a bool.
> > > 
> > 
> > It might use the atomic to avoid reordering, but I'm not sure if
> > reordering could really happen here.
> 
> Ok, I'll have a look. It does not seem to rely on atomics/ordering though.

The atomic_t semantics of not losing concurrent increments/decrements
are not used here. The variable is always used within the following patterns:

	set to 1
	submit writes
	wait for completion
	set to 0

or

	if set: submit writes

The potential ordering issue, e.g. when a worker thread that just
reads the value does not see the 1 set by another thread, would require
the state to stay out of sync over several mutex lock/unlock calls,
which imply a barrier anyway.
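
To make that concrete, here is a sketch of how the accesses are
bracketed (an annotated excerpt; the comments are mine):

	/* setter, e.g. scrub_enumerate_chunks() */
	sctx->flush_all_writes = true;	/* plain store */
	scrub_submit(sctx);
	mutex_lock(&sctx->wr_lock);	/* lock/unlock imply barriers, */
	scrub_wr_submit(sctx);		/* publishing the store above */
	mutex_unlock(&sctx->wr_lock);

	/* reader, e.g. scrub_bio_end_io_worker() */
	if (sctx->is_dev_replace && sctx->flush_all_writes) {
		mutex_lock(&sctx->wr_lock);	/* pairs with the unlock above */
		scrub_wr_submit(sctx);
		mutex_unlock(&sctx->wr_lock);
	}

	/*
	 * A stale read of the flag is harmless: the setter submits the
	 * queued writes itself and then waits for bios_in_flight to
	 * drain, so no write can be missed.
	 */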


end of thread

Thread overview: 16+ messages
2017-05-16 17:10 [PATCH 00/10] Btrfs, minor scrub code cleanups David Sterba
2017-05-16 17:10 ` [PATCH 01/10] btrfs: scrub: inline helper scrub_setup_wr_ctx David Sterba
2017-05-16 17:10 ` [PATCH 02/10] btrfs: scrub: inline helper scrub_free_wr_ctx David Sterba
2017-05-16 17:10 ` [PATCH 03/10] btrfs: scrub: simplify cleanup of wr_ctx in scrub_free_ctx David Sterba
2017-05-19  0:25   ` Liu Bo
2017-05-16 17:10 ` [PATCH 04/10] btrfs: scrub: embed scrub_wr_ctx into scrub context David Sterba
2017-05-16 17:10 ` [PATCH 05/10] btrfs: scrub: remove pages_per_wr_bio from " David Sterba
2017-05-16 17:10 ` [PATCH 06/10] btrfs: scrub: use bool for flush_all_writes David Sterba
2017-05-19  0:42   ` Liu Bo
2017-05-19 13:08     ` David Sterba
2017-05-29 14:59       ` David Sterba
2017-05-16 17:10 ` [PATCH 07/10] btrfs: scrub: use fs_info::sectorsize and drop it from scrub context David Sterba
2017-05-19  0:43   ` Liu Bo
2017-05-16 17:10 ` [PATCH 08/10] btrfs: scrub: clean up division in __scrub_mark_bitmap David Sterba
2017-05-16 17:10 ` [PATCH 09/10] btrfs: scrub: clean up division in scrub_find_csum David Sterba
2017-05-16 17:10 ` [PATCH 10/10] btrfs: scrub: simplify scrub worker initialization David Sterba
