From: Chaitanya Kulkarni <kch@nvidia.com>
To: <minchan@kernel.org>, <senozhatsky@chromium.org>
Cc: <axboe@kernel.dk>, <linux-block@vger.kernel.org>,
	Chaitanya Kulkarni <kch@nvidia.com>
Subject: [PATCH 2/3] zram: consolidate zram_bio_read()/zram_bio_write()
Date: Fri, 12 May 2023 01:29:57 -0700
Message-ID: <20230512082958.6550-3-kch@nvidia.com>
In-Reply-To: <20230512082958.6550-1-kch@nvidia.com>

zram_bio_read() and zram_bio_write() are 26 lines each and share most of
their code; they differ only in the zram_bvec_read() vs. zram_bvec_write()
call, the failure counter they bump, and the flush_dcache_page() call on
the read side. Consolidate them into a single zram_bio_rw() to remove the
duplicated code and an extra function that exists only for this handful
of differing lines :-

1c1
< static void zram_bio_read(struct zram *zram, struct bio *bio)
---
> static void zram_bio_write(struct zram *zram, struct bio *bio)
13,14c13,14
< 		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
< 			atomic64_inc(&zram->stats.failed_reads);
---
> 		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
> 			atomic64_inc(&zram->stats.failed_writes);
18d17
< 		flush_dcache_page(bv.bv_page);
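
The consolidated helper picks a branch with op_is_write(); as a rough
standalone illustration, here is a userspace sketch of that dispatch.
The REQ_OP_* values and the op_is_write() body below are simplified
stand-ins for the block layer's definitions (write-type opcodes have
bit 0 set), not the kernel's actual headers:

  #include <stdbool.h>
  #include <stdio.h>

  /* Assumed stand-ins mirroring the block layer's convention. */
  enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };

  /* Write-type opcodes are odd, so testing bit 0 suffices. */
  static bool op_is_write(unsigned int op)
  {
          return op & 1;
  }

  int main(void)
  {
          printf("READ  is write? %d\n", op_is_write(REQ_OP_READ));
          printf("WRITE is write? %d\n", op_is_write(REQ_OP_WRITE));
          return 0;
  }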

diff stats with this patch :-

 drivers/block/zram/zram_drv.c | 53 ++++++++++++-----------------------
 1 file changed, 18 insertions(+), 35 deletions(-)

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 drivers/block/zram/zram_drv.c | 53 ++++++++++++-----------------------
 1 file changed, 18 insertions(+), 35 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index b2e419f15f71..fc37419b3735 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1873,38 +1873,12 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
 	bio_endio(bio);
 }
 
-static void zram_bio_read(struct zram *zram, struct bio *bio)
-{
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	unsigned long start_time;
-
-	start_time = bio_start_io_acct(bio);
-	bio_for_each_segment(bv, bio, iter) {
-		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
-		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
-				SECTOR_SHIFT;
-
-		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
-			atomic64_inc(&zram->stats.failed_reads);
-			bio->bi_status = BLK_STS_IOERR;
-			break;
-		}
-		flush_dcache_page(bv.bv_page);
-
-		zram_slot_lock(zram, index);
-		zram_accessed(zram, index);
-		zram_slot_unlock(zram, index);
-	}
-	bio_end_io_acct(bio, start_time);
-	bio_endio(bio);
-}
-
-static void zram_bio_write(struct zram *zram, struct bio *bio)
+static void zram_bio_rw(struct zram *zram, struct bio *bio)
 {
 	struct bvec_iter iter;
 	struct bio_vec bv;
 	unsigned long start_time;
+	int ret;
 
 	start_time = bio_start_io_acct(bio);
 	bio_for_each_segment(bv, bio, iter) {
@@ -1912,10 +1886,21 @@ static void zram_bio_write(struct zram *zram, struct bio *bio)
 		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
 				SECTOR_SHIFT;
 
-		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
-			atomic64_inc(&zram->stats.failed_writes);
-			bio->bi_status = BLK_STS_IOERR;
-			break;
+		if (op_is_write(bio_op(bio))) {
+			ret = zram_bvec_write(zram, &bv, index, offset, bio);
+			if (ret < 0) {
+				atomic64_inc(&zram->stats.failed_writes);
+				bio->bi_status = BLK_STS_IOERR;
+				break;
+			}
+		} else {
+			ret = zram_bvec_read(zram, &bv, index, offset, bio);
+			if (ret < 0) {
+				atomic64_inc(&zram->stats.failed_reads);
+				bio->bi_status = BLK_STS_IOERR;
+				break;
+			}
+			flush_dcache_page(bv.bv_page);
 		}
 
 		zram_slot_lock(zram, index);
@@ -1935,10 +1920,8 @@ static void zram_submit_bio(struct bio *bio)
 
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
-		zram_bio_read(zram, bio);
-		break;
 	case REQ_OP_WRITE:
-		zram_bio_write(zram, bio);
+		zram_bio_rw(zram, bio);
 		break;
 	case REQ_OP_DISCARD:
 	case REQ_OP_WRITE_ZEROES:
-- 
2.40.0
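
For readers tracing the hunk above, a minimal userspace sketch of the
sector-to-page index/offset arithmetic the bio_for_each_segment() loop
performs. The SECTOR_SHIFT and PAGE_SHIFT values are assumptions for a
512-byte-sector, 4 KiB-page build; real kernels take these from their
configuration:

  #include <stdio.h>
  #include <stdint.h>

  #define SECTOR_SHIFT           9   /* assumed: 512-byte sectors */
  #define PAGE_SHIFT             12  /* assumed: 4 KiB pages */
  #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
  #define SECTORS_PER_PAGE       (1 << SECTORS_PER_PAGE_SHIFT)

  int main(void)
  {
          /* Sample sectors, including page-boundary cases. */
          uint64_t sectors[] = { 0, 7, 8, 9, 4096 };
          for (unsigned i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
                  uint32_t index  = sectors[i] >> SECTORS_PER_PAGE_SHIFT;
                  uint32_t offset = (sectors[i] & (SECTORS_PER_PAGE - 1))
                                          << SECTOR_SHIFT;
                  printf("sector %4llu -> page index %u, byte offset %u\n",
                         (unsigned long long)sectors[i], index, offset);
          }
          return 0;
  }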

