From: Hannes Reinecke <hare@suse.de>
To: Nitesh Shetty <nj.shetty@samsung.com>,
Jens Axboe <axboe@kernel.dk>, Jonathan Corbet <corbet@lwn.net>,
Alasdair Kergon <agk@redhat.com>,
Mike Snitzer <snitzer@kernel.org>,
dm-devel@redhat.com, Keith Busch <kbusch@kernel.org>,
Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
Chaitanya Kulkarni <kch@nvidia.com>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>
Cc: martin.petersen@oracle.com, mcgrof@kernel.org,
gost.dev@samsung.com, Anuj Gupta <anuj20.g@samsung.com>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-doc@vger.kernel.org, linux-nvme@lists.infradead.org,
linux-fsdevel@vger.kernel.org
Subject: Re: [PATCH v15 03/12] block: add copy offload support
Date: Thu, 7 Sep 2023 07:49:59 +0200 [thread overview]
Message-ID: <b0f3d320-047b-4bd8-a6fc-25b468caf5b3@suse.de> (raw)
In-Reply-To: <20230906163844.18754-4-nj.shetty@samsung.com>
On 9/6/23 18:38, Nitesh Shetty wrote:
> Introduce blkdev_copy_offload to perform copy offload.
> Issue REQ_OP_COPY_SRC with source info along with taking a plug.
> This flows down to the request layer and waits for the dst bio to arrive.
> Issue REQ_OP_COPY_DST with destination info and this bio reaches request
> layer and merges with src request.
> If, for any reason, a request comes to the driver with only one of the
> src/dst bios, we fail the copy offload.
>
> Larger copy will be divided, based on max_copy_sectors limit.
>
> Signed-off-by: Anuj Gupta <anuj20.g@samsung.com>
> Signed-off-by: Nitesh Shetty <nj.shetty@samsung.com>
> ---
> block/blk-lib.c | 202 +++++++++++++++++++++++++++++++++++++++++
> include/linux/blkdev.h | 4 +
> 2 files changed, 206 insertions(+)
>
> diff --git a/block/blk-lib.c b/block/blk-lib.c
> index e59c3069e835..d22e1e7417ca 100644
> --- a/block/blk-lib.c
> +++ b/block/blk-lib.c
> @@ -10,6 +10,22 @@
>
> #include "blk.h"
>
> +/* Keeps track of all outstanding copy IO */
> +struct blkdev_copy_io {
> + atomic_t refcount;
> + ssize_t copied;
> + int status;
> + struct task_struct *waiter;
> + void (*endio)(void *private, int status, ssize_t copied);
> + void *private;
> +};
> +
> +/* Keeps track of single outstanding copy offload IO */
> +struct blkdev_copy_offload_io {
> + struct blkdev_copy_io *cio;
> + loff_t offset;
> +};
> +
> static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
> {
> unsigned int discard_granularity = bdev_discard_granularity(bdev);
> @@ -115,6 +131,192 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
> }
> EXPORT_SYMBOL(blkdev_issue_discard);
>
> +static inline ssize_t blkdev_copy_sanity_check(struct block_device *bdev_in,
> + loff_t pos_in,
> + struct block_device *bdev_out,
> + loff_t pos_out, size_t len)
> +{
> + unsigned int align = max(bdev_logical_block_size(bdev_out),
> + bdev_logical_block_size(bdev_in)) - 1;
> +
> + if ((pos_in & align) || (pos_out & align) || (len & align) || !len ||
> + len >= BLK_COPY_MAX_BYTES)
> + return -EINVAL;
> +
> + return 0;
> +}
> +
> +static inline void blkdev_copy_endio(struct blkdev_copy_io *cio)
> +{
> + if (cio->endio) {
> + cio->endio(cio->private, cio->status, cio->copied);
> + kfree(cio);
> + } else {
> + struct task_struct *waiter = cio->waiter;
> +
> + WRITE_ONCE(cio->waiter, NULL);
> + blk_wake_io_task(waiter);
> + }
> +}
> +
> +/*
> + * This must only be called once all bios have been issued so that the refcount
> + * can only decrease. This just waits for all bios to complete.
> + * Returns the length of bytes copied or error
> + */
> +static ssize_t blkdev_copy_wait_io_completion(struct blkdev_copy_io *cio)
> +{
> + ssize_t ret;
> +
> + for (;;) {
> + __set_current_state(TASK_UNINTERRUPTIBLE);
> + if (!READ_ONCE(cio->waiter))
> + break;
> + blk_io_schedule();
> + }
> + __set_current_state(TASK_RUNNING);
> + ret = cio->copied;
> + kfree(cio);
> +
> + return ret;
> +}
> +
> +static void blkdev_copy_offload_dst_endio(struct bio *bio)
> +{
> + struct blkdev_copy_offload_io *offload_io = bio->bi_private;
> + struct blkdev_copy_io *cio = offload_io->cio;
> +
> + if (bio->bi_status) {
> + cio->copied = min_t(ssize_t, offload_io->offset, cio->copied);
> + if (!cio->status)
> + cio->status = blk_status_to_errno(bio->bi_status);
> + }
> + bio_put(bio);
> +
> + if (atomic_dec_and_test(&cio->refcount))
> + blkdev_copy_endio(cio);
> +}
> +
> +/*
> + * @bdev: block device
> + * @pos_in: source offset
> + * @pos_out: destination offset
> + * @len: length in bytes to be copied
> + * @endio: endio function to be called on completion of copy operation,
> + * for synchronous operation this should be NULL
> + * @private: endio function will be called with this private data,
> + * for synchronous operation this should be NULL
> + * @gfp_mask: memory allocation flags (for bio_alloc)
> + *
> + * For synchronous operation returns the length of bytes copied or error
> + * For asynchronous operation returns -EIOCBQUEUED or error
> + *
> + * Description:
> + * Copy source offset to destination offset within block device, using
> + * device's native copy offload feature. This function can fail, and
> + * in that case the caller can fallback to emulation.
> + * We perform copy operation using 2 bio's.
> + * 1. We take a plug and send a REQ_OP_COPY_SRC bio along with source
> + * sector and length. Once this bio reaches request layer, we form a
> + * request and wait for dst bio to arrive.
> + * 2. We issue REQ_OP_COPY_DST bio along with destination sector, length.
> + * Once this bio reaches the request layer and finds a request with the
> + * previously sent source info, we merge the destination bio and return.
> + * 3. Release the plug and request is sent to driver
> + * This design works only for drivers with request queue.
> + */
> +ssize_t blkdev_copy_offload(struct block_device *bdev, loff_t pos_in,
> + loff_t pos_out, size_t len,
> + void (*endio)(void *, int, ssize_t),
> + void *private, gfp_t gfp)
> +{
> + struct blkdev_copy_io *cio;
> + struct blkdev_copy_offload_io *offload_io;
> + struct bio *src_bio, *dst_bio;
> + ssize_t rem, chunk, ret;
> + ssize_t max_copy_bytes = bdev_max_copy_sectors(bdev) << SECTOR_SHIFT;
> + struct blk_plug plug;
> +
> + if (!max_copy_bytes)
> + return -EINVAL;
> +
> + ret = blkdev_copy_sanity_check(bdev, pos_in, bdev, pos_out, len);
> + if (ret)
> + return ret;
> +
> + cio = kzalloc(sizeof(*cio), GFP_KERNEL);
> + if (!cio)
> + return -ENOMEM;
> + atomic_set(&cio->refcount, 1);
> + cio->waiter = current;
> + cio->endio = endio;
> + cio->private = private;
> +
> + /*
> + * If there is an error, copied will be set to the least successfully
> + * completed copied length
> + */
> + cio->copied = len;
> + for (rem = len; rem > 0; rem -= chunk) {
> + chunk = min(rem, max_copy_bytes);
> +
> + offload_io = kzalloc(sizeof(*offload_io), GFP_KERNEL);
> + if (!offload_io)
> + goto err_free_cio;
> + offload_io->cio = cio;
> + /*
> + * For partial completion, we use offload_io->offset to truncate
> + * successful copy length
> + */
> + offload_io->offset = len - rem;
> +
> + src_bio = bio_alloc(bdev, 0, REQ_OP_COPY_SRC, gfp);
> + if (!src_bio)
> + goto err_free_offload_io;
> + src_bio->bi_iter.bi_size = chunk;
> + src_bio->bi_iter.bi_sector = pos_in >> SECTOR_SHIFT;
> +
> + blk_start_plug(&plug);
> + dst_bio = blk_next_bio(src_bio, bdev, 0, REQ_OP_COPY_DST, gfp);
> + if (!dst_bio)
> + goto err_free_src_bio;
> + dst_bio->bi_iter.bi_size = chunk;
> + dst_bio->bi_iter.bi_sector = pos_out >> SECTOR_SHIFT;
> + dst_bio->bi_end_io = blkdev_copy_offload_dst_endio;
> + dst_bio->bi_private = offload_io;
> +
> + atomic_inc(&cio->refcount);
> + submit_bio(dst_bio);
> + blk_finish_plug(&plug);
> + pos_in += chunk;
> + pos_out += chunk;
> + }
> +
> + if (atomic_dec_and_test(&cio->refcount))
> + blkdev_copy_endio(cio);
> + if (cio->endio)
> + return -EIOCBQUEUED;
> +
> + return blkdev_copy_wait_io_completion(cio);
> +
> +err_free_src_bio:
> + bio_put(src_bio);
> +err_free_offload_io:
> + kfree(offload_io);
> +err_free_cio:
> + cio->copied = min_t(ssize_t, cio->copied, (len - rem));
> + cio->status = -ENOMEM;
> + if (rem == len) {
> + kfree(cio);
> + return cio->status;
> + }
> + if (cio->endio)
> + return cio->status;
> +
> + return blkdev_copy_wait_io_completion(cio);
> +}
> +EXPORT_SYMBOL_GPL(blkdev_copy_offload);
Hmm. That looks a bit odd. Why do you have to use wait_for_completion?
Can't you submit the 'src' bio, and then submit the 'dst' bio from the
endio handler of the 'src' bio?
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Ivo Totev, Andrew
Myers, Andrew McDonald, Martje Boudien Moerman
next prev parent reply other threads:[~2023-09-07 5:50 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <CGME20230906164238epcas5p4a511a029fb8ae8bbc36b750712ad64d5@epcas5p4.samsung.com>
2023-09-06 16:38 ` [PATCH v15 00/12] Implement copy offload support Nitesh Shetty
2023-09-06 16:38 ` [PATCH v15 01/12] block: Introduce queue limits and sysfs for copy-offload support Nitesh Shetty
2023-09-07 18:38 ` Luis Chamberlain
2023-09-06 16:38 ` [PATCH v15 02/12] Add infrastructure for copy offload in block and request layer Nitesh Shetty
2023-09-07 5:39 ` Hannes Reinecke
2023-09-07 7:09 ` Nitesh Shetty
2023-09-06 16:38 ` [PATCH v15 03/12] block: add copy offload support Nitesh Shetty
2023-09-07 5:49 ` Hannes Reinecke [this message]
2023-09-07 7:16 ` Nitesh Shetty
2023-09-08 5:55 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 04/12] block: add emulation for copy Nitesh Shetty
2023-09-08 6:06 ` Hannes Reinecke
2023-09-11 7:09 ` Nitesh Shetty
2023-09-11 7:39 ` Hannes Reinecke
2023-09-11 10:20 ` Nitesh Shetty
2023-09-06 16:38 ` [PATCH v15 05/12] fs/read_write: Enable copy_file_range for block device Nitesh Shetty
2023-09-08 6:07 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 06/12] fs, block: copy_file_range for def_blk_ops for direct " Nitesh Shetty
2023-09-08 6:08 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 07/12] nvme: add copy offload support Nitesh Shetty
2023-09-08 6:09 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 08/12] nvmet: add copy command support for bdev and file ns Nitesh Shetty
2023-09-08 6:11 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 09/12] dm: Add support for copy offload Nitesh Shetty
2023-09-08 6:13 ` Hannes Reinecke
2023-09-11 7:07 ` Nitesh Shetty
2023-09-11 7:45 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 10/12] dm: Enable copy offload for dm-linear target Nitesh Shetty
2023-09-08 6:14 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 11/12] null: Enable trace capability for null block Nitesh Shetty
2023-09-08 6:14 ` Hannes Reinecke
2023-09-06 16:38 ` [PATCH v15 12/12] null_blk: add support for copy offload Nitesh Shetty
2023-09-06 22:01 ` kernel test robot
2023-09-06 22:58 ` kernel test robot
2023-09-08 6:16 ` Hannes Reinecke
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=b0f3d320-047b-4bd8-a6fc-25b468caf5b3@suse.de \
--to=hare@suse.de \
--cc=agk@redhat.com \
--cc=anuj20.g@samsung.com \
--cc=axboe@kernel.dk \
--cc=brauner@kernel.org \
--cc=corbet@lwn.net \
--cc=dm-devel@redhat.com \
--cc=gost.dev@samsung.com \
--cc=hch@lst.de \
--cc=kbusch@kernel.org \
--cc=kch@nvidia.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-doc@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-nvme@lists.infradead.org \
--cc=martin.petersen@oracle.com \
--cc=mcgrof@kernel.org \
--cc=nj.shetty@samsung.com \
--cc=sagi@grimberg.me \
--cc=snitzer@kernel.org \
--cc=viro@zeniv.linux.org.uk \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox