From: Chao Yu <chao@kernel.org>
To: martin.petersen@oracle.com, jejb@linux.ibm.com, hch@infradead.org
Cc: bvanassche@acm.org, linux-scsi@vger.kernel.org,
linux-kernel@vger.kernel.org, Chao Yu <chao@kernel.org>
Subject: Re: [PATCH v5] scsi: support packing multi-segment in UNMAP command
Date: Wed, 17 May 2023 11:49:03 +0800 [thread overview]
Message-ID: <b53321ab-679d-e007-6407-6bd00149948e@kernel.org> (raw)
In-Reply-To: <20230310123604.1820231-1-chao@kernel.org>
SCSI maintainers,
Any comments on this patch?
To Christoph: should I just drop the HPB part of the change?
On 2023/3/10 20:36, Chao Yu wrote:
> As SCSI SBC4 specification section 5.30.2 describes, it can
> support unmapping one or more LBA ranges in a single UNMAP command.
>
> However, previously we only packed one LBA range in the UNMAP
> command by default, even if the device's block limits indicated
> that it can support unmapping multiple LBA ranges with a single
> UNMAP command.
>
> This patch sets max_discard_segments config according to block
> limits of device, and supports unmapping multiple LBA ranges with
> a single UNMAP command.
>
> Signed-off-by: Chao Yu <chao@kernel.org>
> ---
> v5:
> - fix to assign cmd->cmnd[7] and cmd->cmnd[8] w/ data_len correctly.
> - fix to let ufshpb recognize and handle discard request which contains
> multiple ranges correctly.
> drivers/scsi/sd.c | 36 +++++++++++++++++++++++++-----------
> drivers/scsi/sd.h | 1 +
> drivers/ufs/core/ufshpb.c | 21 +++++++++++++++++++--
> 3 files changed, 45 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
> index 4bb87043e6db..1908b31c7342 100644
> --- a/drivers/scsi/sd.c
> +++ b/drivers/scsi/sd.c
> @@ -792,6 +792,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
> q->limits.discard_granularity =
> max(sdkp->physical_block_size,
> sdkp->unmap_granularity * logical_block_size);
> + blk_queue_max_discard_segments(q, min_t(u32, U16_MAX,
> + sdkp->max_unmap_block_desc_count));
> sdkp->provisioning_mode = mode;
>
> switch (mode) {
> @@ -851,9 +853,10 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
> struct scsi_device *sdp = cmd->device;
> struct request *rq = scsi_cmd_to_rq(cmd);
> struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
> - u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
> - u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
> - unsigned int data_len = 24;
> + unsigned short segments = blk_rq_nr_discard_segments(rq);
> + unsigned int data_len = 8 + 16 * segments;
> + unsigned int descriptor_offset = 8;
> + struct bio *bio;
> char *buf;
>
> buf = sd_set_special_bvec(rq, data_len);
> @@ -862,12 +865,20 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
>
> cmd->cmd_len = 10;
> cmd->cmnd[0] = UNMAP;
> - cmd->cmnd[8] = 24;
> + cmd->cmnd[7] = data_len >> 8;
> + cmd->cmnd[8] = data_len & 0xff;
> +
> + put_unaligned_be16(6 + 16 * segments, &buf[0]);
> + put_unaligned_be16(16 * segments, &buf[2]);
>
> - put_unaligned_be16(6 + 16, &buf[0]);
> - put_unaligned_be16(16, &buf[2]);
> - put_unaligned_be64(lba, &buf[8]);
> - put_unaligned_be32(nr_blocks, &buf[16]);
> + __rq_for_each_bio(bio, rq) {
> + u64 lba = sectors_to_logical(sdp, bio->bi_iter.bi_sector);
> + u32 nr_blocks = sectors_to_logical(sdp, bio_sectors(bio));
> +
> + put_unaligned_be64(lba, &buf[descriptor_offset]);
> + put_unaligned_be32(nr_blocks, &buf[descriptor_offset + 8]);
> + descriptor_offset += 16;
> + }
>
> cmd->allowed = sdkp->max_retries;
> cmd->transfersize = data_len;
> @@ -2917,7 +2928,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
> sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
>
> if (vpd->len >= 64) {
> - unsigned int lba_count, desc_count;
> + unsigned int lba_count;
>
> sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
>
> @@ -2925,9 +2936,12 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
> goto out;
>
> lba_count = get_unaligned_be32(&vpd->data[20]);
> - desc_count = get_unaligned_be32(&vpd->data[24]);
>
> - if (lba_count && desc_count)
> + /* Extract the MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT. */
> + sdkp->max_unmap_block_desc_count =
> + get_unaligned_be32(&vpd->data[24]);
> +
> + if (lba_count && sdkp->max_unmap_block_desc_count)
> sdkp->max_unmap_blocks = lba_count;
>
> sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
> diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
> index 5eea762f84d1..e7c51d23395b 100644
> --- a/drivers/scsi/sd.h
> +++ b/drivers/scsi/sd.h
> @@ -119,6 +119,7 @@ struct scsi_disk {
> u32 opt_xfer_blocks;
> u32 max_ws_blocks;
> u32 max_unmap_blocks;
> + u32 max_unmap_block_desc_count;
> u32 unmap_granularity;
> u32 unmap_alignment;
> u32 index;
> diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
> index a46a7666c891..327b29cf506f 100644
> --- a/drivers/ufs/core/ufshpb.c
> +++ b/drivers/ufs/core/ufshpb.c
> @@ -383,13 +383,30 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> rgn = hpb->rgn_tbl + rgn_idx;
> srgn = rgn->srgn_tbl + srgn_idx;
>
> - /* If command type is WRITE or DISCARD, set bitmap as dirty */
> - if (ufshpb_is_write_or_discard(cmd)) {
> + /* If command type is WRITE, set bitmap as dirty */
> + if (op_is_write(req_op(scsi_cmd_to_rq(cmd)))) {
> ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
> transfer_len, true);
> return 0;
> }
>
> + /* If command type is DISCARD, set bitmap as dirty */
> + if (op_is_discard(req_op(scsi_cmd_to_rq(cmd)))) {
> + struct bio *bio;
> +
> + __rq_for_each_bio(bio, scsi_cmd_to_rq(cmd)) {
> + lpn = sectors_to_logical(cmd->device,
> + bio->bi_iter.bi_sector);
> + transfer_len = sectors_to_logical(cmd->device,
> + bio_sectors(bio));
> + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx,
> + &srgn_idx, &srgn_offset);
> + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx,
> + srgn_offset, transfer_len, true);
> + }
> + return 0;
> + }
> +
> if (!ufshpb_is_supported_chunk(hpb, transfer_len))
> return 0;
>
next prev parent reply other threads:[~2023-05-17 3:49 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-10 12:36 [PATCH v5] scsi: support packing multi-segment in UNMAP command Chao Yu
2023-03-10 14:02 ` Christoph Hellwig
2023-03-13 1:44 ` Chao Yu
2023-03-20 13:00 ` Christoph Hellwig
2023-05-17 3:49 ` Chao Yu [this message]
2023-05-22 21:51 ` Martin K. Petersen
2023-06-25 7:07 ` Chao Yu
2023-06-29 2:02 ` Martin K. Petersen
2023-06-29 2:04 ` Chao Yu
2023-07-25 9:19 ` Chao Yu
2023-08-08 13:42 ` Chao Yu
2023-08-08 14:04 ` Martin K. Petersen
2023-08-08 14:12 ` Chao Yu
2023-08-10 2:11 ` Chao Yu
2023-08-22 8:38 ` Chao Yu
2023-10-07 10:13 ` Chao Yu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=b53321ab-679d-e007-6407-6bd00149948e@kernel.org \
--to=chao@kernel.org \
--cc=bvanassche@acm.org \
--cc=hch@infradead.org \
--cc=jejb@linux.ibm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-scsi@vger.kernel.org \
--cc=martin.petersen@oracle.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox