From: Bart Van Assche <bvanassche@acm.org>
To: James Bottomley <James.Bottomley@HansenPartnership.com>,
Christoph Hellwig <hch@lst.de>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>,
Jens Axboe <axboe@kernel.dk>, Jaegeuk Kim <jaegeuk@kernel.org>,
alim.akhtar@samsung.com, avri.altman@wdc.com,
linux-scsi@vger.kernel.org, linux-block@vger.kernel.org,
Daejun Park <daejun7.park@samsung.com>
Subject: Re: [PATCH] scsi: ufs: mark HPB support as BROKEN
Date: Thu, 28 Oct 2021 13:21:42 -0700 [thread overview]
Message-ID: <a6af2ce7-4a03-ab0c-67cd-c58022e5ded1@acm.org> (raw)
In-Reply-To: <b2bcc13ccdc584962128a69fa5992936068e1a9b.camel@HansenPartnership.com>
On 10/27/21 5:20 AM, James Bottomley wrote:
> We don't have to revert it entirely, we can just remove the API since
> it's in an optimization path. The below patch does exactly that. It's
> compile tested but I don't have hardware so I've no idea if it works;
> Daejun can you please test it as a matter of urgency since we have to
> get this in before the end of the week.
>
> James
>
> ---
>
> diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
> index 66b19500844e..1c89eafe0c1d 100644
> --- a/drivers/scsi/ufs/ufshpb.c
> +++ b/drivers/scsi/ufs/ufshpb.c
> @@ -365,148 +365,6 @@ static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
> cdb[9] = 0x00; /* Control = 0x00 */
> }
>
> -static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
> -{
> - struct ufshpb_req *pre_req;
> -
> - if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
> - dev_info(&hpb->sdev_ufs_lu->sdev_dev,
> - "pre_req throttle. inflight %d throttle %d",
> - hpb->num_inflight_pre_req, hpb->throttle_pre_req);
> - return NULL;
> - }
> -
> - pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
> - struct ufshpb_req, list_req);
> - if (!pre_req) {
> - dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
> - return NULL;
> - }
> -
> - list_del_init(&pre_req->list_req);
> - hpb->num_inflight_pre_req++;
> -
> - return pre_req;
> -}
> -
> -static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
> - struct ufshpb_req *pre_req)
> -{
> - pre_req->req = NULL;
> - bio_reset(pre_req->bio);
> - list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
> - hpb->num_inflight_pre_req--;
> -}
> -
> -static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
> -{
> - struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
> - struct ufshpb_lu *hpb = pre_req->hpb;
> - unsigned long flags;
> -
> - if (error) {
> - struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
> - struct scsi_sense_hdr sshdr;
> -
> - dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
> - scsi_command_normalize_sense(cmd, &sshdr);
> - dev_err(&hpb->sdev_ufs_lu->sdev_dev,
> - "code %x sense_key %x asc %x ascq %x",
> - sshdr.response_code,
> - sshdr.sense_key, sshdr.asc, sshdr.ascq);
> - dev_err(&hpb->sdev_ufs_lu->sdev_dev,
> - "byte4 %x byte5 %x byte6 %x additional_len %x",
> - sshdr.byte4, sshdr.byte5,
> - sshdr.byte6, sshdr.additional_length);
> - }
> -
> - blk_mq_free_request(req);
> - spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> - ufshpb_put_pre_req(pre_req->hpb, pre_req);
> - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> -}
> -
> -static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
> -{
> - struct ufshpb_lu *hpb = pre_req->hpb;
> - struct ufshpb_region *rgn;
> - struct ufshpb_subregion *srgn;
> - __be64 *addr;
> - int offset = 0;
> - int copied;
> - unsigned long lpn = pre_req->wb.lpn;
> - int rgn_idx, srgn_idx, srgn_offset;
> - unsigned long flags;
> -
> - addr = page_address(page);
> - ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
> -
> - spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> -
> -next_offset:
> - rgn = hpb->rgn_tbl + rgn_idx;
> - srgn = rgn->srgn_tbl + srgn_idx;
> -
> - if (!ufshpb_is_valid_srgn(rgn, srgn))
> - goto mctx_error;
> -
> - if (!srgn->mctx)
> - goto mctx_error;
> -
> - copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
> - pre_req->wb.len - offset,
> - &addr[offset]);
> -
> - if (copied < 0)
> - goto mctx_error;
> -
> - offset += copied;
> - srgn_offset += copied;
> -
> - if (srgn_offset == hpb->entries_per_srgn) {
> - srgn_offset = 0;
> -
> - if (++srgn_idx == hpb->srgns_per_rgn) {
> - srgn_idx = 0;
> - rgn_idx++;
> - }
> - }
> -
> - if (offset < pre_req->wb.len)
> - goto next_offset;
> -
> - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> - return 0;
> -mctx_error:
> - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> - return -ENOMEM;
> -}
> -
> -static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
> - struct request_queue *q,
> - struct ufshpb_req *pre_req)
> -{
> - struct page *page = pre_req->wb.m_page;
> - struct bio *bio = pre_req->bio;
> - int entries_bytes, ret;
> -
> - if (!page)
> - return -ENOMEM;
> -
> - if (ufshpb_prep_entry(pre_req, page))
> - return -ENOMEM;
> -
> - entries_bytes = pre_req->wb.len * sizeof(__be64);
> -
> - ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
> - if (ret != entries_bytes) {
> - dev_err(&hpb->sdev_ufs_lu->sdev_dev,
> - "bio_add_pc_page fail: %d", ret);
> - return -ENOMEM;
> - }
> - return 0;
> -}
> -
> static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
> {
> if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
> @@ -514,88 +372,6 @@ static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
> return hpb->cur_read_id;
> }
>
> -static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
> - struct ufshpb_req *pre_req, int read_id)
> -{
> - struct scsi_device *sdev = cmd->device;
> - struct request_queue *q = sdev->request_queue;
> - struct request *req;
> - struct scsi_request *rq;
> - struct bio *bio = pre_req->bio;
> -
> - pre_req->hpb = hpb;
> - pre_req->wb.lpn = sectors_to_logical(cmd->device,
> - blk_rq_pos(scsi_cmd_to_rq(cmd)));
> - pre_req->wb.len = sectors_to_logical(cmd->device,
> - blk_rq_sectors(scsi_cmd_to_rq(cmd)));
> - if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
> - return -ENOMEM;
> -
> - req = pre_req->req;
> -
> - /* 1. request setup */
> - blk_rq_append_bio(req, bio);
> - req->rq_disk = NULL;
> - req->end_io_data = (void *)pre_req;
> - req->end_io = ufshpb_pre_req_compl_fn;
> -
> - /* 2. scsi_request setup */
> - rq = scsi_req(req);
> - rq->retries = 1;
> -
> - ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
> - read_id);
> - rq->cmd_len = scsi_command_size(rq->cmd);
> -
> - if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
> - return -EAGAIN;
> -
> - hpb->stats.pre_req_cnt++;
> -
> - return 0;
> -}
> -
> -static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
> - int *read_id)
> -{
> - struct ufshpb_req *pre_req;
> - struct request *req = NULL;
> - unsigned long flags;
> - int _read_id;
> - int ret = 0;
> -
> - req = blk_get_request(cmd->device->request_queue,
> - REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
> - if (IS_ERR(req))
> - return -EAGAIN;
> -
> - spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> - pre_req = ufshpb_get_pre_req(hpb);
> - if (!pre_req) {
> - ret = -EAGAIN;
> - goto unlock_out;
> - }
> - _read_id = ufshpb_get_read_id(hpb);
> - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> -
> - pre_req->req = req;
> -
> - ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
> - if (ret)
> - goto free_pre_req;
> -
> - *read_id = _read_id;
> -
> - return ret;
> -free_pre_req:
> - spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> - ufshpb_put_pre_req(hpb, pre_req);
> -unlock_out:
> - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> - blk_put_request(req);
> - return ret;
> -}
> -
> /*
> * This function will set up HPB read command using host-side L2P map data.
> */
> @@ -685,23 +461,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> dev_err(hba->dev, "get ppn failed. err %d\n", err);
> return err;
> }
> - if (!ufshpb_is_legacy(hba) &&
> - ufshpb_is_required_wb(hpb, transfer_len)) {
> - err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
> - if (err) {
> - unsigned long timeout;
> -
> - timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
> - hpb->params.requeue_timeout_ms);
> -
> - if (time_before(jiffies, timeout))
> - return -EAGAIN;
> -
> - hpb->stats.miss_cnt++;
> - return 0;
> - }
> - }
> -
> ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
>
> hpb->stats.hit_cnt++;
Hi James,
The help with trying to find a solution is appreciated.
One of the software developers who is familiar with HPB explained to me that
READ BUFFER and WRITE BUFFER commands may be received in an arbitrary order
by UFS devices. The UFS HPB spec requires UFS devices to be able to stash up
to 128 such pairs. I'm concerned that leaving out only the WRITE BUFFER commands
will break the HPB protocol in a subtle way.
Thanks,
Bart.
next prev parent reply other threads:[~2021-10-28 20:21 UTC|newest]
Thread overview: 43+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-10-26 7:12 [PATCH] scsi: ufs: mark HPB support as BROKEN Christoph Hellwig
2021-10-26 7:18 ` Hannes Reinecke
2021-10-26 7:24 ` Damien Le Moal
2021-10-26 13:04 ` James Bottomley
2021-10-26 16:36 ` Bart Van Assche
2021-10-26 17:19 ` James Bottomley
2021-10-26 17:25 ` Jens Axboe
2021-10-26 18:05 ` Bart Van Assche
2021-10-26 18:10 ` Jens Axboe
2021-10-26 18:18 ` James Bottomley
2021-10-26 18:27 ` Martin K. Petersen
2021-10-26 20:10 ` Bart Van Assche
2021-10-26 22:22 ` Daejun Park
2021-10-27 5:27 ` Christoph Hellwig
2021-10-27 12:20 ` James Bottomley
2021-10-28 20:21 ` Bart Van Assche [this message]
2021-10-28 20:33 ` James Bottomley
2021-10-28 20:53 ` Bart Van Assche
2021-10-28 21:14 ` Daejun Park
2021-10-27 13:16 ` Bart Van Assche
2021-10-27 14:12 ` Keith Busch
2021-10-27 14:38 ` Jens Axboe
2021-10-27 14:43 ` James Bottomley
2021-10-27 15:03 ` Ming Lei
2021-10-27 15:06 ` Jens Axboe
2021-10-27 15:16 ` Ming Lei
2021-10-27 15:44 ` Martin K. Petersen
2021-10-27 15:58 ` Ming Lei
2021-10-27 16:16 ` Keith Busch
2021-10-27 16:19 ` Jens Axboe
2021-10-28 0:42 ` Ming Lei
2021-10-28 1:10 ` Daejun Park
2021-10-28 2:07 ` Ming Lei
2021-10-27 16:59 ` Martin K. Petersen
2021-10-27 15:35 ` Martin K. Petersen
2021-10-27 15:40 ` Jens Axboe
2021-10-27 16:16 ` Martin K. Petersen
2021-10-27 17:01 ` Bart Van Assche
2021-10-28 1:32 ` Ming Lei
2021-10-29 10:53 ` Christoph Hellwig
2021-10-29 11:39 ` James Bottomley
2021-10-29 13:35 ` Avri Altman
2021-10-29 13:44 ` James Bottomley
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=a6af2ce7-4a03-ab0c-67cd-c58022e5ded1@acm.org \
--to=bvanassche@acm.org \
--cc=James.Bottomley@HansenPartnership.com \
--cc=alim.akhtar@samsung.com \
--cc=avri.altman@wdc.com \
--cc=axboe@kernel.dk \
--cc=daejun7.park@samsung.com \
--cc=hch@lst.de \
--cc=jaegeuk@kernel.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-scsi@vger.kernel.org \
--cc=martin.petersen@oracle.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox