From: Christoph Hellwig <hch@lst.de>
To: John Garry <john.g.garry@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>,
	axboe@kernel.dk, kbusch@kernel.org, sagi@grimberg.me,
	jejb@linux.ibm.com, martin.petersen@oracle.com,
	djwong@kernel.org, viro@zeniv.linux.org.uk, brauner@kernel.org,
	dchinner@redhat.com, jack@suse.cz, linux-block@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-nvme@lists.infradead.org,
	linux-xfs@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	tytso@mit.edu, jbongio@google.com, linux-scsi@vger.kernel.org,
	ming.lei@redhat.com, jaswin@linux.ibm.com, bvanassche@acm.org
Subject: Re: [PATCH v2 00/16] block atomic writes
Date: Thu, 14 Dec 2023 15:37:09 +0100
Message-ID: <20231214143708.GA5331@lst.de>
In-Reply-To: <c729b03c-b1d1-4458-9983-113f8cd752cd@oracle.com>

On Wed, Dec 13, 2023 at 04:27:35PM +0000, John Garry wrote:
>>> Are there any patches yet for the change to always use SGLs for transfers
>>> larger than a single PRP?
>> No.

Here is the WIP version.  With that you'd need to make atomic writes
conditional on !ctrl->need_virt_boundary.

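As a rough illustration of that suggestion (not part of the WIP patch below, nor of the atomic writes series as posted), the check on the atomic writes side could look something like the sketch that follows; nvme_update_atomic_write_limits() and the way the limits are derived are placeholder names, only the !ctrl->need_virt_boundary condition comes from this mail:

/*
 * Sketch only: skip advertising atomic writes on controllers that still
 * need the PRP virt boundary, since a large atomic write could otherwise
 * end up described by a PRP list.  The helper name and the limit
 * plumbing are assumptions, not existing code.
 */
static void nvme_update_atomic_write_limits(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;

	if (ctrl->need_virt_boundary)
		return;		/* PRP-only transport: leave atomic limits at 0 */

	/* ... derive atomic write limits from Identify data here ... */
}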
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8ebdfd623e0f78..e04faffd6551fe 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1889,7 +1889,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
-	blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
+	if (q == ctrl->admin_q || ctrl->need_virt_boundary)
+		blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
 	blk_queue_dma_alignment(q, 3);
 	blk_queue_write_cache(q, vwc, vwc);
 }
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e7411dac00f725..aa98794a3ec53d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -262,6 +262,7 @@ enum nvme_ctrl_flags {
 struct nvme_ctrl {
 	bool comp_seen;
 	bool identified;
+	bool need_virt_boundary;
 	enum nvme_ctrl_state state;
 	spinlock_t lock;
 	struct mutex scan_lock;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 61af7ff1a9d6ba..a8d273b475cb40 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -60,8 +60,7 @@ MODULE_PARM_DESC(max_host_mem_size_mb,
 static unsigned int sgl_threshold = SZ_32K;
 module_param(sgl_threshold, uint, 0644);
 MODULE_PARM_DESC(sgl_threshold,
-		"Use SGLs when average request segment size is larger or equal to "
-		"this size. Use 0 to disable SGLs.");
+		"Use SGLs when > 0. Use 0 to disable SGLs.");
 
 #define NVME_PCI_MIN_QUEUE_SIZE 2
 #define NVME_PCI_MAX_QUEUE_SIZE 4095
@@ -504,23 +503,6 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
 	spin_unlock(&nvmeq->sq_lock);
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
-				     int nseg)
-{
-	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
-	unsigned int avg_seg_size;
-
-	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
-
-	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
-		return false;
-	if (!nvmeq->qid)
-		return false;
-	if (!sgl_threshold || avg_seg_size < sgl_threshold)
-		return false;
-	return true;
-}
-
 static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
 	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
@@ -769,12 +751,14 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
+	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	bool sgl_supported = nvme_ctrl_sgl_supported(&dev->ctrl) &&
+			nvmeq->qid && sgl_threshold;
 	blk_status_t ret = BLK_STS_RESOURCE;
 	int rc;
 
 	if (blk_rq_nr_phys_segments(req) == 1) {
-		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 		struct bio_vec bv = req_bvec(req);
 
 		if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -782,8 +766,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 				return nvme_setup_prp_simple(dev, req,
 							     &cmnd->rw, &bv);
 
-			if (nvmeq->qid && sgl_threshold &&
-			    nvme_ctrl_sgl_supported(&dev->ctrl))
+			if (sgl_supported)
 				return nvme_setup_sgl_simple(dev, req,
 							     &cmnd->rw, &bv);
 		}
@@ -806,7 +789,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		goto out_free_sg;
 	}
 
-	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
+	if (sgl_supported)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
@@ -3036,6 +3019,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	result = nvme_init_ctrl_finish(&dev->ctrl, false);
 	if (result)
 		goto out_disable;
+	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
+		dev->ctrl.need_virt_boundary = true;
 
 	nvme_dbbuf_dma_alloc(dev);
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 81e2621169e5d3..416a9fbcccfc74 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -838,6 +838,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
 	if (error)
 		goto out_quiesce_queue;
+	ctrl->ctrl.need_virt_boundary = true;
 
 	return 0;
 

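For context on why the boundary can be dropped (again, not part of the patch): in a PRP list every entry after the first must be NVME_CTRL_PAGE_SIZE aligned with a zero offset, which is exactly what the block layer's virt_boundary mask enforces on bios, while SGL descriptors carry an explicit (address, length) pair and have no such restriction. A minimal illustration, assuming the usual 4096-byte NVME_CTRL_PAGE_SIZE:

/*
 * Illustration only, not kernel code to apply: only the first PRP entry
 * may carry a page offset, so a segment boundary that is not page
 * aligned cannot be described by a PRP list.  SGLs have no such
 * constraint, which is why need_virt_boundary can stay false once SGLs
 * are used for every multi-segment transfer.
 */
#define CTRL_PAGE_SIZE	4096	/* assumed NVME_CTRL_PAGE_SIZE */

static bool prp_can_describe(unsigned long long addr, bool first_entry)
{
	if (first_entry)
		return true;				/* PRP1 may have an offset */
	return (addr & (CTRL_PAGE_SIZE - 1)) == 0;	/* later entries need offset 0 */
}

That also appears to be why the rdma.c hunk above sets the flag unconditionally: RDMA memory registration builds page lists with the same alignment rule, so the boundary remains required there regardless of SGL support.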

Thread overview: 73+ messages
2023-12-12 11:08 [PATCH v2 00/16] block atomic writes John Garry
2023-12-12 11:08 ` [PATCH v2 01/16] block: Add atomic write operations to request_queue limits John Garry
2023-12-13  1:25   ` Ming Lei
2023-12-13  9:13     ` John Garry
2023-12-13 12:28       ` Ming Lei
2023-12-13 19:01         ` John Garry
2023-12-14  4:38         ` Martin K. Petersen
2023-12-14 13:46           ` Ming Lei
2023-12-14  4:34     ` Martin K. Petersen
2023-12-14 16:12     ` Christoph Hellwig
2023-12-12 11:08 ` [PATCH v2 02/16] block: Limit atomic writes according to bio and queue limits John Garry
2023-12-12 11:08 ` [PATCH v2 03/16] fs/bdev: Add atomic write support info to statx John Garry
2023-12-13 10:24   ` Jan Kara
2023-12-13 11:02     ` John Garry
2023-12-12 11:08 ` [PATCH v2 04/16] fs: Increase fmode_t size John Garry
2023-12-13 11:20   ` Jan Kara
2023-12-13 13:03     ` John Garry
2023-12-13 13:02   ` Christian Brauner
2023-12-13 13:15     ` John Garry
2023-12-13 16:03     ` Christoph Hellwig
2023-12-14  8:56       ` John Garry
2023-12-12 11:08 ` [PATCH v2 05/16] fs: Add RWF_ATOMIC and IOCB_ATOMIC flags for atomic write support John Garry
2023-12-13 13:31   ` Al Viro
2023-12-13 16:02     ` John Garry
2024-01-22  8:29   ` John Garry
2023-12-12 11:08 ` [PATCH v2 06/16] block: Add REQ_ATOMIC flag John Garry
2023-12-12 11:08 ` [PATCH v2 07/16] block: Pass blk_queue_get_max_sectors() a request pointer John Garry
2023-12-12 11:08 ` [PATCH v2 08/16] block: Limit atomic write IO size according to atomic_write_max_sectors John Garry
2023-12-15  2:27   ` Ming Lei
2023-12-15 13:55     ` John Garry
2023-12-12 11:08 ` [PATCH v2 09/16] block: Error an attempt to split an atomic write bio John Garry
2023-12-12 11:08 ` [PATCH v2 10/16] block: Add checks to merging of atomic writes John Garry
2023-12-12 11:08 ` [PATCH v2 11/16] block: Add fops atomic write support John Garry
2023-12-12 11:08 ` [PATCH v2 12/16] scsi: sd: Support reading atomic write properties from block limits VPD John Garry
2023-12-12 11:08 ` [PATCH v2 13/16] scsi: sd: Add WRITE_ATOMIC_16 support John Garry
2023-12-12 11:08 ` [PATCH v2 14/16] scsi: scsi_debug: Atomic write support John Garry
2023-12-12 11:08 ` [PATCH v2 15/16] nvme: Support atomic writes John Garry
2023-12-12 11:08 ` [PATCH v2 16/16] nvme: Ensure atomic writes will be executed atomically John Garry
2023-12-12 16:32 ` [PATCH v2 00/16] block atomic writes Christoph Hellwig
2023-12-13  9:32   ` John Garry
2023-12-13 15:44     ` Christoph Hellwig
2023-12-13 16:27       ` John Garry
2023-12-14 14:37         ` Christoph Hellwig [this message]
2023-12-14 15:46           ` John Garry
2023-12-18 22:50           ` Keith Busch
2023-12-19  5:14         ` Darrick J. Wong
2023-12-19  5:21           ` Christoph Hellwig
2023-12-19 12:41             ` John Garry
2023-12-19 15:17               ` Christoph Hellwig
2023-12-19 16:53                 ` John Garry
2023-12-21  6:50                   ` Christoph Hellwig
2023-12-21  9:49                     ` John Garry
2023-12-21 12:19                       ` Christoph Hellwig
2023-12-21 12:48                         ` John Garry
2023-12-21 12:57                           ` Christoph Hellwig
2023-12-21 13:18                             ` John Garry
2023-12-21 13:22                               ` Christoph Hellwig
2023-12-21 13:56                                 ` John Garry
2024-01-16 11:35                                 ` John Garry
2024-01-17 15:02                                   ` Christoph Hellwig
2024-01-17 16:16                                     ` John Garry
2024-01-09  9:55                     ` John Garry
2024-01-09 16:02                       ` Christoph Hellwig
2024-01-09 16:52                         ` John Garry
2024-01-09 23:04                       ` Dave Chinner
2024-01-10  8:55                         ` John Garry
2024-01-10  9:19                         ` Christoph Hellwig
2024-01-11  1:40                           ` Darrick J. Wong
2024-01-11  5:02                             ` Christoph Hellwig
2024-01-11  9:55                               ` John Garry
2024-01-11 14:45                                 ` Christoph Hellwig
2024-01-11 16:11                                   ` John Garry
2024-01-11 16:15                                     ` Christoph Hellwig
