* [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:57 ` Johannes Thumshirn
2017-03-22 19:07 ` Christoph Hellwig
2017-03-09 13:16 ` [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq Sagi Grimberg
` (8 subsequent siblings)
9 siblings, 2 replies; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
Just some rework to split the logic and make it slightly
more readable. This will help us to easily add the irq-poll
logic.
Also, introduce nvme_ring_cq_doorbell helper to mask out the
cq_vector validity check.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/host/pci.c | 109 +++++++++++++++++++++++++++++-------------------
1 file changed, 65 insertions(+), 44 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 26a5fd05fe88..d3f74fa40f26 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -71,7 +71,7 @@ struct nvme_dev;
struct nvme_queue;
static int nvme_reset(struct nvme_dev *dev);
-static void nvme_process_cq(struct nvme_queue *nvmeq);
+static int nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
/*
@@ -665,75 +665,96 @@ static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}
-static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
+static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
- u16 head, phase;
+ if (likely(nvmeq->cq_vector >= 0))
+ writel(nvmeq->cq_head, nvmeq->q_db + nvmeq->dev->db_stride);
+}
- head = nvmeq->cq_head;
- phase = nvmeq->cq_phase;
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+ struct nvme_completion *cqe)
+{
+ struct request *req;
- while (nvme_cqe_valid(nvmeq, head, phase)) {
- struct nvme_completion cqe = nvmeq->cqes[head];
- struct request *req;
+ if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
+ dev_warn(nvmeq->dev->ctrl.device,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
- if (++head == nvmeq->q_depth) {
- head = 0;
- phase = !phase;
- }
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvmeq->qid == 0 &&
+ cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+ nvme_complete_async_event(&nvmeq->dev->ctrl,
+ cqe->status, &cqe->result);
+ return;
+ }
- if (tag && *tag == cqe.command_id)
- *tag = -1;
+ req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+ nvme_req(req)->result = cqe->result;
+ blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+}
- if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
- dev_warn(nvmeq->dev->ctrl.device,
- "invalid id %d completed on queue %d\n",
- cqe.command_id, le16_to_cpu(cqe.sq_id));
- continue;
- }
+static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
+ struct nvme_completion *cqe)
+{
+ if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
+ *cqe = nvmeq->cqes[nvmeq->cq_head];
- /*
- * AEN requests are special as they don't time out and can
- * survive any kind of queue freeze and often don't respond to
- * aborts. We don't even bother to allocate a struct request
- * for them but rather special case them here.
- */
- if (unlikely(nvmeq->qid == 0 &&
- cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(&nvmeq->dev->ctrl,
- cqe.status, &cqe.result);
- continue;
+ if (++nvmeq->cq_head == nvmeq->q_depth) {
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = !nvmeq->cq_phase;
}
-
- req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
- nvme_req(req)->result = cqe.result;
- blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
+ return true;
}
+ return false;
+}
- if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
- return;
+static int __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
+{
+ struct nvme_completion cqe;
+ int consumed = 0;
- if (likely(nvmeq->cq_vector >= 0))
- writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
- nvmeq->cq_head = head;
- nvmeq->cq_phase = phase;
+ while (nvme_read_cqe(nvmeq, &cqe)) {
+ nvme_handle_cqe(nvmeq, &cqe);
+ consumed++;
- nvmeq->cqe_seen = 1;
+ if (tag && *tag == cqe.command_id) {
+ *tag = -1;
+ break;
+ }
+ }
+
+ if (consumed) {
+ nvme_ring_cq_doorbell(nvmeq);
+ nvmeq->cqe_seen = 1;
+ }
+
+ return consumed;
}
-static void nvme_process_cq(struct nvme_queue *nvmeq)
+static int nvme_process_cq(struct nvme_queue *nvmeq)
{
- __nvme_process_cq(nvmeq, NULL);
+ return __nvme_process_cq(nvmeq, NULL);
}
static irqreturn_t nvme_irq(int irq, void *data)
{
irqreturn_t result;
struct nvme_queue *nvmeq = data;
+
spin_lock(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
nvmeq->cqe_seen = 0;
spin_unlock(&nvmeq->q_lock);
+
return result;
}
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle
2017-03-09 13:16 ` [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle Sagi Grimberg
@ 2017-03-09 13:57 ` Johannes Thumshirn
2017-03-22 19:07 ` Christoph Hellwig
1 sibling, 0 replies; 30+ messages in thread
From: Johannes Thumshirn @ 2017-03-09 13:57 UTC (permalink / raw)
To: Sagi Grimberg, linux-block, linux-nvme, linux-rdma, target-devel
On 03/09/2017 02:16 PM, Sagi Grimberg wrote:
> Just some rework to split the logic and make it slightly
> more readable. This will help us to easily add the irq-poll
> logic.
>
> Also, introduce nvme_ring_cq_doorbell helper to mask out the
> cq_vector validity check.
>
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle
2017-03-09 13:16 ` [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle Sagi Grimberg
2017-03-09 13:57 ` Johannes Thumshirn
@ 2017-03-22 19:07 ` Christoph Hellwig
1 sibling, 0 replies; 30+ messages in thread
From: Christoph Hellwig @ 2017-03-22 19:07 UTC (permalink / raw)
To: Sagi Grimberg; +Cc: linux-block, linux-nvme, linux-rdma, target-devel
Looks fine,
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
2017-03-09 13:16 ` [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:46 ` Johannes Thumshirn
2017-03-22 19:08 ` Christoph Hellwig
2017-03-09 13:16 ` [PATCH rfc 03/10] nvme-pci: open-code polling logic in nvme_poll Sagi Grimberg
` (7 subsequent siblings)
9 siblings, 2 replies; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
Prepare to allow passing a batch size to nvme cq processing.
This patch does not change any functionality.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/host/pci.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d3f74fa40f26..4ed67f194cfd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -716,14 +716,15 @@ static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
return false;
}
-static int __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
+static int __nvme_process_cq(struct nvme_queue *nvmeq, int budget, int *tag)
{
struct nvme_completion cqe;
int consumed = 0;
while (nvme_read_cqe(nvmeq, &cqe)) {
nvme_handle_cqe(nvmeq, &cqe);
- consumed++;
+ if (++consumed == budget)
+ break;
if (tag && *tag == cqe.command_id) {
*tag = -1;
@@ -741,7 +742,7 @@ static int __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
static int nvme_process_cq(struct nvme_queue *nvmeq)
{
- return __nvme_process_cq(nvmeq, NULL);
+ return __nvme_process_cq(nvmeq, INT_MAX, NULL);
}
static irqreturn_t nvme_irq(int irq, void *data)
@@ -772,7 +773,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
spin_lock_irq(&nvmeq->q_lock);
- __nvme_process_cq(nvmeq, &tag);
+ __nvme_process_cq(nvmeq, INT_MAX, &tag);
spin_unlock_irq(&nvmeq->q_lock);
if (tag == -1)
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq
2017-03-09 13:16 ` [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq Sagi Grimberg
@ 2017-03-09 13:46 ` Johannes Thumshirn
2017-03-22 19:08 ` Christoph Hellwig
1 sibling, 0 replies; 30+ messages in thread
From: Johannes Thumshirn @ 2017-03-09 13:46 UTC (permalink / raw)
To: Sagi Grimberg, linux-block, linux-nvme, linux-rdma, target-devel
On 03/09/2017 02:16 PM, Sagi Grimberg wrote:
> Prepare to allow passing a batch size to nvme cq processing.
>
> This patch does not change any functionality.
>
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
I think I already did this with the 1st RFC (this patch gives me a deja
vu moment), but:
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq
2017-03-09 13:16 ` [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq Sagi Grimberg
2017-03-09 13:46 ` Johannes Thumshirn
@ 2017-03-22 19:08 ` Christoph Hellwig
1 sibling, 0 replies; 30+ messages in thread
From: Christoph Hellwig @ 2017-03-22 19:08 UTC (permalink / raw)
To: Sagi Grimberg; +Cc: linux-block, linux-nvme, linux-rdma, target-devel
Looks good,
Reviewed-by: Christoph Hellwig <hch@lst.de>
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH rfc 03/10] nvme-pci: open-code polling logic in nvme_poll
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
2017-03-09 13:16 ` [PATCH rfc 01/10] nvme-pci: Split __nvme_process_cq to poll and handle Sagi Grimberg
2017-03-09 13:16 ` [PATCH rfc 02/10] nvme-pci: Add budget to __nvme_process_cq Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
[not found] ` <1489065402-14757-4-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-09 13:16 ` [PATCH rfc 04/10] block: Add a non-selective polling interface Sagi Grimberg
` (6 subsequent siblings)
9 siblings, 1 reply; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
Given that the code is simple enough it seems better
than passing a tag by reference for each call site.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/host/pci.c | 34 ++++++++++++++++++++--------------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4ed67f194cfd..a7ad514c2451 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -716,7 +716,7 @@ static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
return false;
}
-static int __nvme_process_cq(struct nvme_queue *nvmeq, int budget, int *tag)
+static int __nvme_process_cq(struct nvme_queue *nvmeq, int budget)
{
struct nvme_completion cqe;
int consumed = 0;
@@ -725,11 +725,6 @@ static int __nvme_process_cq(struct nvme_queue *nvmeq, int budget, int *tag)
nvme_handle_cqe(nvmeq, &cqe);
if (++consumed == budget)
break;
-
- if (tag && *tag == cqe.command_id) {
- *tag = -1;
- break;
- }
}
if (consumed) {
@@ -742,7 +737,7 @@ static int __nvme_process_cq(struct nvme_queue *nvmeq, int budget, int *tag)
static int nvme_process_cq(struct nvme_queue *nvmeq)
{
- return __nvme_process_cq(nvmeq, INT_MAX, NULL);
+ return __nvme_process_cq(nvmeq, INT_MAX);
}
static irqreturn_t nvme_irq(int irq, void *data)
@@ -770,17 +765,28 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
struct nvme_queue *nvmeq = hctx->driver_data;
+ struct nvme_completion cqe;
+ int found = 0, consumed = 0;
- if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
- spin_lock_irq(&nvmeq->q_lock);
- __nvme_process_cq(nvmeq, INT_MAX, &tag);
- spin_unlock_irq(&nvmeq->q_lock);
+ if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+ return 0;
- if (tag == -1)
- return 1;
+ spin_lock_irq(&nvmeq->q_lock);
+ while (nvme_read_cqe(nvmeq, &cqe)) {
+ nvme_handle_cqe(nvmeq, &cqe);
+ consumed++;
+
+ if (tag == cqe.command_id) {
+ found = 1;
+ break;
+ }
}
- return 0;
+ if (consumed)
+ nvme_ring_cq_doorbell(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+
+ return found;
}
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH rfc 04/10] block: Add a non-selective polling interface
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (2 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 03/10] nvme-pci: open-code polling logic in nvme_poll Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:44 ` Johannes Thumshirn
[not found] ` <1489065402-14757-5-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-09 13:16 ` [PATCH rfc 05/10] nvme-pci: Support blk_poll_batch Sagi Grimberg
` (5 subsequent siblings)
9 siblings, 2 replies; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
For a server/target appliance mode where we don't
necessarily care about specific IOs but rather want
to poll opportunistically, it is useful to have a
non-selective polling interface.
Expose a blk_poll_batch for a batched blkdev polling
interface so our nvme target (and others) can use.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
block/blk-mq.c | 14 ++++++++++++++
include/linux/blk-mq.h | 2 ++
include/linux/blkdev.h | 1 +
3 files changed, 17 insertions(+)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b2fd175e84d7..1962785b571a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2911,6 +2911,20 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
}
EXPORT_SYMBOL_GPL(blk_mq_poll);
+int blk_mq_poll_batch(struct request_queue *q, unsigned int batch)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ if (!q->mq_ops || !q->mq_ops->poll_batch)
+ return 0;
+
+ hctx = blk_mq_map_queue(q, smp_processor_id());
+ return q->mq_ops->poll_batch(hctx, batch);
+}
+EXPORT_SYMBOL_GPL(blk_mq_poll_batch);
+
+
+
void blk_mq_disable_hotplug(void)
{
mutex_lock(&all_q_mutex);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b296a9006117..e1f33cad3067 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -100,6 +100,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+typedef int (poll_batch_fn)(struct blk_mq_hw_ctx *, unsigned int);
struct blk_mq_ops {
@@ -117,6 +118,7 @@ struct blk_mq_ops {
* Called to poll for completion of a specific tag.
*/
poll_fn *poll;
+ poll_batch_fn *poll_batch;
softirq_done_fn *complete;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 796016e63c1d..a93507e61a57 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -971,6 +971,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_mq_poll_batch(struct request_queue *q, unsigned int batch);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 04/10] block: Add a non-selective polling interface
2017-03-09 13:16 ` [PATCH rfc 04/10] block: Add a non-selective polling interface Sagi Grimberg
@ 2017-03-09 13:44 ` Johannes Thumshirn
2017-03-10 3:04 ` Damien Le Moal
[not found] ` <1489065402-14757-5-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
1 sibling, 1 reply; 30+ messages in thread
From: Johannes Thumshirn @ 2017-03-09 13:44 UTC (permalink / raw)
To: Sagi Grimberg, linux-block, linux-nvme, linux-rdma, target-devel
On 03/09/2017 02:16 PM, Sagi Grimberg wrote:
> For a server/target appliance mode where we don't
> necessarily care about specific IOs but rather want
> to poll opportunistically, it is useful to have a
> non-selective polling interface.
>
> Expose a blk_poll_batch for a batched blkdev polling
> interface so our nvme target (and others) can use.
>
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
> block/blk-mq.c | 14 ++++++++++++++
> include/linux/blk-mq.h | 2 ++
> include/linux/blkdev.h | 1 +
> 3 files changed, 17 insertions(+)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index b2fd175e84d7..1962785b571a 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2911,6 +2911,20 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
> }
> EXPORT_SYMBOL_GPL(blk_mq_poll);
>
> +int blk_mq_poll_batch(struct request_queue *q, unsigned int batch)
> +{
> + struct blk_mq_hw_ctx *hctx;
> +
> + if (!q->mq_ops || !q->mq_ops->poll_batch)
> + return 0;
> +
> + hctx = blk_mq_map_queue(q, smp_processor_id());
> + return q->mq_ops->poll_batch(hctx, batch);
> +}
> +EXPORT_SYMBOL_GPL(blk_mq_poll_batch);
> +
> +
> +
Quite some additional newlines and I'm not really fond of the
->poll_batch() name. It's a bit confusing with ->poll() and we also have
irq_poll(). But the only thing that would come to my mind is
complete_batch() which "races" with ->complete().
Otherwise looks OK,
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 04/10] block: Add a non-selective polling interface
2017-03-09 13:44 ` Johannes Thumshirn
@ 2017-03-10 3:04 ` Damien Le Moal
2017-03-13 8:26 ` Sagi Grimberg
0 siblings, 1 reply; 30+ messages in thread
From: Damien Le Moal @ 2017-03-10 3:04 UTC (permalink / raw)
To: Johannes Thumshirn, Sagi Grimberg, linux-block, linux-nvme,
linux-rdma, target-devel
On 3/9/17 22:44, Johannes Thumshirn wrote:
> On 03/09/2017 02:16 PM, Sagi Grimberg wrote:
>> For a server/target appliance mode where we don't
>> necessarily care about specific IOs but rather want
>> to poll opportunistically, it is useful to have a
>> non-selective polling interface.
>>
>> Expose a blk_poll_batch for a batched blkdev polling
>> interface so our nvme target (and others) can use.
>>
>> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
>> ---
>> block/blk-mq.c | 14 ++++++++++++++
>> include/linux/blk-mq.h | 2 ++
>> include/linux/blkdev.h | 1 +
>> 3 files changed, 17 insertions(+)
>>
>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>> index b2fd175e84d7..1962785b571a 100644
>> --- a/block/blk-mq.c
>> +++ b/block/blk-mq.c
>> @@ -2911,6 +2911,20 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
>> }
>> EXPORT_SYMBOL_GPL(blk_mq_poll);
>>
>> +int blk_mq_poll_batch(struct request_queue *q, unsigned int batch)
>> +{
>> + struct blk_mq_hw_ctx *hctx;
>> +
>> + if (!q->mq_ops || !q->mq_ops->poll_batch)
>> + return 0;
>> +
>> + hctx = blk_mq_map_queue(q, smp_processor_id());
>> + return q->mq_ops->poll_batch(hctx, batch);
>> +}
>> +EXPORT_SYMBOL_GPL(blk_mq_poll_batch);
>> +
>> +
>> +
>
> Quite some additional newlines and I'm not really fond of the
> ->poll_batch() name. It's a bit confusing with ->poll() and we also have
> irq_poll(). But the only thing that would come to my mind is
> complete_batch() which "races" with ->complete().
What about ->check_completions()? After all, that is what both ->poll()
and ->poll_batch do but with a different stop condition, no ?
So it would also be easy to merge the two: both tag and batch are
unsigned int which could be called "cookie", and add a flag to tell how
to interpret it (as a tag or a batch limit).
e.g. something like:
+typedef int (check_completions_fn)(struct blk_mq_hw_ctx *,
enum blk_mq_check_flags, /* flag (TAG or BATCH) */
unsigned int); /* Target tag or batch limit */
Best regards.
--
Damien Le Moal, Ph.D.
Sr. Manager, System Software Research Group,
Western Digital Corporation
Damien.LeMoal@wdc.com
(+81) 0466-98-3593 (ext. 513593)
1 kirihara-cho, Fujisawa,
Kanagawa, 252-0888 Japan
www.wdc.com, www.hgst.com
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 04/10] block: Add a non-selective polling interface
2017-03-10 3:04 ` Damien Le Moal
@ 2017-03-13 8:26 ` Sagi Grimberg
0 siblings, 0 replies; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-13 8:26 UTC (permalink / raw)
To: Damien Le Moal, Johannes Thumshirn, linux-block, linux-nvme,
linux-rdma, target-devel
>> Quite some additional newlines and I'm not really fond of the
>> ->poll_batch() name. It's a bit confusing with ->poll() and we also have
>> irq_poll(). But the only thing that would come to my mind is
>> complete_batch() which "races" with ->complete().
>
> What about ->check_completions()? After all, that is what both ->poll()
> and ->poll_batch do but with a different stop condition, no ?
> So it would also be easy to merge the two: both tag and batch are
> unsigned int which could be called "cookie", and add a flag to tell how
> to interpret it (as a tag or a batch limit).
> e.g. something like:
>
> +typedef int (check_completions_fn)(struct blk_mq_hw_ctx *,
> enum blk_mq_check_flags, /* flag (TAG or BATCH) */
> unsigned int); /* Target tag or batch limit */
>
I'd rather not unite poll/poll_batch, but if this is something
that people want I can definitely do it.
^ permalink raw reply [flat|nested] 30+ messages in thread
[parent not found: <1489065402-14757-5-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>]
* Re: [PATCH rfc 04/10] block: Add a non-selective polling interface
[not found] ` <1489065402-14757-5-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
@ 2017-03-09 16:25 ` Bart Van Assche
2017-03-13 8:15 ` Sagi Grimberg
0 siblings, 1 reply; 30+ messages in thread
From: Bart Van Assche @ 2017-03-09 16:25 UTC (permalink / raw)
To: linux-rdma-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linux-block-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
linux-nvme-IAPFreCvJWM7uuMidbF8XUB+6BGkLq7r@public.gmane.org,
target-devel-u79uwXL29TY76Z2rM5mHXA@public.gmane.org,
sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org
On Thu, 2017-03-09 at 15:16 +0200, Sagi Grimberg wrote:
> +int blk_mq_poll_batch(struct request_queue *q, unsigned int batch)
> +{
> + struct blk_mq_hw_ctx *hctx;
> +
> + if (!q->mq_ops || !q->mq_ops->poll_batch)
> + return 0;
> +
> + hctx = blk_mq_map_queue(q, smp_processor_id());
> + return q->mq_ops->poll_batch(hctx, batch);
> +}
> +EXPORT_SYMBOL_GPL(blk_mq_poll_batch);
A new exported function without any documentation? Wow. Please add a header
above this function that documents at least which other completion processing
code can execute concurrently with this function and from which contexts the
other completion processing code can be called (e.g. blk_mq_poll() and
.complete()).
Why to return if (!q->mq_ops || !q->mq_ops->poll_batch)? Shouldn't that be a
WARN_ON_ONCE() instead? I think it is an error to calling blk_mq_poll_batch()
against a queue that does not define .poll_batch().
Additionally, I think making the hardware context an argument of this function
instead of using blk_mq_map_queue(q, smp_processor_id()) would make this
function much more versatile.
Bart.--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 04/10] block: Add a non-selective polling interface
2017-03-09 16:25 ` Bart Van Assche
@ 2017-03-13 8:15 ` Sagi Grimberg
[not found] ` <b8124df3-bd09-2eb2-9899-3c9195605510-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
0 siblings, 1 reply; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-13 8:15 UTC (permalink / raw)
To: Bart Van Assche, linux-rdma@vger.kernel.org,
linux-block@vger.kernel.org, linux-nvme@lists.infradead.org,
target-devel@vger.kernel.org
>> +int blk_mq_poll_batch(struct request_queue *q, unsigned int batch)
>> +{
>> + struct blk_mq_hw_ctx *hctx;
>> +
>> + if (!q->mq_ops || !q->mq_ops->poll_batch)
>> + return 0;
>> +
>> + hctx = blk_mq_map_queue(q, smp_processor_id());
>> + return q->mq_ops->poll_batch(hctx, batch);
>> +}
>> +EXPORT_SYMBOL_GPL(blk_mq_poll_batch);
>
> A new exported function without any documentation? Wow.
I just copied blk_mq_poll export...
> Please add a header
> above this function that documents at least which other completion processing
> code can execute concurrently with this function and from which contexts the
> other completion processing code can be called (e.g. blk_mq_poll() and
> .complete()).
I can do that, I'll document blk_mq_poll too..
> Why to return if (!q->mq_ops || !q->mq_ops->poll_batch)? Shouldn't that be a
> WARN_ON_ONCE() instead? I think it is an error to calling blk_mq_poll_batch()
> against a queue that does not define .poll_batch().
Not really, we don't know if the block driver actually supports
poll_batch (or poll for that matter). Instead of conditioning in the
call-site, we condition within the call.
Its not really a bug, its harmless.
> Additionally, I think making the hardware context an argument of this function
> instead of using blk_mq_map_queue(q, smp_processor_id()) would make this
> function much more versatile.
What do you mean? remember that the callers interface to the device is
a request queue, it doesn't even know if its a blk-mq device. Can you
explain in more details what you would like to see?
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH rfc 05/10] nvme-pci: Support blk_poll_batch
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (3 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 04/10] block: Add a non-selective polling interface Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:16 ` [PATCH rfc 06/10] IB/cq: Don't force IB_POLL_DIRECT poll context for ib_process_cq_direct Sagi Grimberg
` (4 subsequent siblings)
9 siblings, 0 replies; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/host/pci.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a7ad514c2451..f8dcd0bd19f8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -789,6 +789,21 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return found;
}
+static int nvme_poll_batch(struct blk_mq_hw_ctx *hctx, unsigned int batch)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ int completed;
+
+ if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+ return 0;
+
+ spin_lock_irq(&nvmeq->q_lock);
+ completed = __nvme_process_cq(nvmeq, batch);
+ spin_unlock_irq(&nvmeq->q_lock);
+
+ return completed;
+}
+
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
@@ -1174,6 +1189,7 @@ static struct blk_mq_ops nvme_mq_ops = {
.map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
+ .poll_batch = nvme_poll_batch,
};
static void nvme_dev_remove_admin(struct nvme_dev *dev)
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH rfc 06/10] IB/cq: Don't force IB_POLL_DIRECT poll context for ib_process_cq_direct
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (4 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 05/10] nvme-pci: Support blk_poll_batch Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
[not found] ` <1489065402-14757-7-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
2017-03-09 13:16 ` [PATCH rfc 07/10] nvme-rdma: Don't rearm the CQ when polling directly Sagi Grimberg
` (3 subsequent siblings)
9 siblings, 1 reply; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
polling the completion queue directly does not interfere
with the existing polling logic, hence drop the requirement.
This can be used for polling mode ULPs.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/infiniband/core/cq.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 21d1a38af489..7f6ae0ecb0c5 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -64,8 +64,6 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
*/
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
- WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
-
return __ib_process_cq(cq, budget);
}
EXPORT_SYMBOL(ib_process_cq_direct);
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH rfc 07/10] nvme-rdma: Don't rearm the CQ when polling directly
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (5 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 06/10] IB/cq: Don't force IB_POLL_DIRECT poll context for ib_process_cq_direct Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:52 ` Johannes Thumshirn
2017-03-09 13:16 ` [PATCH rfc 08/10] nvme-rdma: Support blk_poll_batch Sagi Grimberg
` (2 subsequent siblings)
9 siblings, 1 reply; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
We don't need it as the core polling context will take
are of rearming the completion queue.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/host/rdma.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7bad791a7fe9..a17eef3ef6ff 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1474,7 +1474,6 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
struct ib_wc wc;
int found = 0;
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
while (ib_poll_cq(cq, 1, &wc) > 0) {
struct ib_cqe *cqe = wc.wr_cqe;
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 07/10] nvme-rdma: Don't rearm the CQ when polling directly
2017-03-09 13:16 ` [PATCH rfc 07/10] nvme-rdma: Don't rearm the CQ when polling directly Sagi Grimberg
@ 2017-03-09 13:52 ` Johannes Thumshirn
0 siblings, 0 replies; 30+ messages in thread
From: Johannes Thumshirn @ 2017-03-09 13:52 UTC (permalink / raw)
To: Sagi Grimberg, linux-block, linux-nvme, linux-rdma, target-devel
On 03/09/2017 02:16 PM, Sagi Grimberg wrote:
> We don't need it as the core polling context will take
> are of rearming the completion queue.
^ care?
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH rfc 08/10] nvme-rdma: Support blk_poll_batch
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (6 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 07/10] nvme-rdma: Don't rearm the CQ when polling directly Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:16 ` [PATCH rfc 09/10] nvmet: Use non-selective polling Sagi Grimberg
2017-03-09 13:16 ` [PATCH rfc 10/10] target: " Sagi Grimberg
9 siblings, 0 replies; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/host/rdma.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a17eef3ef6ff..29ac8fcb8d2c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1488,6 +1488,13 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return found;
}
+static int nvme_rdma_poll_batch(struct blk_mq_hw_ctx *hctx, unsigned int batch)
+{
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+
+ return ib_process_cq_direct(queue->ib_cq, batch);
+}
+
static void nvme_rdma_complete_rq(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
@@ -1519,6 +1526,7 @@ static struct blk_mq_ops nvme_rdma_mq_ops = {
.reinit_request = nvme_rdma_reinit_request,
.init_hctx = nvme_rdma_init_hctx,
.poll = nvme_rdma_poll,
+ .poll_batch = nvme_rdma_poll_batch,
.timeout = nvme_rdma_timeout,
};
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* [PATCH rfc 09/10] nvmet: Use non-selective polling
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (7 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 08/10] nvme-rdma: Support blk_poll_batch Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
2017-03-09 13:54 ` Johannes Thumshirn
2017-03-09 13:16 ` [PATCH rfc 10/10] target: " Sagi Grimberg
9 siblings, 1 reply; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
It doesn't really make sense to do selective polling
because we never care about specific IOs. Non selective
polling can actually help by doing some useful work
while we're submitting a command.
We ask for a batch of (magic) 4 completions which looks
like a decent network<->backend proportion; if fewer are
available we'll see fewer.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/nvme/target/io-cmd.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4195115c7e54..8e4fd7ca4a8a 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -46,7 +46,6 @@ static void nvmet_execute_rw(struct nvmet_req *req)
struct scatterlist *sg;
struct bio *bio;
sector_t sector;
- blk_qc_t cookie;
int op, op_flags = 0, i;
if (!req->sg_cnt) {
@@ -85,16 +84,17 @@ static void nvmet_execute_rw(struct nvmet_req *req)
bio_set_op_attrs(bio, op, op_flags);
bio_chain(bio, prev);
- cookie = submit_bio(prev);
+ submit_bio(prev);
}
sector += sg->length >> 9;
sg_cnt--;
}
- cookie = submit_bio(bio);
+ submit_bio(bio);
- blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
+ /* magic 4 is what we are willing to grab before we return */
+ blk_mq_poll_batch(bdev_get_queue(req->ns->bdev), 4);
}
static void nvmet_execute_flush(struct nvmet_req *req)
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread
* Re: [PATCH rfc 09/10] nvmet: Use non-selective polling
2017-03-09 13:16 ` [PATCH rfc 09/10] nvmet: Use non-selective polling Sagi Grimberg
@ 2017-03-09 13:54 ` Johannes Thumshirn
0 siblings, 0 replies; 30+ messages in thread
From: Johannes Thumshirn @ 2017-03-09 13:54 UTC (permalink / raw)
To: Sagi Grimberg, linux-block, linux-nvme, linux-rdma, target-devel
On 03/09/2017 02:16 PM, Sagi Grimberg wrote:
> It doesn't really make sense to do selective polling
> because we never care about specific IOs. Non selective
> polling can actually help by doing some useful work
> while we're submitting a command.
>
> We ask for a batch of (magic) 4 completions which looks
> like a decent network<->backend proportion, if less are
> available we'll see less.
>
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
Just out of curiosity, how did you come up with the magic 4?
Thanks,
Johannes
--
Johannes Thumshirn Storage
jthumshirn@suse.de +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
^ permalink raw reply [flat|nested] 30+ messages in thread
* [PATCH rfc 10/10] target: Use non-selective polling
2017-03-09 13:16 [PATCH rfc 00/10] non selective polling block interface Sagi Grimberg
` (8 preceding siblings ...)
2017-03-09 13:16 ` [PATCH rfc 09/10] nvmet: Use non-selective polling Sagi Grimberg
@ 2017-03-09 13:16 ` Sagi Grimberg
[not found] ` <1489065402-14757-11-git-send-email-sagi-NQWnxTmZq1alnMjI0IkVqw@public.gmane.org>
9 siblings, 1 reply; 30+ messages in thread
From: Sagi Grimberg @ 2017-03-09 13:16 UTC (permalink / raw)
To: linux-block, linux-nvme, linux-rdma, target-devel
It doesn't really make sense to do selective polling
because we never care about specific IOs. Non selective
polling can actually help by doing some useful work
while we're submitting a command.
We ask for a batch of (magic) 4 completions which looks
like a decent network<->backend proportion; if fewer are
available we'll see fewer.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
drivers/target/target_core_iblock.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index d316ed537d59..00726b6e51c4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -757,6 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
iblock_submit_bios(&list);
+ blk_mq_poll_batch(bdev_get_queue(IBLOCK_DEV(dev)->ibd_bd), 4);
iblock_complete_cmd(cmd);
return 0;
--
2.7.4
^ permalink raw reply related [flat|nested] 30+ messages in thread