* [PATCH] Cancel outstanding IOs on queue deletion
@ 2012-08-02 17:55 Matthew Wilcox
2012-08-02 18:51 ` Busch, Keith
0 siblings, 1 reply; 4+ messages in thread
From: Matthew Wilcox @ 2012-08-02 17:55 UTC (permalink / raw)
If the device is hot-unplugged while there are active commands, we should
time out the I/Os so that upper layers don't just see the I/Os disappear.
Most of the bulk of this patch is just moving nvme_timeout_ios so that it
doesn't need to be declared before its first use.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox at intel.com>
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 3278fbd..570080e 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -868,11 +869,33 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
return nvme_submit_admin_cmd(dev, &c, result);
}
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+ if (!time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
int vector = dev->entry[nvmeq->cq_vector].vector;
+ nvme_timeout_ios(nvmeq);
+
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
@@ -1226,26 +1249,6 @@ static const struct block_device_operations nvme_fops = {
.compat_ioctl = nvme_ioctl,
};
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- unsigned long now = jiffies;
- int cmdid;
-
- for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
- void *ctx;
- nvme_completion_fn fn;
- static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
- if (!time_after(now, info[cmdid].timeout))
- continue;
- dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
- ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
- }
-}
-
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1539,8 +1542,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
list_del(&dev->node);
spin_unlock(&dev_list_lock);
- /* TODO: wait all I/O finished or cancel them */
-
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
del_gendisk(ns->disk);
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH] Cancel outstanding IOs on queue deletion
2012-08-02 17:55 [PATCH] Cancel outstanding IOs on queue deletion Matthew Wilcox
@ 2012-08-02 18:51 ` Busch, Keith
2012-08-02 19:20 ` Matthew Wilcox
0 siblings, 1 reply; 4+ messages in thread
From: Busch, Keith @ 2012-08-02 18:51 UTC (permalink / raw)
> -----Original Message-----
> From: linux-nvme-bounces at lists.infradead.org [mailto:linux-nvme-
> bounces at lists.infradead.org] On Behalf Of Matthew Wilcox
> Sent: Thursday, August 02, 2012 11:56 AM
> To: linux-nvme at lists.infradead.org
> Subject: [PATCH] Cancel outstanding IOs on queue deletion
> static void nvme_free_queue(struct nvme_dev *dev, int qid) {
> struct nvme_queue *nvmeq = dev->queues[qid];
> int vector = dev->entry[nvmeq->cq_vector].vector;
>
> + nvme_timeout_ios(nvmeq);
> +
> irq_set_affinity_hint(vector, NULL);
> free_irq(vector, nvmeq);
>
nvme_timeout_ios cancels an io only if it has timed out, but I think you want to unconditionally cancel them out when freeing the queue. Also, should you hold the q_lock when calling this function?
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH] Cancel outstanding IOs on queue deletion
2012-08-02 18:51 ` Busch, Keith
@ 2012-08-02 19:20 ` Matthew Wilcox
2012-08-03 19:16 ` Matthew Wilcox
0 siblings, 1 reply; 4+ messages in thread
From: Matthew Wilcox @ 2012-08-02 19:20 UTC (permalink / raw)
On Thu, Aug 02, 2012@06:51:35PM +0000, Busch, Keith wrote:
> nvme_timeout_ios cancels an io only if it has timed out, but I think
> you want to unconditionally cancel them out when freeing the queue. Also,
> should you hold the q_lock when calling this function?
Good points. Here's v2. I don't particularly like the 'immediately'
parameter; better naming would be appreciated. I don't think there's
really a good alternative to adding a parameter though; this function
is almost exactly what we want except for the timeout check.
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 3278fbd..ed3c171 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -868,11 +869,37 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
return nvme_submit_admin_cmd(dev, &c, result);
}
+static void nvme_timeout_ios(struct nvme_queue *nvmeq, bool immediately)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = {
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+ };
+
+ if (!immediately && !time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
static void nvme_free_queue(struct nvme_dev *dev, int qid)
{
struct nvme_queue *nvmeq = dev->queues[qid];
int vector = dev->entry[nvmeq->cq_vector].vector;
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_timeout_ios(nvmeq, true);
+ spin_unlock_irq(&nvmeq->q_lock);
+
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
@@ -1226,26 +1253,6 @@ static const struct block_device_operations nvme_fops = {
.compat_ioctl = nvme_ioctl,
};
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- unsigned long now = jiffies;
- int cmdid;
-
- for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
- void *ctx;
- nvme_completion_fn fn;
- static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
- if (!time_after(now, info[cmdid].timeout))
- continue;
- dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
- ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
- }
-}
-
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1277,7 +1284,7 @@ static int nvme_kthread(void *data)
spin_lock_irq(&nvmeq->q_lock);
if (nvme_process_cq(nvmeq))
printk("process_cq did something\n");
- nvme_timeout_ios(nvmeq);
+ nvme_timeout_ios(nvmeq, false);
nvme_resubmit_bios(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
}
@@ -1539,8 +1546,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
list_del(&dev->node);
spin_unlock(&dev_list_lock);
- /* TODO: wait all I/O finished or cancel them */
-
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
del_gendisk(ns->disk);
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH] Cancel outstanding IOs on queue deletion
2012-08-02 19:20 ` Matthew Wilcox
@ 2012-08-03 19:16 ` Matthew Wilcox
0 siblings, 0 replies; 4+ messages in thread
From: Matthew Wilcox @ 2012-08-03 19:16 UTC (permalink / raw)
On Thu, Aug 02, 2012@03:20:14PM -0400, Matthew Wilcox wrote:
> On Thu, Aug 02, 2012@06:51:35PM +0000, Busch, Keith wrote:
> > nvme_timeout_ios cancels an io only if it has timed out, but I think
> > you want to unconditionally cancel them out when freeing the queue. Also,
> > should you hold the q_lock when calling this function?
>
> Good points. Here's v2. I don't particularly like the 'immediately'
> parameter; better naming would be appreciated. I don't think there's
> really a good alternative to adding a parameter though; this function
> is almost exactly what we want except for the timeout check.
Here's v3. I realised that this isn't really 'timeout I/O' any more;
it's 'cancel I/O' with an optional filter on the timeout. And that
realisation tells me how to name the function and its parameters.
It also leads me to change the warning message in the function.
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 2140370..f9ad514 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -868,6 +868,33 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
return nvme_submit_admin_cmd(dev, &c, result);
}
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @nvmeq: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = {
+ .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+ };
+
+ if (timeout && !time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
@@ -882,6 +909,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
struct nvme_queue *nvmeq = dev->queues[qid];
int vector = dev->entry[nvmeq->cq_vector].vector;
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_cancel_ios(nvmeq, false);
+ spin_unlock_irq(&nvmeq->q_lock);
+
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
@@ -1236,26 +1267,6 @@ static const struct block_device_operations nvme_fops = {
.compat_ioctl = nvme_ioctl,
};
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
- int depth = nvmeq->q_depth - 1;
- struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
- unsigned long now = jiffies;
- int cmdid;
-
- for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
- void *ctx;
- nvme_completion_fn fn;
- static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
- if (!time_after(now, info[cmdid].timeout))
- continue;
- dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
- ctx = cancel_cmdid(nvmeq, cmdid, &fn);
- fn(nvmeq->dev, ctx, &cqe);
- }
-}
-
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
{
while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1287,7 +1298,7 @@ static int nvme_kthread(void *data)
spin_lock_irq(&nvmeq->q_lock);
if (nvme_process_cq(nvmeq))
printk("process_cq did something\n");
- nvme_timeout_ios(nvmeq);
+ nvme_cancel_ios(nvmeq, true);
nvme_resubmit_bios(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
}
@@ -1549,8 +1560,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
list_del(&dev->node);
spin_unlock(&dev_list_lock);
- /* TODO: wait all I/O finished or cancel them */
-
list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
list_del(&ns->list);
del_gendisk(ns->disk);
^ permalink raw reply related [flat|nested] 4+ messages in thread
end of thread, other threads:[~2012-08-03 19:16 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-08-02 17:55 [PATCH] Cancel outstanding IOs on queue deletion Matthew Wilcox
2012-08-02 18:51 ` Busch, Keith
2012-08-02 19:20 ` Matthew Wilcox
2012-08-03 19:16 ` Matthew Wilcox
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).