virtualization.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
From: Chaitanya Kulkarni <kch@nvidia.com>
To: <virtualization@lists.linux.dev>, <linux-block@vger.kernel.org>,
	<linux-kernel@vger.kernel.org>
Cc: <mst@redhat.com>, <hch@lst.de>, <jasowang@redhat.com>,
	<pbonzini@redhat.com>, <stefanha@redhat.com>,
	<xuanzhuo@linux.alibaba.com>, <axboe@kernel.dk>,
	Chaitanya Kulkarni <kch@nvidia.com>
Subject: [RFC PATCH 1/1] virtio-blk: process block layer timedout request
Date: Wed, 29 Nov 2023 23:01:33 -0800	[thread overview]
Message-ID: <20231130070133.8059-2-kch@nvidia.com> (raw)
In-Reply-To: <20231130070133.8059-1-kch@nvidia.com>

Improve block layer request handling by implementing a timeout handler.
The current implementation assumes that a request will never time out
and will be completed by the underlying transport. However, this
assumption can cause issues under heavy load, especially when dealing
with different subsystems and real hardware.

To solve this, add a block layer request timeout handler that will
complete timed-out requests in the same context if the virtio device
has a VIRTIO_CONFIG_S_DRIVER_OK status. If the device has any other
status, we'll stop the block layer request queue and proceed with the
teardown sequence, allowing applications waiting for I/O to exit
gracefully with appropriate error.

Also, add two new module parameters that allow the user to specify the
I/O timeout for the tagset when allocating the disk and a teardown limit
for timed-out requests before we initiate device teardown from the
timeout handler. These changes will improve the stability and
reliability of the system under request timeout scenarios.

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 drivers/block/virtio_blk.c      | 122 ++++++++++++++++++++++++++++++++
 include/uapi/linux/virtio_blk.h |   1 +
 2 files changed, 123 insertions(+)

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4689ac2e0c0e..da26c2bf933b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -16,6 +16,7 @@
 #include <linux/blk-mq-virtio.h>
 #include <linux/numa.h>
 #include <linux/vmalloc.h>
+#include <linux/xarray.h>
 #include <uapi/linux/virtio_ring.h>
 
 #define PART_BITS 4
@@ -31,6 +32,15 @@
 #define VIRTIO_BLK_INLINE_SG_CNT	2
 #endif
 
+static unsigned int io_timeout = 20;
+module_param(io_timeout, uint, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O requests. Default:20");
+
+static unsigned int timeout_teardown_limit = 2;
+module_param(timeout_teardown_limit, uint, 0644);
+MODULE_PARM_DESC(timeout_teardown_limit,
+		"request timeout teardown limit for stable dev. Default:2");
+
 static unsigned int num_request_queues;
 module_param(num_request_queues, uint, 0644);
 MODULE_PARM_DESC(num_request_queues,
@@ -84,6 +94,20 @@ struct virtio_blk {
 
 	/* For zoned device */
 	unsigned int zone_sectors;
+
+	/*
+	 * Block layer Request timeout teardown limit when device is in the
+	 * stable state, i.e. it has VIRTIO_CONFIG_S_DRIVER_OK value for its
+	 * config status. Once this limit is reached issue
+	 * virtblk_teardown_work to tear down the device in the block layer
+	 * request timeout callback.
+	 */
+	atomic_t rq_timeout_count;
+	/* avoid tear down race between remove and teardown work */
+	struct mutex teardown_mutex;
+	/* tear down work to be scheduled from block layer request handler */
+	struct work_struct teardown_work;
+
 };
 
 struct virtblk_req {
@@ -117,6 +141,8 @@ static inline blk_status_t virtblk_result(u8 status)
 	case VIRTIO_BLK_S_OK:
 		return BLK_STS_OK;
 	case VIRTIO_BLK_S_UNSUPP:
+	case VIRTIO_BLK_S_TIMEOUT:
+		return BLK_STS_TIMEOUT;
 		return BLK_STS_NOTSUPP;
 	case VIRTIO_BLK_S_ZONE_OPEN_RESOURCE:
 		return BLK_STS_ZONE_OPEN_RESOURCE;
@@ -926,6 +952,7 @@ static void virtblk_free_disk(struct gendisk *disk)
 	struct virtio_blk *vblk = disk->private_data;
 
 	ida_free(&vd_index_ida, vblk->index);
+	mutex_destroy(&vblk->teardown_mutex);
 	mutex_destroy(&vblk->vdev_mutex);
 	kfree(vblk);
 }
@@ -1287,6 +1314,86 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	return found;
 }
 
+/*
+ * blk_mq_tagset_busy_iter() callback used from the teardown path (and
+ * directly from the timeout handler): mark a request as timed out and
+ * complete it from this context.
+ *
+ * Note the status byte is written unconditionally, even when the
+ * request is not completed here.  NOTE(review): confirm this store
+ * cannot race with a normal device-side completion of the same request.
+ */
+static bool virtblk_cancel_request(struct request *rq, void *data)
+{
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
+
+	vbr->in_hdr.status = VIRTIO_BLK_S_TIMEOUT;
+	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq))
+		blk_mq_complete_request(rq);
+
+	/* Returning true keeps the tagset iteration going. */
+	return true;
+}
+
+/*
+ * Tear the device down from the request-timeout path: quiesce the
+ * queue, reap or cancel every outstanding request, remove the gendisk,
+ * then reset the virtio device and free its virtqueues.
+ *
+ * Serialized against virtblk_remove() via teardown_mutex; whichever
+ * side runs first clears vblk->vdev so the other side becomes a no-op.
+ */
+static void virtblk_teardown_work(struct work_struct *w)
+{
+	struct virtio_blk *vblk =
+		container_of(w, struct virtio_blk, teardown_work);
+	struct request_queue *q = vblk->disk->queue;
+	struct virtio_device *vdev = vblk->vdev;
+	struct blk_mq_hw_ctx *hctx;
+	unsigned long idx;
+
+	mutex_lock(&vblk->teardown_mutex);
+	/* virtblk_remove() already tore the device down; nothing to do. */
+	if (!vblk->vdev)
+		goto unlock;
+
+	blk_mq_quiesce_queue(q);
+
+	/* Process any outstanding request from device. */
+	xa_for_each(&q->hctx_table, idx, hctx)
+		virtblk_poll(hctx, NULL);
+
+	blk_sync_queue(q);
+	/* Anything still in flight is forced to completion as timed out. */
+	blk_mq_tagset_busy_iter(&vblk->tag_set, virtblk_cancel_request, vblk);
+	blk_mq_tagset_wait_completed_request(&vblk->tag_set);
+
+	/*
+	 * Unblock any pending dispatch I/Os before we destroy device. From
+	 * del_gendisk() -> __blk_mark_disk_dead(disk) will set GD_DEAD flag,
+	 * that will make sure any new I/O from bio_queue_enter() to fail.
+	 */
+	blk_mq_unquiesce_queue(q);
+	del_gendisk(vblk->disk);
+	blk_mq_free_tag_set(&vblk->tag_set);
+
+	/* vdev_mutex keeps config_work/ioctl paths off the dying vdev. */
+	mutex_lock(&vblk->vdev_mutex);
+	flush_work(&vblk->config_work);
+
+	virtio_reset_device(vdev);
+
+	/* Signals virtblk_remove() that teardown already happened. */
+	vblk->vdev = NULL;
+
+	vdev->config->del_vqs(vdev);
+	kfree(vblk->vqs);
+
+	mutex_unlock(&vblk->vdev_mutex);
+
+	put_disk(vblk->disk);
+
+unlock:
+	mutex_unlock(&vblk->teardown_mutex);
+}
+
+/*
+ * blk_mq .timeout handler.  While the device still reports
+ * VIRTIO_CONFIG_S_DRIVER_OK and the teardown budget
+ * (rq_timeout_count, seeded from timeout_teardown_limit) is not yet
+ * exhausted, complete the timed-out request in place.  Otherwise
+ * schedule full device teardown and re-arm the timer so the request
+ * stays pending until virtblk_teardown_work() cancels it.
+ *
+ * NOTE(review): vblk->vdev is dereferenced without vdev_mutex here —
+ * confirm this cannot race with remove/teardown clearing it.  Also,
+ * rq_timeout_count goes negative once the limit is hit, so later
+ * timeouts on a DRIVER_OK device keep taking the cancel-in-place
+ * branch — presumably intentional; verify.
+ */
+static enum blk_eh_timer_return virtblk_timeout(struct request *req)
+{
+	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+	struct virtio_device *vdev = vblk->vdev;
+	bool ok = vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK;
+
+	if ((atomic_dec_return(&vblk->rq_timeout_count) != 0) && ok) {
+		virtblk_cancel_request(req, NULL);
+		return BLK_EH_DONE;
+	}
+
+	dev_err(&vdev->dev, "%s:%s initiating teardown\n", __func__,
+		vblk->disk->disk_name);
+
+	/* No-op if the teardown work is already queued. */
+	queue_work(virtblk_wq, &vblk->teardown_work);
+
+	return BLK_EH_RESET_TIMER;
+}
+
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.queue_rqs	= virtio_queue_rqs,
@@ -1294,6 +1401,7 @@ static const struct blk_mq_ops virtio_mq_ops = {
 	.complete	= virtblk_request_done,
 	.map_queues	= virtblk_map_queues,
 	.poll		= virtblk_poll,
+	.timeout	= virtblk_timeout,
 };
 
 static unsigned int virtblk_queue_depth;
@@ -1365,6 +1473,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
 	vblk->tag_set.ops = &virtio_mq_ops;
 	vblk->tag_set.queue_depth = queue_depth;
+	vblk->tag_set.timeout = io_timeout * HZ;
 	vblk->tag_set.numa_node = NUMA_NO_NODE;
 	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 	vblk->tag_set.cmd_size =
@@ -1387,6 +1496,10 @@ static int virtblk_probe(struct virtio_device *vdev)
 	}
 	q = vblk->disk->queue;
 
+	mutex_init(&vblk->teardown_mutex);
+	INIT_WORK(&vblk->teardown_work, virtblk_teardown_work);
+	atomic_set(&vblk->rq_timeout_count, timeout_teardown_limit);
+
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
 
 	vblk->disk->major = major;
@@ -1598,6 +1711,12 @@ static void virtblk_remove(struct virtio_device *vdev)
 {
 	struct virtio_blk *vblk = vdev->priv;
 
+	mutex_lock(&vblk->teardown_mutex);
+
+	/* we did the cleanup in the timeout handler */
+	if (!vblk->vdev)
+		goto unlock;
+
 	/* Make sure no work handler is accessing the device. */
 	flush_work(&vblk->config_work);
 
@@ -1618,6 +1737,9 @@ static void virtblk_remove(struct virtio_device *vdev)
 	mutex_unlock(&vblk->vdev_mutex);
 
 	put_disk(vblk->disk);
+
+unlock:
+	mutex_unlock(&vblk->teardown_mutex);
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h
index 3744e4da1b2a..ed864195ab26 100644
--- a/include/uapi/linux/virtio_blk.h
+++ b/include/uapi/linux/virtio_blk.h
@@ -317,6 +317,7 @@ struct virtio_scsi_inhdr {
 #define VIRTIO_BLK_S_OK		0
 #define VIRTIO_BLK_S_IOERR	1
 #define VIRTIO_BLK_S_UNSUPP	2
+#define VIRTIO_BLK_S_TIMEOUT	3
 
 /* Error codes that are specific to zoned block devices */
 #define VIRTIO_BLK_S_ZONE_INVALID_CMD     3
-- 
2.40.0


  reply	other threads:[~2023-11-30  7:02 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-11-30  7:01 [RFC PATCH 0/1] virtio-blk: handle block layer timedout request Chaitanya Kulkarni
2023-11-30  7:01 ` Chaitanya Kulkarni [this message]
2023-12-01  1:25   ` [RFC PATCH 1/1] virtio-blk: process " Stefan Hajnoczi
2024-01-09  3:33     ` Chaitanya Kulkarni
2024-01-22 17:47       ` Stefan Hajnoczi
2024-01-31  7:00         ` Chaitanya Kulkarni

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231130070133.8059-2-kch@nvidia.com \
    --to=kch@nvidia.com \
    --cc=axboe@kernel.dk \
    --cc=hch@lst.de \
    --cc=jasowang@redhat.com \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mst@redhat.com \
    --cc=pbonzini@redhat.com \
    --cc=stefanha@redhat.com \
    --cc=virtualization@lists.linux.dev \
    --cc=xuanzhuo@linux.alibaba.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).