From: Jens Axboe <axboe@kernel.dk>
To: linux-block@vger.kernel.org, linux-scsi@vger.kernel.org,
linux-ide@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>, David Miller <davem@davemloft.net>
Subject: [PATCH 01/28] sunvdc: convert to blk-mq
Date: Thu, 25 Oct 2018 15:10:12 -0600
Message-ID: <20181025211039.11559-2-axboe@kernel.dk>
In-Reply-To: <20181025211039.11559-1-axboe@kernel.dk>
Convert the sunvdc driver from the legacy request_fn interface to blk-mq.
do_vdc_request() becomes a ->queue_rq() handler: a full TX dring now stops
the hardware queue and returns BLK_STS_DEV_RESOURCE instead of stopping the
legacy queue, and submission failures complete the request with
BLK_STS_IOERR. The LDC reset timer is converted to delayed work, since the
drain path it may invoke now freezes/quiesces the queue and can sleep.
Draining on LDC timeout is done by setting a drain flag and freezing the
queue, failing the flushed-out requests in ->queue_rq().
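For reference, a minimal sketch of the ->queue_rq() contract this
conversion targets (a sketch only; hw_has_room() and hw_submit() are
hypothetical stand-ins, not functions from this driver):

	static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
					    const struct blk_mq_queue_data *bd)
	{
		/* hand the request to blk-mq accounting/timeout tracking */
		blk_mq_start_request(bd->rq);

		if (!hw_has_room(hctx)) {	/* hypothetical resource check */
			blk_mq_stop_hw_queue(hctx);
			/* core re-runs the queue once it is restarted */
			return BLK_STS_DEV_RESOURCE;
		}

		if (hw_submit(bd->rq) < 0)	/* hypothetical submission */
			return BLK_STS_IOERR;	/* fails the request */

		/* completion is signaled later via blk_mq_end_request() */
		return BLK_STS_OK;
	}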
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
drivers/block/sunvdc.c | 149 +++++++++++++++++++++++++++--------------
1 file changed, 98 insertions(+), 51 deletions(-)
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index b54fa6726303..95cb4ea8e402 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
@@ -66,9 +66,10 @@ struct vdc_port {
u64 max_xfer_size;
u32 vdisk_block_size;
+ u32 drain;
u64 ldc_timeout;
- struct timer_list ldc_reset_timer;
+ struct delayed_work ldc_reset_timer_work;
struct work_struct ldc_reset_work;
/* The server fills these in for us in the disk attribute
@@ -80,12 +81,14 @@ struct vdc_port {
u8 vdisk_mtype;
u32 vdisk_phys_blksz;
+ struct blk_mq_tag_set tag_set;
+
char disk_name[32];
};
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(struct timer_list *t);
+static void vdc_ldc_reset_timer_work(struct work_struct *work);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
@@ -175,11 +178,8 @@ static void vdc_blk_queue_start(struct vdc_port *port)
* handshake completes, so check for initial handshake before we've
* allocated a disk.
*/
- if (port->disk && blk_queue_stopped(port->disk->queue) &&
- vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
- blk_start_queue(port->disk->queue);
- }
-
+ if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
+ blk_mq_start_hw_queues(port->disk->queue);
}
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
@@ -197,7 +197,7 @@ static void vdc_handshake_complete(struct vio_driver_state *vio)
{
struct vdc_port *port = to_vdc_port(vio);
- del_timer(&port->ldc_reset_timer);
+ cancel_delayed_work(&port->ldc_reset_timer_work);
vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
vdc_blk_queue_start(port);
}
@@ -320,7 +320,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
- __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
+ blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);
vdc_blk_queue_start(port);
}
@@ -525,29 +525,40 @@ static int __send_request(struct request *req)
return err;
}
-static void do_vdc_request(struct request_queue *rq)
+static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
+ struct vdc_port *port = hctx->queue->queuedata;
+ struct vio_dring_state *dr;
+ unsigned long flags;
- while ((req = blk_peek_request(rq)) != NULL) {
- struct vdc_port *port;
- struct vio_dring_state *dr;
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- port = req->rq_disk->private_data;
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (unlikely(vdc_tx_dring_avail(dr) < 1))
- goto wait;
+ blk_mq_start_request(bd->rq);
- blk_start_request(req);
+ spin_lock_irqsave(&port->vio.lock, flags);
- if (__send_request(req) < 0) {
- blk_requeue_request(rq, req);
-wait:
- /* Avoid pointless unplugs. */
- blk_stop_queue(rq);
- break;
- }
+ /*
+ * Doing drain, just end the request in error
+ */
+ if (unlikely(port->drain)) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return BLK_STS_IOERR;
}
+
+ if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ blk_mq_stop_hw_queue(hctx);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
+ if (__send_request(bd->rq) < 0) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return BLK_STS_IOERR;
+ }
+
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return BLK_STS_OK;
}
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
@@ -759,6 +770,32 @@ static void vdc_port_down(struct vdc_port *port)
vio_ldc_free(&port->vio);
}
+static const struct blk_mq_ops vdc_mq_ops = {
+ .queue_rq = vdc_queue_rq,
+};
+
+static void cleanup_queue(struct request_queue *q)
+{
+ struct vdc_port *port = q->queuedata;
+
+ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&port->tag_set);
+}
+
+static struct request_queue *init_queue(struct vdc_port *port)
+{
+ struct request_queue *q;
+ int ret;
+
+ q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(q))
+ return q;
+
+ q->queuedata = port;
+ return q;
+}
+
static int probe_disk(struct vdc_port *port)
{
struct request_queue *q;
@@ -796,17 +833,17 @@ static int probe_disk(struct vdc_port *port)
(u64)geom.num_sec);
}
- q = blk_init_queue(do_vdc_request, &port->vio.lock);
- if (!q) {
+ q = init_queue(port);
+ if (IS_ERR(q)) {
printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
port->vio.name);
- return -ENOMEM;
+ return PTR_ERR(q);
}
g = alloc_disk(1 << PARTITION_SHIFT);
if (!g) {
printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
port->vio.name);
- blk_cleanup_queue(q);
+ cleanup_queue(q);
return -ENOMEM;
}
@@ -981,7 +1018,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
- timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
+ INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1034,18 +1071,14 @@ static int vdc_port_remove(struct vio_dev *vdev)
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- unsigned long flags;
-
- spin_lock_irqsave(&port->vio.lock, flags);
- blk_stop_queue(port->disk->queue);
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ blk_mq_stop_hw_queues(port->disk->queue);
flush_work(&port->ldc_reset_work);
- del_timer_sync(&port->ldc_reset_timer);
+ cancel_delayed_work_sync(&port->ldc_reset_timer_work);
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
- blk_cleanup_queue(port->disk->queue);
+ cleanup_queue(port->disk->queue);
put_disk(port->disk);
port->disk = NULL;
@@ -1080,32 +1113,46 @@ static void vdc_requeue_inflight(struct vdc_port *port)
}
rqe->req = NULL;
- blk_requeue_request(port->disk->queue, req);
+ blk_mq_requeue_request(req, false);
}
}
static void vdc_queue_drain(struct vdc_port *port)
{
- struct request *req;
+ struct request_queue *q = port->disk->queue;
- while ((req = blk_fetch_request(port->disk->queue)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
+ /*
+ * Mark the queue as draining, then freeze/quiesce to ensure
+ * that all existing requests are seen in ->queue_rq() and killed
+ */
+ port->drain = 1;
+ spin_unlock_irq(&port->vio.lock);
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ spin_lock_irq(&port->vio.lock);
+ port->drain = 0;
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
}
-static void vdc_ldc_reset_timer(struct timer_list *t)
+static void vdc_ldc_reset_timer_work(struct work_struct *work)
{
- struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
- struct vio_driver_state *vio = &port->vio;
- unsigned long flags;
+ struct vdc_port *port;
+ struct vio_driver_state *vio;
- spin_lock_irqsave(&vio->lock, flags);
+ port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
+ vio = &port->vio;
+
+ spin_lock_irq(&vio->lock);
if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
port->disk_name, port->ldc_timeout);
vdc_queue_drain(port);
vdc_blk_queue_start(port);
}
- spin_unlock_irqrestore(&vio->lock, flags);
+ spin_unlock_irq(&vio->lock);
}
static void vdc_ldc_reset_work(struct work_struct *work)
@@ -1129,7 +1176,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
assert_spin_locked(&port->vio.lock);
pr_warn(PFX "%s ldc link reset\n", port->disk_name);
- blk_stop_queue(port->disk->queue);
+ blk_mq_stop_hw_queues(port->disk->queue);
vdc_requeue_inflight(port);
vdc_port_down(port);
@@ -1146,7 +1193,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_timer(&port->ldc_reset_timer,
+ mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
--
2.17.1