* [PATCH 0/2] vhost-scsi: Support worker ioctls
@ 2023-11-14 0:36 Mike Christie
2023-11-14 0:36 ` [PATCH 1/2] vhost: Add worker backend callouts Mike Christie
2023-11-14 0:36 ` [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
0 siblings, 2 replies; 8+ messages in thread
From: Mike Christie @ 2023-11-14 0:36 UTC (permalink / raw)
To: fam, stefanha, jasowang, mst, sgarzare, pbonzini, qemu-devel
The following patches allow users to configure the vhost worker threads
for vhost-scsi. With vhost-net we get a worker thread per rx/tx virtqueue
pair, but for vhost-scsi we get one worker for all virtqueues. This
becomes a bottleneck once more than 2 queues are used.
In the upstream Linux kernel commit:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/drivers/vhost/vhost.c?id=c1ecd8e9500797748ae4f79657971955d452d69d
we enabled the vhost layer to create worker threads and attach them to
virtqueues.
This patchset adds support to vhost-scsi to use these ioctls so we are
no longer limited to the single worker.
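For background, the flow these ioctls enable looks roughly like the
following from userspace. This is a minimal sketch, assuming a 6.4+ kernel
and the UAPI definitions in <linux/vhost.h>; error handling and the rest
of device setup are omitted:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Sketch: create a kernel worker thread and bind it to one virtqueue.
     * Assumes vhost_fd is an open vhost device fd on which VHOST_SET_OWNER
     * has already been done. */
    static int bind_new_worker(int vhost_fd, unsigned int vq_index)
    {
        struct vhost_worker_state worker = {0};
        struct vhost_vring_worker vq_worker = {0};

        /* The kernel creates a worker thread and returns its id. */
        if (ioctl(vhost_fd, VHOST_NEW_WORKER, &worker) < 0)
            return -1;

        /* Attach the new worker to the given virtqueue. */
        vq_worker.worker_id = worker.worker_id;
        vq_worker.index = vq_index;
        return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vq_worker);
    }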
* [PATCH 1/2] vhost: Add worker backend callouts
2023-11-14 0:36 [PATCH 0/2] vhost-scsi: Support worker ioctls Mike Christie
@ 2023-11-14 0:36 ` Mike Christie
2023-11-14 0:36 ` [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
1 sibling, 0 replies; 8+ messages in thread
From: Mike Christie @ 2023-11-14 0:36 UTC (permalink / raw)
To: fam, stefanha, jasowang, mst, sgarzare, pbonzini, qemu-devel
Cc: Mike Christie
This adds the vhost backend callouts for the worker ioctls added in the
6.4 Linux kernel commit:
c1ecd8e95007 ("vhost: allow userspace to create workers")
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
hw/virtio/vhost-backend.c | 28 ++++++++++++++++++++++++++++
include/hw/virtio/vhost-backend.h | 14 ++++++++++++++
2 files changed, 42 insertions(+)
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 17f3fc6a0823..833804dd40f2 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -158,6 +158,30 @@ static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}
+static int vhost_kernel_new_worker(struct vhost_dev *dev,
+ struct vhost_worker_state *worker)
+{
+ return vhost_kernel_call(dev, VHOST_NEW_WORKER, worker);
+}
+
+static int vhost_kernel_free_worker(struct vhost_dev *dev,
+ struct vhost_worker_state *worker)
+{
+ return vhost_kernel_call(dev, VHOST_FREE_WORKER, worker);
+}
+
+static int vhost_kernel_attach_vring_worker(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker)
+{
+ return vhost_kernel_call(dev, VHOST_ATTACH_VRING_WORKER, worker);
+}
+
+static int vhost_kernel_get_vring_worker(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker)
+{
+ return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker);
+}
+
static int vhost_kernel_set_features(struct vhost_dev *dev,
uint64_t features)
{
@@ -313,6 +337,10 @@ const VhostOps kernel_ops = {
.vhost_set_vring_err = vhost_kernel_set_vring_err,
.vhost_set_vring_busyloop_timeout =
vhost_kernel_set_vring_busyloop_timeout,
+ .vhost_get_vring_worker = vhost_kernel_get_vring_worker,
+ .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker,
+ .vhost_new_worker = vhost_kernel_new_worker,
+ .vhost_free_worker = vhost_kernel_free_worker,
.vhost_set_features = vhost_kernel_set_features,
.vhost_get_features = vhost_kernel_get_features,
.vhost_set_backend_cap = vhost_kernel_set_backend_cap,
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 96ccc18cd33b..9f16d0884e8f 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -33,6 +33,8 @@ struct vhost_memory;
struct vhost_vring_file;
struct vhost_vring_state;
struct vhost_vring_addr;
+struct vhost_vring_worker;
+struct vhost_worker_state;
struct vhost_scsi_target;
struct vhost_iotlb_msg;
struct vhost_virtqueue;
@@ -73,6 +75,14 @@ typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
struct vhost_vring_state *r);
+typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker);
+typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev,
+ struct vhost_vring_worker *worker);
+typedef int (*vhost_new_worker_op)(struct vhost_dev *dev,
+ struct vhost_worker_state *worker);
+typedef int (*vhost_free_worker_op)(struct vhost_dev *dev,
+ struct vhost_worker_state *worker);
typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
uint64_t features);
typedef int (*vhost_get_features_op)(struct vhost_dev *dev,
@@ -151,6 +161,10 @@ typedef struct VhostOps {
vhost_set_vring_call_op vhost_set_vring_call;
vhost_set_vring_err_op vhost_set_vring_err;
vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
+ vhost_new_worker_op vhost_new_worker;
+ vhost_free_worker_op vhost_free_worker;
+ vhost_get_vring_worker_op vhost_get_vring_worker;
+ vhost_attach_vring_worker_op vhost_attach_vring_worker;
vhost_set_features_op vhost_set_features;
vhost_get_features_op vhost_get_features;
vhost_set_backend_cap_op vhost_set_backend_cap;
--
2.34.1
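Note that these are optional backend ops, so a caller is expected to probe
for them before use. A minimal, hypothetical sketch of the calling
convention (mirroring how patch 2/2 uses it):

    struct vhost_worker_state worker = {0};
    int ret;

    /* The op may be absent (non-kernel backends) and the ioctl may be
     * unimplemented on pre-6.4 kernels, which return -ENOTTY. */
    if (dev->vhost_ops->vhost_new_worker) {
        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
        if (ret == -ENOTTY) {
            ret = 0; /* fall back to the default worker */
        }
    }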
* [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue
2023-11-14 0:36 [PATCH 0/2] vhost-scsi: Support worker ioctls Mike Christie
2023-11-14 0:36 ` [PATCH 1/2] vhost: Add worker backend callouts Mike Christie
@ 2023-11-14 0:36 ` Mike Christie
2023-11-15 11:43 ` Stefano Garzarella
1 sibling, 1 reply; 8+ messages in thread
From: Mike Christie @ 2023-11-14 0:36 UTC (permalink / raw)
To: fam, stefanha, jasowang, mst, sgarzare, pbonzini, qemu-devel
Cc: Mike Christie
This adds support for vhost-scsi to be able to create a worker thread
per virtqueue. Right now for vhost-net we get a worker thread per
tx/rx virtqueue pair which scales nicely as we add more virtqueues and
CPUs, but for scsi we get the single worker thread that's shared by all
virtqueues. When trying to send IO to more than 2 virtqueues the single
thread becomes a bottleneck.
This patch adds a new setting, virtqueue_workers, which can be set to:
1: Existing behavior where we get the single thread.
-1: Create a worker per IO virtqueue.
Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
hw/scsi/vhost-scsi.c | 68 +++++++++++++++++++++++++++++++++
include/hw/virtio/virtio-scsi.h | 1 +
2 files changed, 69 insertions(+)
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 3126df9e1d9d..5cf669b6563b 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -31,6 +31,9 @@
#include "qemu/cutils.h"
#include "sysemu/sysemu.h"
+#define VHOST_SCSI_WORKER_PER_VQ -1
+#define VHOST_SCSI_WORKER_DEF 1
+
/* Features supported by host kernel. */
static const int kernel_feature_bits[] = {
VIRTIO_F_NOTIFY_ON_EMPTY,
@@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
.pre_save = vhost_scsi_pre_save,
};
+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
+{
+ struct vhost_dev *dev = &vsc->dev;
+ struct vhost_vring_worker vq_worker;
+ struct vhost_worker_state worker;
+ int i, ret;
+
+ /* Use default worker */
+ if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
+ dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
+ return 0;
+ }
+
+ if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
+ return -EINVAL;
+ }
+
+ /*
+ * ctl/evt share the first worker since it will be rare for them
+ * to send cmds while IO is running.
+ */
+ for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
+ memset(&worker, 0, sizeof(worker));
+
+ ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
+ if (ret == -ENOTTY) {
+ /*
+ * worker ioctls are not implemented so just ignore them
+ * and continue device setup.
+ */
+ ret = 0;
+ break;
+ } else if (ret) {
+ break;
+ }
+
+ memset(&vq_worker, 0, sizeof(vq_worker));
+ vq_worker.worker_id = worker.worker_id;
+ vq_worker.index = i;
+
+ ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
+ if (ret == -ENOTTY) {
+ /*
+ * It's a bug for the kernel to have supported the worker creation
+ * ioctl but not attach.
+ */
+ dev->vhost_ops->vhost_free_worker(dev, &worker);
+ break;
+ } else if (ret) {
+ break;
+ }
+ }
+
+ return ret;
+}
+
static void vhost_scsi_realize(DeviceState *dev, Error **errp)
{
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
@@ -232,6 +291,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
goto free_vqs;
}
+ ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
+ if (ret < 0) {
+ error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
+ strerror(-ret));
+ goto free_vqs;
+ }
+
/* At present, channel and lun both are 0 for bootable vhost-scsi disk */
vsc->channel = 0;
vsc->lun = 0;
@@ -297,6 +363,8 @@ static Property vhost_scsi_properties[] = {
VIRTIO_SCSI_F_T10_PI,
false),
DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
+ DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
+ conf.virtqueue_workers, VHOST_SCSI_WORKER_DEF),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
index 779568ab5d28..f70624ece564 100644
--- a/include/hw/virtio/virtio-scsi.h
+++ b/include/hw/virtio/virtio-scsi.h
@@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
struct VirtIOSCSIConf {
uint32_t num_queues;
uint32_t virtqueue_size;
+ int virtqueue_workers;
bool seg_max_adjust;
uint32_t max_sectors;
uint32_t cmd_per_lun;
--
2.34.1
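For illustration, with this patch applied a user would enable per-virtqueue
workers from the command line along these lines (a hypothetical invocation;
the wwpn value and queue count are examples only):

    qemu-system-x86_64 ... \
        -device vhost-scsi-pci,wwpn=naa.5001405e1153e482,num_queues=8,virtqueue_workers=-1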
* Re: [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue
2023-11-14 0:36 ` [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue Mike Christie
@ 2023-11-15 11:43 ` Stefano Garzarella
2023-11-15 12:57 ` Stefan Hajnoczi
2023-11-15 16:48 ` Mike Christie
0 siblings, 2 replies; 8+ messages in thread
From: Stefano Garzarella @ 2023-11-15 11:43 UTC (permalink / raw)
To: Mike Christie; +Cc: fam, stefanha, jasowang, mst, pbonzini, qemu-devel
On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
>This adds support for vhost-scsi to be able to create a worker thread
>per virtqueue. Right now for vhost-net we get a worker thread per
>tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>CPUs, but for scsi we get the single worker thread that's shared by all
>virtqueues. When trying to send IO to more than 2 virtqueues the single
>thread becomes a bottleneck.
>
>This patch adds a new setting, virtqueue_workers, which can be set to:
>
>1: Existing behavior where we get the single thread.
>-1: Create a worker per IO virtqueue.
I find this setting a bit odd. What about a boolean instead?
`per_virtqueue_workers`:
false: Existing behavior where we get the single thread.
true: Create a worker per IO virtqueue.
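As a sketch of what I mean, the property declaration could become
something like this (with the conf field renamed to match):

    DEFINE_PROP_BOOL("per_virtqueue_workers", VirtIOSCSICommon,
                     conf.per_virtqueue_workers, false),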
>
>Signed-off-by: Mike Christie <michael.christie@oracle.com>
>---
> hw/scsi/vhost-scsi.c | 68 +++++++++++++++++++++++++++++++++
> include/hw/virtio/virtio-scsi.h | 1 +
> 2 files changed, 69 insertions(+)
>
>diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
>index 3126df9e1d9d..5cf669b6563b 100644
>--- a/hw/scsi/vhost-scsi.c
>+++ b/hw/scsi/vhost-scsi.c
>@@ -31,6 +31,9 @@
> #include "qemu/cutils.h"
> #include "sysemu/sysemu.h"
>
>+#define VHOST_SCSI_WORKER_PER_VQ -1
>+#define VHOST_SCSI_WORKER_DEF 1
>+
> /* Features supported by host kernel. */
> static const int kernel_feature_bits[] = {
> VIRTIO_F_NOTIFY_ON_EMPTY,
>@@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
> .pre_save = vhost_scsi_pre_save,
> };
>
>+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
>+{
>+ struct vhost_dev *dev = &vsc->dev;
>+ struct vhost_vring_worker vq_worker;
>+ struct vhost_worker_state worker;
>+ int i, ret;
>+
>+ /* Use default worker */
>+ if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
>+ dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
>+ return 0;
>+ }
>+
>+ if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
>+ return -EINVAL;
>+ }
>+
>+ /*
>+ * ctl/evt share the first worker since it will be rare for them
>+ * to send cmds while IO is running.
>+ */
>+ for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
>+ memset(&worker, 0, sizeof(worker));
>+
>+ ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
Should we call vhost_free_worker() in the vhost_scsi_unrealize() or are
workers automatically freed when `vhostfd` is closed?
The rest LGTM.
Thanks,
Stefano
>+ if (ret == -ENOTTY) {
>+ /*
>+ * worker ioctls are not implemented so just ignore them
>+ * and continue device setup.
>+ */
>+ ret = 0;
>+ break;
>+ } else if (ret) {
>+ break;
>+ }
>+
>+ memset(&vq_worker, 0, sizeof(vq_worker));
>+ vq_worker.worker_id = worker.worker_id;
>+ vq_worker.index = i;
>+
>+ ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
>+ if (ret == -ENOTTY) {
>+ /*
>+ * It's a bug for the kernel to have supported the worker creation
>+ * ioctl but not attach.
>+ */
>+ dev->vhost_ops->vhost_free_worker(dev, &worker);
>+ break;
>+ } else if (ret) {
>+ break;
>+ }
>+ }
>+
>+ return ret;
>+}
>+
> static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> {
> VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
>@@ -232,6 +291,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> goto free_vqs;
> }
>
>+ ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
>+ if (ret < 0) {
>+ error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
>+ strerror(-ret));
>+ goto free_vqs;
>+ }
>+
> /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
> vsc->channel = 0;
> vsc->lun = 0;
>@@ -297,6 +363,8 @@ static Property vhost_scsi_properties[] = {
> VIRTIO_SCSI_F_T10_PI,
> false),
> DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
>+ DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
>+ conf.virtqueue_workers, VHOST_SCSI_WORKER_DEF),
> DEFINE_PROP_END_OF_LIST(),
> };
>
>diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
>index 779568ab5d28..f70624ece564 100644
>--- a/include/hw/virtio/virtio-scsi.h
>+++ b/include/hw/virtio/virtio-scsi.h
>@@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
> struct VirtIOSCSIConf {
> uint32_t num_queues;
> uint32_t virtqueue_size;
>+ int virtqueue_workers;
> bool seg_max_adjust;
> uint32_t max_sectors;
> uint32_t cmd_per_lun;
>--
>2.34.1
>
* Re: [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue
2023-11-15 11:43 ` Stefano Garzarella
@ 2023-11-15 12:57 ` Stefan Hajnoczi
2023-11-15 16:56 ` Mike Christie
2023-11-15 16:48 ` Mike Christie
1 sibling, 1 reply; 8+ messages in thread
From: Stefan Hajnoczi @ 2023-11-15 12:57 UTC (permalink / raw)
To: Stefano Garzarella
Cc: Mike Christie, fam, jasowang, mst, pbonzini, qemu-devel
On Wed, Nov 15, 2023 at 12:43:02PM +0100, Stefano Garzarella wrote:
> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
> > This adds support for vhost-scsi to be able to create a worker thread
> > per virtqueue. Right now for vhost-net we get a worker thread per
> > tx/rx virtqueue pair which scales nicely as we add more virtqueues and
> > CPUs, but for scsi we get the single worker thread that's shared by all
> > virtqueues. When trying to send IO to more than 2 virtqueues the single
> > thread becomes a bottleneck.
> >
> > This patch adds a new setting, virtqueue_workers, which can be set to:
> >
> > 1: Existing behavior where we get the single thread.
> > -1: Create a worker per IO virtqueue.
>
> I find this setting a bit odd. What about a boolean instead?
>
> `per_virtqueue_workers`:
> false: Existing behavior where we get the single thread.
> true: Create a worker per IO virtqueue.
Me too, I thought there would be round-robin assignment for 1 <
worker_cnt < (dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED) but instead only 1
and -1 have any meaning.
Do you want to implement round-robin assignment?
>
> >
> > Signed-off-by: Mike Christie <michael.christie@oracle.com>
> > ---
> > hw/scsi/vhost-scsi.c | 68 +++++++++++++++++++++++++++++++++
> > include/hw/virtio/virtio-scsi.h | 1 +
> > 2 files changed, 69 insertions(+)
> >
> > diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
> > index 3126df9e1d9d..5cf669b6563b 100644
> > --- a/hw/scsi/vhost-scsi.c
> > +++ b/hw/scsi/vhost-scsi.c
> > @@ -31,6 +31,9 @@
> > #include "qemu/cutils.h"
> > #include "sysemu/sysemu.h"
> >
> > +#define VHOST_SCSI_WORKER_PER_VQ -1
> > +#define VHOST_SCSI_WORKER_DEF 1
> > +
> > /* Features supported by host kernel. */
> > static const int kernel_feature_bits[] = {
> > VIRTIO_F_NOTIFY_ON_EMPTY,
> > @@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
> > .pre_save = vhost_scsi_pre_save,
> > };
> >
> > +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
> > +{
> > + struct vhost_dev *dev = &vsc->dev;
> > + struct vhost_vring_worker vq_worker;
> > + struct vhost_worker_state worker;
> > + int i, ret;
> > +
> > + /* Use default worker */
> > + if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
> > + dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
> > + return 0;
> > + }
> > +
> > + if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
> > + return -EINVAL;
> > + }
> > +
> > + /*
> > + * ctl/evt share the first worker since it will be rare for them
> > + * to send cmds while IO is running.
> > + */
> > + for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
> > + memset(&worker, 0, sizeof(worker));
> > +
> > + ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
>
> Should we call vhost_free_worker() in the vhost_scsi_unrealize() or are
> workers automatically freed when `vhostfd` is closed?
>
> The rest LGTM.
>
> Thanks,
> Stefano
>
> > + if (ret == -ENOTTY) {
> > + /*
> > + * worker ioctls are not implemented so just ignore them
> > + * and continue device setup.
> > + */
> > + ret = 0;
> > + break;
> > + } else if (ret) {
> > + break;
> > + }
> > +
> > + memset(&vq_worker, 0, sizeof(vq_worker));
> > + vq_worker.worker_id = worker.worker_id;
> > + vq_worker.index = i;
> > +
> > + ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
> > + if (ret == -ENOTTY) {
> > + /*
> > + * It's a bug for the kernel to have supported the worker creation
> > + * ioctl but not attach.
> > + */
> > + dev->vhost_ops->vhost_free_worker(dev, &worker);
> > + break;
> > + } else if (ret) {
> > + break;
> > + }
> > + }
> > +
> > + return ret;
> > +}
> > +
> > static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> > {
> > VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
> > @@ -232,6 +291,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
> > goto free_vqs;
> > }
> >
> > + ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
> > + if (ret < 0) {
> > + error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
> > + strerror(-ret));
> > + goto free_vqs;
> > + }
> > +
> > /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
> > vsc->channel = 0;
> > vsc->lun = 0;
> > @@ -297,6 +363,8 @@ static Property vhost_scsi_properties[] = {
> > VIRTIO_SCSI_F_T10_PI,
> > false),
> > DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
> > + DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
> > + conf.virtqueue_workers, VHOST_SCSI_WORKER_DEF),
> > DEFINE_PROP_END_OF_LIST(),
> > };
> >
> > diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h
> > index 779568ab5d28..f70624ece564 100644
> > --- a/include/hw/virtio/virtio-scsi.h
> > +++ b/include/hw/virtio/virtio-scsi.h
> > @@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
> > struct VirtIOSCSIConf {
> > uint32_t num_queues;
> > uint32_t virtqueue_size;
> > + int virtqueue_workers;
> > bool seg_max_adjust;
> > uint32_t max_sectors;
> > uint32_t cmd_per_lun;
> > --
> > 2.34.1
> >
>
* Re: [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue
2023-11-15 11:43 ` Stefano Garzarella
2023-11-15 12:57 ` Stefan Hajnoczi
@ 2023-11-15 16:48 ` Mike Christie
1 sibling, 0 replies; 8+ messages in thread
From: Mike Christie @ 2023-11-15 16:48 UTC (permalink / raw)
To: Stefano Garzarella; +Cc: fam, stefanha, jasowang, mst, pbonzini, qemu-devel
On 11/15/23 5:43 AM, Stefano Garzarella wrote:
> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
>> This adds support for vhost-scsi to be able to create a worker thread
>> per virtqueue. Right now for vhost-net we get a worker thread per
>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>> CPUs, but for scsi we get the single worker thread that's shared by all
>> virtqueues. When trying to send IO to more than 2 virtqueues the single
>> thread becomes a bottleneck.
>>
>> This patch adds a new setting, virtqueue_workers, which can be set to:
>>
>> 1: Existing behavior where we get the single thread.
>> -1: Create a worker per IO virtqueue.
>
> I find this setting a bit odd. What about a boolean instead?
>
> `per_virtqueue_workers`:
> false: Existing behavior where we get the single thread.
> true: Create a worker per IO virtqueue.
Sounds good.
>
>>
>> Signed-off-by: Mike Christie <michael.christie@oracle.com>
>> ---
>> hw/scsi/vhost-scsi.c | 68 +++++++++++++++++++++++++++++++++
>> include/hw/virtio/virtio-scsi.h | 1 +
>> 2 files changed, 69 insertions(+)
>>
>> diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
>> index 3126df9e1d9d..5cf669b6563b 100644
>> --- a/hw/scsi/vhost-scsi.c
>> +++ b/hw/scsi/vhost-scsi.c
>> @@ -31,6 +31,9 @@
>> #include "qemu/cutils.h"
>> #include "sysemu/sysemu.h"
>>
>> +#define VHOST_SCSI_WORKER_PER_VQ -1
>> +#define VHOST_SCSI_WORKER_DEF 1
>> +
>> /* Features supported by host kernel. */
>> static const int kernel_feature_bits[] = {
>> VIRTIO_F_NOTIFY_ON_EMPTY,
>> @@ -165,6 +168,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
>> .pre_save = vhost_scsi_pre_save,
>> };
>>
>> +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int workers_cnt)
>> +{
>> + struct vhost_dev *dev = &vsc->dev;
>> + struct vhost_vring_worker vq_worker;
>> + struct vhost_worker_state worker;
>> + int i, ret;
>> +
>> + /* Use default worker */
>> + if (workers_cnt == VHOST_SCSI_WORKER_DEF ||
>> + dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
>> + return 0;
>> + }
>> +
>> + if (workers_cnt != VHOST_SCSI_WORKER_PER_VQ) {
>> + return -EINVAL;
>> + }
>> +
>> + /*
>> + * ctl/evt share the first worker since it will be rare for them
>> + * to send cmds while IO is running.
>> + */
>> + for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
>> + memset(&worker, 0, sizeof(worker));
>> +
>> + ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
>
> Should we call vhost_free_worker() in the vhost_scsi_unrealize() or are
> workers automatically freed when `vhostfd` is closed?
>
All worker threads are freed automatically on close, just like the
default worker created by VHOST_SET_OWNER.
* Re: [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue
2023-11-15 12:57 ` Stefan Hajnoczi
@ 2023-11-15 16:56 ` Mike Christie
2023-11-15 17:24 ` Stefan Hajnoczi
0 siblings, 1 reply; 8+ messages in thread
From: Mike Christie @ 2023-11-15 16:56 UTC (permalink / raw)
To: Stefan Hajnoczi, Stefano Garzarella
Cc: fam, jasowang, mst, pbonzini, qemu-devel
On 11/15/23 6:57 AM, Stefan Hajnoczi wrote:
> On Wed, Nov 15, 2023 at 12:43:02PM +0100, Stefano Garzarella wrote:
>> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
>>> This adds support for vhost-scsi to be able to create a worker thread
>>> per virtqueue. Right now for vhost-net we get a worker thread per
>>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
>>> CPUs, but for scsi we get the single worker thread that's shared by all
>>> virtqueues. When trying to send IO to more than 2 virtqueues the single
>>> thread becomes a bottleneck.
>>>
>>> This patch adds a new setting, virtqueue_workers, which can be set to:
>>>
>>> 1: Existing behavior where we get the single thread.
>>> -1: Create a worker per IO virtqueue.
>>
>> I find this setting a bit odd. What about a boolean instead?
>>
>> `per_virtqueue_workers`:
>> false: Existing behavior where we get the single thread.
>> true: Create a worker per IO virtqueue.
>
> Me too, I thought there would be round-robin assignment for 1 <
> worker_cnt < (dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED) but instead only 1
> and -1 have any meaning.
>
> Do you want to implement round-robin assignment?
>
It was an int because I originally did round robin but at some point
dropped it. I found that our users at least:
1. Are used to configuring the number of virtqueues.
2. In the guest OS's userspace, are used to checking the queue to CPU
mappings to figure out how their app should optimize itself.
So users would just do a virtqueue per vCPU, or if trying to reduce
mem usage would do N virtqueues < vCPUs. In both cases they just did a
worker per virtqueue.
However, I left it an int in case someone wanted to add that feature
in the future.
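As a purely hypothetical sketch, a round-robin mode inside
vhost_scsi_set_workers would look something like this (error handling
and cleanup omitted):

    struct vhost_worker_state workers[workers_cnt]; /* VLA just for the sketch */
    struct vhost_vring_worker vq_worker;
    int i;

    /* Create workers_cnt workers up front... */
    for (i = 0; i < workers_cnt; i++) {
        memset(&workers[i], 0, sizeof(workers[i]));
        dev->vhost_ops->vhost_new_worker(dev, &workers[i]);
    }

    /* ...then spread the IO virtqueues across them round robin. */
    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
        memset(&vq_worker, 0, sizeof(vq_worker));
        vq_worker.worker_id =
            workers[(i - VHOST_SCSI_VQ_NUM_FIXED - 1) % workers_cnt].worker_id;
        vq_worker.index = i;
        dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
    }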
* Re: [PATCH 2/2] vhost-scsi: Add support for a worker thread per virtqueue
2023-11-15 16:56 ` Mike Christie
@ 2023-11-15 17:24 ` Stefan Hajnoczi
0 siblings, 0 replies; 8+ messages in thread
From: Stefan Hajnoczi @ 2023-11-15 17:24 UTC (permalink / raw)
To: Mike Christie
Cc: Stefan Hajnoczi, Stefano Garzarella, fam, jasowang, mst, pbonzini,
qemu-devel
On Wed, 15 Nov 2023 at 11:57, Mike Christie <michael.christie@oracle.com> wrote:
>
> On 11/15/23 6:57 AM, Stefan Hajnoczi wrote:
> > On Wed, Nov 15, 2023 at 12:43:02PM +0100, Stefano Garzarella wrote:
> >> On Mon, Nov 13, 2023 at 06:36:44PM -0600, Mike Christie wrote:
> >>> This adds support for vhost-scsi to be able to create a worker thread
> >>> per virtqueue. Right now for vhost-net we get a worker thread per
> >>> tx/rx virtqueue pair which scales nicely as we add more virtqueues and
> >>> CPUs, but for scsi we get the single worker thread that's shared by all
> >>> virtqueues. When trying to send IO to more than 2 virtqueues the single
> >>> thread becomes a bottleneck.
> >>>
> >>> This patch adds a new setting, virtqueue_workers, which can be set to:
> >>>
> >>> 1: Existing behavior where we get the single thread.
> >>> -1: Create a worker per IO virtqueue.
> >>
> >> I find this setting a bit odd. What about a boolean instead?
> >>
> >> `per_virtqueue_workers`:
> >> false: Existing behavior where we get the single thread.
> >> true: Create a worker per IO virtqueue.
> >
> > Me too, I thought there would be round-robin assignment for 1 <
> > worker_cnt < (dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED) but instead only 1
> > and -1 have any meaning.
> >
> > Do you want to implement round-robin assignment?
> >
>
> It was an int because I originally did round robin but at some point
> dropped it. I found that our users at least:
>
> 1. Are used to configuring number of virtqueues.
> 2. In the userspace guest OS are used to checking the queue to CPU
> mappings to figure out how their app should optimize itself.
>
> So users would just do a virtqueue per vCPU or if trying to reduce
> mem usage would do N virtqueues < vCPUs. For both cases they just did the
> worker per virtqueue.
>
> However, I left it an int in case in the future someone wanted
> the future.
Okay. I have a slight preference for a vq_workers bool parameter in
that case. An int parameter can be added later if the number of
workers needs to be specified. But if you want to keep -1/1 then I
guess it's fine unless someone else has a strong opinion.
Stefan