From: Asias He <asias@redhat.com>
To: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: kvm@vger.kernel.org, "Michael S. Tsirkin" <mst@redhat.com>,
virtualization@lists.linux-foundation.org,
target-devel@vger.kernel.org,
Stefan Hajnoczi <stefanha@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: Re: [PATCH] tcm_vhost: Multi-queue support
Date: Wed, 06 Feb 2013 17:51:34 +0800
Message-ID: <511227A6.4000107@redhat.com>
In-Reply-To: <1360139997.25879.189.camel@haakon2.linux-iscsi.org>
On 02/06/2013 04:39 PM, Nicholas A. Bellinger wrote:
> On Wed, 2013-02-06 at 15:09 +0800, Asias He wrote:
>> On 02/06/2013 02:45 PM, Nicholas A. Bellinger wrote:
>>> On Wed, 2013-02-06 at 13:20 +0800, Asias He wrote:
>>>> This adds virtio-scsi multi-queue support to tcm_vhost.
>>>>
>>>> Guest side virtio-scsi multi-queue support can be found here:
>>>>
>>>> https://lkml.org/lkml/2012/12/18/166
>>>>
>>>> Some initial perf numbers:
>>>> 1 queue, 4 targets, 1 lun per target
>>>> 4K request size, 50% randread + 50% randwrite: 127K/127K IOPS
>>>>
>>>> 4 queues, 4 targets, 1 lun per target
>>>> 4K request size, 50% randread + 50% randwrite: 181K/181K IOPS
>>>>
>>>
>>> Nice single LUN small block random I/O improvement here with 4x vqueues.
>>>
>>> Curious to see how virtio-scsi small block performance looks with
>>> SCSI-core to multi-LUN tcm_vhost endpoints as well.. 8-)
>>
>> Do you mean something like this?
>>
>> 1 queue, 2 targets, 2 lun per target
>> 4 queue, 2 targets, 2 lun per target
>>
>>> Btw, this does not apply atop current target-pending.git/for-next with
>>> your other pending vhost patch series, and AFAICT this patch is supposed
>>> to apply on top of your last PATCH-v3, no..?
>>
>> Ah, this applies on top of mst's 'tcm_vhost: fix pr_err on early kick'
>> patch, plus my last v3 of 'tcm_vhost: Multi-target support'.
>>
>
> In that case, applying this patch + PATCH-v3 to auto-next for testing
> for the moment, and will respin for-next against upstream w/ MST's patch
> shortly.
Okay. Looking forward to more perf numbers.
> Also, please include a proper changelog for this second patch. :)
Sure.
---->
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest-side multi-queue support is needed as well. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, the vhost core creates only one thread per vhost_scsi
instance. Even with multiple queues, all of the guest-kick handling
(vhost_scsi_handle_kick) is processed in that single thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127K IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181K IOPS
Signed-off-by: Asias He <asias@redhat.com>
<----
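For reference, the interesting part of the diff below is the completion
path: instead of unconditionally signaling vqs[2], the completion work
now records in a bitmap which virtqueues actually had completions and
signals each of them exactly once. A condensed sketch of that pattern,
pulled from the hunks below (response copy-out and error handling
elided; the llist member name tvc_completion_list is truncated in the
quoted hunk, so take it as read from the existing driver):

	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct llist_node *llnode;
	struct tcm_vhost_cmd *tv_cmd;
	int vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				tvc_completion_list);
		llnode = llist_next(llnode);
		/* ... copy virtio_scsi_cmd_resp back to the guest ... */

		/* Post the used entry on the vq this command arrived
		 * on, and remember that vq's index for signaling. */
		vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
		vq = tv_cmd->tvc_vq - vs->vqs;
		__set_bit(vq, signal);

		vhost_scsi_free_cmd(tv_cmd);
	}

	/* Signal each virtqueue that saw completions exactly once,
	 * so notifications scale with the number of vqs rather than
	 * the number of completed commands. */
	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
			< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq]);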
> Thank you!
>
> --nab
>
>
>
>>> --nab
>>>
>>>> Signed-off-by: Asias He <asias@redhat.com>
>>>> ---
>>>> drivers/vhost/tcm_vhost.c | 46 +++++++++++++++++++++++++++++-----------------
>>>> drivers/vhost/tcm_vhost.h | 2 ++
>>>> 2 files changed, 31 insertions(+), 17 deletions(-)
>>>>
>>>> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
>>>> index 81ecda5..9951297 100644
>>>> --- a/drivers/vhost/tcm_vhost.c
>>>> +++ b/drivers/vhost/tcm_vhost.c
>>>> @@ -48,6 +48,7 @@
>>>> #include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
>>>> #include <linux/virtio_scsi.h>
>>>> #include <linux/llist.h>
>>>> +#include <linux/bitmap.h>
>>>>
>>>> #include "vhost.c"
>>>> #include "vhost.h"
>>>> @@ -59,7 +60,8 @@ enum {
>>>> VHOST_SCSI_VQ_IO = 2,
>>>> };
>>>>
>>>> -#define VHOST_SCSI_MAX_TARGET 256
>>>> +#define VHOST_SCSI_MAX_TARGET 256
>>>> +#define VHOST_SCSI_MAX_VQ 128
>>>>
>>>> struct vhost_scsi {
>>>> /* Protected by vhost_scsi->dev.mutex */
>>>> @@ -68,7 +70,7 @@ struct vhost_scsi {
>>>> bool vs_endpoint;
>>>>
>>>> struct vhost_dev dev;
>>>> - struct vhost_virtqueue vqs[3];
>>>> + struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
>>>>
>>>> struct vhost_work vs_completion_work; /* cmd completion work item */
>>>> struct llist_head vs_completion_list; /* cmd completion queue */
>>>> @@ -366,12 +368,14 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
>>>> {
>>>> struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
>>>> vs_completion_work);
>>>> + DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
>>>> struct virtio_scsi_cmd_resp v_rsp;
>>>> struct tcm_vhost_cmd *tv_cmd;
>>>> struct llist_node *llnode;
>>>> struct se_cmd *se_cmd;
>>>> - int ret;
>>>> + int ret, vq;
>>>>
>>>> + bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
>>>> llnode = llist_del_all(&vs->vs_completion_list);
>>>> while (llnode) {
>>>> tv_cmd = llist_entry(llnode, struct tcm_vhost_cmd,
>>>> @@ -390,15 +394,20 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
>>>> memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
>>>> v_rsp.sense_len);
>>>> ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
>>>> - if (likely(ret == 0))
>>>> - vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
>>>> - else
>>>> + if (likely(ret == 0)) {
>>>> + vhost_add_used(tv_cmd->tvc_vq, tv_cmd->tvc_vq_desc, 0);
>>>> + vq = tv_cmd->tvc_vq - vs->vqs;
>>>> + __set_bit(vq, signal);
>>>> + } else
>>>> pr_err("Faulted on virtio_scsi_cmd_resp\n");
>>>>
>>>> vhost_scsi_free_cmd(tv_cmd);
>>>> }
>>>>
>>>> - vhost_signal(&vs->dev, &vs->vqs[2]);
>>>> + vq = -1;
>>>> + while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
>>>> + < VHOST_SCSI_MAX_VQ)
>>>> + vhost_signal(&vs->dev, &vs->vqs[vq]);
>>>> }
>>>>
>>>> static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
>>>> @@ -561,9 +570,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
>>>> }
>>>> }
>>>>
>>>> -static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
>>>> +static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
>>>> + struct vhost_virtqueue *vq)
>>>> {
>>>> - struct vhost_virtqueue *vq = &vs->vqs[2];
>>>> struct virtio_scsi_cmd_req v_req;
>>>> struct tcm_vhost_tpg *tv_tpg;
>>>> struct tcm_vhost_cmd *tv_cmd;
>>>> @@ -656,7 +665,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
>>>> ret = __copy_to_user(resp, &rsp, sizeof(rsp));
>>>> if (!ret)
>>>> vhost_add_used_and_signal(&vs->dev,
>>>> - &vs->vqs[2], head, 0);
>>>> + vq, head, 0);
>>>> else
>>>> pr_err("Faulted on virtio_scsi_cmd_resp\n");
>>>>
>>>> @@ -678,6 +687,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
>>>> ": %d\n", tv_cmd, exp_data_len, data_direction);
>>>>
>>>> tv_cmd->tvc_vhost = vs;
>>>> + tv_cmd->tvc_vq = vq;
>>>>
>>>> if (unlikely(vq->iov[out].iov_len !=
>>>> sizeof(struct virtio_scsi_cmd_resp))) {
>>>> @@ -758,7 +768,7 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
>>>> poll.work);
>>>> struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
>>>>
>>>> - vhost_scsi_handle_vq(vs);
>>>> + vhost_scsi_handle_vq(vs, vq);
>>>> }
>>>>
>>>> /*
>>>> @@ -879,7 +889,7 @@ err:
>>>> static int vhost_scsi_open(struct inode *inode, struct file *f)
>>>> {
>>>> struct vhost_scsi *s;
>>>> - int r;
>>>> + int r, i;
>>>>
>>>> s = kzalloc(sizeof(*s), GFP_KERNEL);
>>>> if (!s)
>>>> @@ -889,8 +899,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
>>>>
>>>> s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
>>>> s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
>>>> - s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
>>>> - r = vhost_dev_init(&s->dev, s->vqs, 3);
>>>> + for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++)
>>>> + s->vqs[i].handle_kick = vhost_scsi_handle_kick;
>>>> + r = vhost_dev_init(&s->dev, s->vqs, VHOST_SCSI_MAX_VQ);
>>>> if (r < 0) {
>>>> kfree(s);
>>>> return r;
>>>> @@ -922,9 +933,10 @@ static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
>>>>
>>>> static void vhost_scsi_flush(struct vhost_scsi *vs)
>>>> {
>>>> - vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
>>>> - vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
>>>> - vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
>>>> + int i;
>>>> +
>>>> + for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
>>>> + vhost_scsi_flush_vq(vs, i);
>>>> }
>>>>
>>>> static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
>>>> diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
>>>> index 519a550..1d2ae7a 100644
>>>> --- a/drivers/vhost/tcm_vhost.h
>>>> +++ b/drivers/vhost/tcm_vhost.h
>>>> @@ -23,6 +23,8 @@ struct tcm_vhost_cmd {
>>>> struct virtio_scsi_cmd_resp __user *tvc_resp;
>>>> /* Pointer to vhost_scsi for our device */
>>>> struct vhost_scsi *tvc_vhost;
>>>> + /* Pointer to vhost_virtqueue for the cmd */
>>>> + struct vhost_virtqueue *tvc_vq;
>>>> /* Pointer to vhost nexus memory */
>>>> struct tcm_vhost_nexus *tvc_nexus;
>>>> /* The TCM I/O descriptor that is accessed via container_of() */
>>>
>>>
>>
>>
>
>
--
Asias