From: Jason Wang <jasowang@redhat.com>
To: Eugenio Perez Martin <eperezma@redhat.com>
Cc: Laurent Vivier <lvivier@redhat.com>,
Parav Pandit <parav@mellanox.com>, Cindy Lu <lulu@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Juan Quintela <quintela@redhat.com>,
Richard Henderson <richard.henderson@linaro.org>,
qemu-level <qemu-devel@nongnu.org>,
Gautam Dawar <gdawar@xilinx.com>,
Markus Armbruster <armbru@redhat.com>,
Eduardo Habkost <ehabkost@redhat.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Xiao W Wang <xiao.w.wang@intel.com>, Peter Xu <peterx@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Eli Cohen <eli@mellanox.com>, Paolo Bonzini <pbonzini@redhat.com>,
Zhu Lingshan <lingshan.zhu@intel.com>,
virtualization <virtualization@lists.linux-foundation.org>,
Eric Blake <eblake@redhat.com>,
Stefano Garzarella <sgarzare@redhat.com>
Subject: Re: [PATCH 06/31] vhost: Route guest->host notification through shadow virtqueue
Date: Tue, 8 Feb 2022 17:02:33 +0800 [thread overview]
Message-ID: <05de0101-6481-22fd-e2f8-2fa3213f47a1@redhat.com> (raw)
In-Reply-To: <CAJaqyWeRbmwW80q3q52nFw=iz1xcPRFviFaRHo0nzXpEb+3m3A@mail.gmail.com>
在 2022/1/31 下午7:33, Eugenio Perez Martin 写道:
> On Fri, Jan 28, 2022 at 7:57 AM Jason Wang <jasowang@redhat.com> wrote:
>>
>> 在 2022/1/22 上午4:27, Eugenio Pérez 写道:
>>> At this moment no buffer forwarding will be performed in SVQ mode: Qemu
>>> just forwards the guest's kicks to the device. This commit also sets up
>>> SVQs in the vhost device.
>>>
>>> Host memory notifier regions are left out for simplicity, and they will
>>> not be addressed in this series.
>>
>> I wonder if it's better to squash this into patch 5 since it gives us a
>> full guest->host forwarding.
>>
> I'm fine with that if you think it makes the review easier.
Yes please.
>
>>> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
>>> ---
>>> include/hw/virtio/vhost-vdpa.h | 4 ++
>>> hw/virtio/vhost-vdpa.c | 122 ++++++++++++++++++++++++++++++++-
>>> 2 files changed, 124 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
>>> index 3ce79a646d..009a9f3b6b 100644
>>> --- a/include/hw/virtio/vhost-vdpa.h
>>> +++ b/include/hw/virtio/vhost-vdpa.h
>>> @@ -12,6 +12,8 @@
>>> #ifndef HW_VIRTIO_VHOST_VDPA_H
>>> #define HW_VIRTIO_VHOST_VDPA_H
>>>
>>> +#include <gmodule.h>
>>> +
>>> #include "hw/virtio/virtio.h"
>>> #include "standard-headers/linux/vhost_types.h"
>>>
>>> @@ -27,6 +29,8 @@ typedef struct vhost_vdpa {
>>> bool iotlb_batch_begin_sent;
>>> MemoryListener listener;
>>> struct vhost_vdpa_iova_range iova_range;
>>> + bool shadow_vqs_enabled;
>>> + GPtrArray *shadow_vqs;
>>> struct vhost_dev *dev;
>>> VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
>>> } VhostVDPA;
>>> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
>>> index 6c10a7f05f..18de14f0fb 100644
>>> --- a/hw/virtio/vhost-vdpa.c
>>> +++ b/hw/virtio/vhost-vdpa.c
>>> @@ -17,12 +17,14 @@
>>> #include "hw/virtio/vhost.h"
>>> #include "hw/virtio/vhost-backend.h"
>>> #include "hw/virtio/virtio-net.h"
>>> +#include "hw/virtio/vhost-shadow-virtqueue.h"
>>> #include "hw/virtio/vhost-vdpa.h"
>>> #include "exec/address-spaces.h"
>>> #include "qemu/main-loop.h"
>>> #include "cpu.h"
>>> #include "trace.h"
>>> #include "qemu-common.h"
>>> +#include "qapi/error.h"
>>>
>>> /*
>>> * Return one past the end of the end of section. Be careful with uint64_t
>>> @@ -409,8 +411,14 @@ err:
>>>
>>> static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
>>> {
>>> + struct vhost_vdpa *v = dev->opaque;
>>> int i;
>>>
>>> + if (v->shadow_vqs_enabled) {
>>> + /* SVQ is not compatible with host notifiers mr */
>>
>> I guess there should be a TODO or FIXME here.
>>
> Sure I can add it.
>
>>> + return;
>>> + }
>>> +
>>> for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
>>> if (vhost_vdpa_host_notifier_init(dev, i)) {
>>> goto err;
>>> @@ -424,6 +432,17 @@ err:
>>> return;
>>> }
>>>
>>> +static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
>>> +{
>>> + struct vhost_vdpa *v = dev->opaque;
>>> + size_t idx;
>>> +
>>> + for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
>>> + vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
>>> + }
>>> + g_ptr_array_free(v->shadow_vqs, true);
>>> +}
>>> +
>>> static int vhost_vdpa_cleanup(struct vhost_dev *dev)
>>> {
>>> struct vhost_vdpa *v;
>>> @@ -432,6 +451,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev)
>>> trace_vhost_vdpa_cleanup(dev, v);
>>> vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
>>> memory_listener_unregister(&v->listener);
>>> + vhost_vdpa_svq_cleanup(dev);
>>>
>>> dev->opaque = NULL;
>>> ram_block_discard_disable(false);
>>> @@ -507,9 +527,15 @@ static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
>>>
>>> static int vhost_vdpa_reset_device(struct vhost_dev *dev)
>>> {
>>> + struct vhost_vdpa *v = dev->opaque;
>>> int ret;
>>> uint8_t status = 0;
>>>
>>> + for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
>>> + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
>>> + vhost_svq_stop(svq);
>>> + }
>>> +
>>> ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
>>> trace_vhost_vdpa_reset_device(dev, status);
>>> return ret;
>>> @@ -639,13 +665,28 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
>>> return ret;
>>> }
>>>
>>> -static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
>>> - struct vhost_vring_file *file)
>>> +static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
>>> + struct vhost_vring_file *file)
>>> {
>>> trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
>>> return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
>>> }
>>>
>>> +static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
>>> + struct vhost_vring_file *file)
>>> +{
>>> + struct vhost_vdpa *v = dev->opaque;
>>> + int vdpa_idx = vhost_vdpa_get_vq_index(dev, file->index);
>>> +
>>> + if (v->shadow_vqs_enabled) {
>>> + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
>>> + vhost_svq_set_svq_kick_fd(svq, file->fd);
>>> + return 0;
>>> + } else {
>>> + return vhost_vdpa_set_vring_dev_kick(dev, file);
>>> + }
>>> +}
>>> +
>>> static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
>>> struct vhost_vring_file *file)
>>> {
>>> @@ -653,6 +694,33 @@ static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
>>> return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
>>> }
>>>
>>> +/**
>>> + * Set shadow virtqueue descriptors to the device
>>> + *
>>> + * @dev The vhost device model
>>> + * @svq The shadow virtqueue
>>> + * @idx The index of the virtqueue in the vhost device
>>> + */
>>> +static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
>>> + VhostShadowVirtqueue *svq,
>>> + unsigned idx)
>>> +{
>>> + struct vhost_vring_file file = {
>>> + .index = dev->vq_index + idx,
>>> + };
>>> + const EventNotifier *event_notifier;
>>> + int r;
>>> +
>>> + event_notifier = vhost_svq_get_dev_kick_notifier(svq);
>>
>> A question, any reason for making VhostShadowVirtqueue private? If we
>> export it in .h we don't need helper to access its member like
>> vhost_svq_get_dev_kick_notifier().
>>
> Exporting it is always a possibility of course, but that direct
> access will not be thread safe if we decide to move SVQ to its own
> iothread, for example.
I don't get this, maybe you can give me an example.
>
> I feel it will be easier to work with it this way but it might be that
> I'm just used to making as much as possible private. Not like it's
> needed to use the helpers in the hot paths, only in the setup and
> teardown.
>
>> Note that vhost_dev is a public structure.
>>
> Sure we could embed in vhost_virtqueue if we choose to do it that way,
> for example.
>
>>> + file.fd = event_notifier_get_fd(event_notifier);
>>> + r = vhost_vdpa_set_vring_dev_kick(dev, &file);
>>> + if (unlikely(r != 0)) {
>>> + error_report("Can't set device kick fd (%d)", -r);
>>> + }
>>
>> I wonder whether or not we can generalize the logic here and
>> vhost_vdpa_set_vring_kick(). There's nothing vdpa specific unless the
>> vhost_ops->set_vring_kick().
>>
> If we call vhost_ops->set_vring_kick we are setting guest->SVQ kick
> notifier, not SVQ -> vDPA device, because the
> if(v->shadow_vqs_enabled). All of the modified ops callbacks are
> hiding the actual device from the vhost subsystem so we need to
> explicitly use the newly created _dev_ ones.
Ok, I'm fine to start with vhost_vdpa specific code.
>
>>> +
>>> + return r == 0;
>>> +}
>>> +
>>> static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>>> {
>>> struct vhost_vdpa *v = dev->opaque;
>>> @@ -660,6 +728,13 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
>>>
>>> if (started) {
>>> vhost_vdpa_host_notifiers_init(dev);
>>> + for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
>>> + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
>>> + bool ok = vhost_vdpa_svq_setup(dev, svq, i);
>>> + if (unlikely(!ok)) {
>>> + return -1;
>>> + }
>>> + }
>>> vhost_vdpa_set_vring_ready(dev);
>>> } else {
>>> vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
>>> @@ -737,6 +812,41 @@ static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
>>> return true;
>>> }
>>>
>>> +/**
>>> + * Adaptor function to free shadow virtqueue through gpointer
>>> + *
>>> + * @svq The Shadow Virtqueue
>>> + */
>>> +static void vhost_psvq_free(gpointer svq)
>>> +{
>>> + vhost_svq_free(svq);
>>> +}
>>
>> Any reason for such indirection? Can we simply use vhost_svq_free()?
>>
> GCC complains about different types. I think we could do a function
> type cast and it's valid for every architecture qemu supports, but the
> indirection seems cleaner to me, and I would be surprised if the
> compiler does not optimize it away in the cases that the casting are
> valid.
>
> ../hw/virtio/vhost-vdpa.c:1186:60: error: incompatible function
> pointer types passing 'void (VhostShadowVirtqueue *)' (aka 'void
> (struct VhostShadowVirtqueue *)') to parameter of type
> 'GDestroyNotify' (aka 'void (*)(void *)')
Or just change vhost_svq_free() to take gpointer instead? Then we don't
need a cast.
Thanks
>
> Thanks!
>
>> Thanks
>>
>>
>>> +
>>> +static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
>>> + Error **errp)
>>> +{
>>> + size_t n_svqs = v->shadow_vqs_enabled ? hdev->nvqs : 0;
>>> + g_autoptr(GPtrArray) shadow_vqs = g_ptr_array_new_full(n_svqs,
>>> + vhost_psvq_free);
>>> + if (!v->shadow_vqs_enabled) {
>>> + goto out;
>>> + }
>>> +
>>> + for (unsigned n = 0; n < hdev->nvqs; ++n) {
>>> + VhostShadowVirtqueue *svq = vhost_svq_new();
>>> +
>>> + if (unlikely(!svq)) {
>>> + error_setg(errp, "Cannot create svq %u", n);
>>> + return -1;
>>> + }
>>> + g_ptr_array_add(v->shadow_vqs, svq);
>>> + }
>>> +
>>> +out:
>>> + v->shadow_vqs = g_steal_pointer(&shadow_vqs);
>>> + return 0;
>>> +}
>>> +
>>> static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
>>> {
>>> struct vhost_vdpa *v;
>>> @@ -759,6 +869,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
>>> dev->opaque = opaque ;
>>> v->listener = vhost_vdpa_memory_listener;
>>> v->msg_type = VHOST_IOTLB_MSG_V2;
>>> + ret = vhost_vdpa_init_svq(dev, v, errp);
>>> + if (ret) {
>>> + goto err;
>>> + }
>>>
>>> vhost_vdpa_get_iova_range(v);
>>>
>>> @@ -770,6 +884,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
>>> VIRTIO_CONFIG_S_DRIVER);
>>>
>>> return 0;
>>> +
>>> +err:
>>> + ram_block_discard_disable(false);
>>> + return ret;
>>> }
>>>
>>> const VhostOps vdpa_ops = {
next prev parent reply other threads:[~2022-02-08 10:31 UTC|newest]
Thread overview: 130+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-01-21 20:27 [PATCH 00/31] vDPA shadow virtqueue Eugenio Pérez
2022-01-21 20:27 ` [PATCH 01/31] vdpa: Reorder virtio/vhost-vdpa.c functions Eugenio Pérez
2022-01-28 5:59 ` Jason Wang
2022-01-28 7:57 ` Eugenio Perez Martin
2022-02-21 7:31 ` Jason Wang
2022-02-21 7:42 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 02/31] vhost: Add VhostShadowVirtqueue Eugenio Pérez
2022-01-26 8:53 ` Eugenio Perez Martin
2022-01-28 6:00 ` Jason Wang
2022-01-28 8:10 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 03/31] vdpa: Add vhost_svq_get_dev_kick_notifier Eugenio Pérez
2022-01-28 6:03 ` Jason Wang
2022-01-31 9:33 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 04/31] vdpa: Add vhost_svq_set_svq_kick_fd Eugenio Pérez
2022-01-28 6:29 ` Jason Wang
2022-01-31 10:18 ` Eugenio Perez Martin
2022-02-08 8:47 ` Jason Wang
2022-02-18 18:22 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 05/31] vhost: Add Shadow VirtQueue kick forwarding capabilities Eugenio Pérez
2022-01-28 6:32 ` Jason Wang
2022-01-31 10:48 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 06/31] vhost: Route guest->host notification through shadow virtqueue Eugenio Pérez
2022-01-28 6:56 ` Jason Wang
2022-01-31 11:33 ` Eugenio Perez Martin
2022-02-08 9:02 ` Jason Wang [this message]
2022-01-21 20:27 ` [PATCH 07/31] vhost: dd vhost_svq_get_svq_call_notifier Eugenio Pérez
2022-01-29 7:57 ` Jason Wang
2022-01-29 17:49 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 08/31] vhost: Add vhost_svq_set_guest_call_notifier Eugenio Pérez
2022-01-21 20:27 ` [PATCH 09/31] vhost-vdpa: Take into account SVQ in vhost_vdpa_set_vring_call Eugenio Pérez
2022-01-29 8:05 ` Jason Wang
2022-01-31 15:34 ` Eugenio Perez Martin
2022-02-08 3:23 ` Jason Wang
2022-02-18 12:35 ` Eugenio Perez Martin
2022-02-21 7:39 ` Jason Wang
2022-02-21 8:01 ` Eugenio Perez Martin
2022-02-22 7:18 ` Jason Wang
2022-01-21 20:27 ` [PATCH 10/31] vhost: Route host->guest notification through shadow virtqueue Eugenio Pérez
2022-01-21 20:27 ` [PATCH 11/31] vhost: Add vhost_svq_valid_device_features to shadow vq Eugenio Pérez
2022-01-29 8:11 ` Jason Wang
2022-01-31 15:49 ` Eugenio Perez Martin
2022-02-01 10:57 ` Eugenio Perez Martin
2022-02-08 3:37 ` Jason Wang
2022-02-26 9:11 ` Liuxiangdong via
2022-02-26 11:12 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 12/31] vhost: Add vhost_svq_valid_guest_features " Eugenio Pérez
2022-01-21 20:27 ` [PATCH 13/31] vhost: Add vhost_svq_ack_guest_features " Eugenio Pérez
2022-01-21 20:27 ` [PATCH 14/31] virtio: Add vhost_shadow_vq_get_vring_addr Eugenio Pérez
2022-01-21 20:27 ` [PATCH 15/31] vdpa: Add vhost_svq_get_num Eugenio Pérez
2022-01-29 8:14 ` Jason Wang
2022-01-31 16:36 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 16/31] vhost: pass queue index to vhost_vq_get_addr Eugenio Pérez
2022-01-29 8:20 ` Jason Wang
2022-01-31 17:44 ` Eugenio Perez Martin
2022-02-08 6:58 ` Jason Wang
2022-01-21 20:27 ` [PATCH 17/31] vdpa: adapt vhost_ops callbacks to svq Eugenio Pérez
2022-01-30 4:03 ` Jason Wang
2022-01-31 18:58 ` Eugenio Perez Martin
2022-02-08 3:57 ` Jason Wang
2022-02-17 17:13 ` Eugenio Perez Martin
2022-02-21 7:15 ` Jason Wang
2022-02-21 17:22 ` Eugenio Perez Martin
2022-02-22 3:16 ` Jason Wang
2022-02-22 7:42 ` Eugenio Perez Martin
2022-02-22 7:59 ` Jason Wang
2022-01-21 20:27 ` [PATCH 18/31] vhost: Shadow virtqueue buffers forwarding Eugenio Pérez
2022-01-30 4:42 ` Jason Wang
2022-02-01 17:08 ` Eugenio Perez Martin
2022-02-08 8:11 ` Jason Wang
2022-02-22 19:01 ` Eugenio Perez Martin
2022-02-23 2:03 ` Jason Wang
2022-01-30 6:46 ` Jason Wang
2022-02-01 11:25 ` Eugenio Perez Martin
2022-02-08 8:15 ` Jason Wang
2022-02-17 12:48 ` Eugenio Perez Martin
2022-02-21 7:43 ` Jason Wang
2022-02-21 8:15 ` Eugenio Perez Martin
2022-02-22 7:26 ` Jason Wang
2022-02-22 8:55 ` Eugenio Perez Martin
2022-02-23 2:26 ` Jason Wang
2022-01-21 20:27 ` [PATCH 19/31] utils: Add internal DMAMap to iova-tree Eugenio Pérez
2022-01-21 20:27 ` [PATCH 20/31] util: Store DMA entries in a list Eugenio Pérez
2022-01-21 20:27 ` [PATCH 21/31] util: Add iova_tree_alloc Eugenio Pérez
2022-01-24 4:32 ` Peter Xu
2022-01-24 9:20 ` Eugenio Perez Martin
2022-01-24 11:07 ` Peter Xu
2022-01-25 9:40 ` Eugenio Perez Martin
2022-01-27 8:06 ` Peter Xu
2022-01-27 9:24 ` Eugenio Perez Martin
2022-01-28 3:57 ` Peter Xu
2022-01-28 5:55 ` Jason Wang
2022-01-28 7:48 ` Eugenio Perez Martin
2022-02-15 19:34 ` Eugenio Pérez
2022-02-15 19:34 ` [PATCH] util: Add iova_tree_alloc Eugenio Pérez
2022-02-16 7:25 ` Peter Xu
2022-01-30 5:06 ` [PATCH 21/31] " Jason Wang
2022-01-21 20:27 ` [PATCH 22/31] vhost: Add VhostIOVATree Eugenio Pérez
2022-01-30 5:21 ` Jason Wang
2022-02-01 17:27 ` Eugenio Perez Martin
2022-02-08 8:17 ` Jason Wang
2022-01-21 20:27 ` [PATCH 23/31] vdpa: Add custom IOTLB translations to SVQ Eugenio Pérez
2022-01-30 5:57 ` Jason Wang
2022-01-31 19:11 ` Eugenio Perez Martin
2022-02-08 8:19 ` Jason Wang
2022-01-21 20:27 ` [PATCH 24/31] vhost: Add vhost_svq_get_last_used_idx Eugenio Pérez
2022-01-21 20:27 ` [PATCH 25/31] vdpa: Adapt vhost_vdpa_get_vring_base to SVQ Eugenio Pérez
2022-01-21 20:27 ` [PATCH 26/31] vdpa: Clear VHOST_VRING_F_LOG at vhost_vdpa_set_vring_addr in SVQ Eugenio Pérez
2022-01-21 20:27 ` [PATCH 27/31] vdpa: Never set log_base addr if SVQ is enabled Eugenio Pérez
2022-01-21 20:27 ` [PATCH 28/31] vdpa: Expose VHOST_F_LOG_ALL on SVQ Eugenio Pérez
2022-01-30 6:50 ` Jason Wang
2022-02-01 11:45 ` Eugenio Perez Martin
2022-02-08 8:25 ` Jason Wang
2022-02-16 15:53 ` Eugenio Perez Martin
2022-02-17 6:02 ` Jason Wang
2022-02-17 8:22 ` Eugenio Perez Martin
2022-02-22 7:41 ` Jason Wang
2022-02-22 8:05 ` Eugenio Perez Martin
2022-02-23 3:46 ` Jason Wang
2022-02-23 8:06 ` Eugenio Perez Martin
2022-02-24 3:45 ` Jason Wang
2022-01-21 20:27 ` [PATCH 29/31] vdpa: Make ncs autofree Eugenio Pérez
2022-01-30 6:51 ` Jason Wang
2022-02-01 17:10 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 30/31] vdpa: Move vhost_vdpa_get_iova_range to net/vhost-vdpa.c Eugenio Pérez
2022-01-30 6:53 ` Jason Wang
2022-02-01 17:11 ` Eugenio Perez Martin
2022-01-21 20:27 ` [PATCH 31/31] vdpa: Add x-svq to NetdevVhostVDPAOptions Eugenio Pérez
2022-01-28 6:02 ` [PATCH 00/31] vDPA shadow virtqueue Jason Wang
2022-01-31 9:15 ` Eugenio Perez Martin
2022-02-08 8:27 ` Jason Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=05de0101-6481-22fd-e2f8-2fa3213f47a1@redhat.com \
--to=jasowang@redhat.com \
--cc=armbru@redhat.com \
--cc=eblake@redhat.com \
--cc=ehabkost@redhat.com \
--cc=eli@mellanox.com \
--cc=eperezma@redhat.com \
--cc=gdawar@xilinx.com \
--cc=hanand@xilinx.com \
--cc=lingshan.zhu@intel.com \
--cc=lulu@redhat.com \
--cc=lvivier@redhat.com \
--cc=mst@redhat.com \
--cc=parav@mellanox.com \
--cc=pbonzini@redhat.com \
--cc=peterx@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=richard.henderson@linaro.org \
--cc=sgarzare@redhat.com \
--cc=stefanha@redhat.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=xiao.w.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).