From: Wei Xu <wexu@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: qemu-devel@nongnu.org, tiwei.bie@intel.com, jfreimann@redhat.com,
maxime.coquelin@redhat.com
Subject: Re: [Qemu-devel] [[RFC v3 08/12] virtio: event suppression support for packed ring
Date: Mon, 15 Oct 2018 16:20:50 +0800 [thread overview]
Message-ID: <20181015082050.GG27871@wei-ubt> (raw)
In-Reply-To: <f9b19079-65fa-4d21-63ba-00f79095bc4f@redhat.com>
On Mon, Oct 15, 2018 at 02:59:48PM +0800, Jason Wang wrote:
>
>
> On 2018年10月11日 22:08, wexu@redhat.com wrote:
> >From: Wei Xu <wexu@redhat.com>
> >
> >Signed-off-by: Wei Xu <wexu@redhat.com>
> >---
> > hw/virtio/virtio.c | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++--
> > 1 file changed, 123 insertions(+), 3 deletions(-)
> >
> >diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> >index d12a7e3..1d25776 100644
> >--- a/hw/virtio/virtio.c
> >+++ b/hw/virtio/virtio.c
> >@@ -241,6 +241,30 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
> > virtio_tswap16s(vdev, &desc->next);
> > }
> >+static void vring_packed_event_read(VirtIODevice *vdev,
> >+ MemoryRegionCache *cache, VRingPackedDescEvent *e)
> >+{
> >+ address_space_read_cached(cache, 0, e, sizeof(*e));
> >+ virtio_tswap16s(vdev, &e->off_wrap);
> >+ virtio_tswap16s(vdev, &e->flags);
> >+}
> >+
> >+static void vring_packed_off_wrap_write(VirtIODevice *vdev,
> >+ MemoryRegionCache *cache, uint16_t off_wrap)
> >+{
> >+ virtio_tswap16s(vdev, &off_wrap);
> >+ address_space_write_cached(cache, 0, &off_wrap, sizeof(off_wrap));
> >+ address_space_cache_invalidate(cache, 0, sizeof(off_wrap));
> >+}
> >+
> >+static void vring_packed_flags_write(VirtIODevice *vdev,
> >+ MemoryRegionCache *cache, uint16_t flags)
> >+{
> >+ virtio_tswap16s(vdev, &flags);
> >+ address_space_write_cached(cache, sizeof(uint16_t), &flags, sizeof(flags));
> >+ address_space_cache_invalidate(cache, sizeof(uint16_t), sizeof(flags));
> >+}
> >+
> > static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
> > {
> > VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
> >@@ -347,7 +371,7 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
> > address_space_cache_invalidate(&caches->used, pa, sizeof(val));
> > }
> >-void virtio_queue_set_notification(VirtQueue *vq, int enable)
> >+static void virtio_queue_set_notification_split(VirtQueue *vq, int enable)
> > {
> > vq->notification = enable;
> >@@ -370,6 +394,51 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
> > rcu_read_unlock();
> > }
> >+static void virtio_queue_set_notification_packed(VirtQueue *vq, int enable)
> >+{
> >+ VRingPackedDescEvent e;
> >+ VRingMemoryRegionCaches *caches;
> >+
> >+ rcu_read_lock();
> >+ caches = vring_get_region_caches(vq);
> >+ vring_packed_event_read(vq->vdev, &caches->device, &e);
> >+
> >+ if (!enable) {
> >+ e.flags = RING_EVENT_FLAGS_DISABLE;
> >+ goto out;
> >+ }
> >+
> >+ e.flags = RING_EVENT_FLAGS_ENABLE;
> >+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
> >+ uint16_t off_wrap = vq->event_idx | vq->event_wrap_counter << 15;
>
> Btw, why not just use shadow_avail_idx here?
It would be nice to do that, but one issue here is that it is 'shadow_avail_idx' for
Rx while it is 'used_idx' for Tx when setting up for a kick. I haven't figured out a
clean fix yet, since keeping them separate makes the migration part of the work easier — any idea?
Wei
>
> Thanks
>
> >+
> >+ vring_packed_off_wrap_write(vq->vdev, &caches->device, off_wrap);
> >+ /* Make sure off_wrap is wrote before flags */
> >+ smp_wmb();
> >+
> >+ e.flags = RING_EVENT_FLAGS_DESC;
> >+ }
> >+
> >+out:
> >+ vring_packed_flags_write(vq->vdev, &caches->device, e.flags);
> >+ rcu_read_unlock();
> >+}
> >+
> >+void virtio_queue_set_notification(VirtQueue *vq, int enable)
> >+{
> >+ vq->notification = enable;
> >+
> >+ if (!vq->vring.desc) {
> >+ return;
> >+ }
> >+
> >+ if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
> >+ virtio_queue_set_notification_packed(vq, enable);
> >+ } else {
> >+ virtio_queue_set_notification_split(vq, enable);
> >+ }
> >+}
> >+
> > int virtio_queue_ready(VirtQueue *vq)
> > {
> > return vq->vring.avail != 0;
> >@@ -2103,8 +2172,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
> > }
> > }
> >-/* Called within rcu_read_lock(). */
> >-static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> >+static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> > {
> > uint16_t old, new;
> > bool v;
> >@@ -2127,6 +2195,58 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> > return !v || vring_need_event(vring_get_used_event(vq), new, old);
> > }
> >+static bool vring_packed_need_event(VirtQueue *vq, uint16_t off_wrap,
> >+ uint16_t new, uint16_t old)
> >+{
> >+ bool wrap = vq->event_wrap_counter;
> >+ int off = off_wrap & ~(1 << 15);
> >+
> >+ if (new < old) {
> >+ new += vq->vring.num;
> >+ wrap ^= 1;
> >+ }
> >+
> >+ if (wrap != off_wrap >> 15) {
> >+ off += vq->vring.num;
> >+ }
> >+
> >+ return vring_need_event(off, new, old);
> >+}
> >+
> >+static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> >+{
> >+ VRingPackedDescEvent e;
> >+ uint16_t old, new;
> >+ bool v;
> >+ VRingMemoryRegionCaches *caches;
> >+
> >+ caches = vring_get_region_caches(vq);
> >+ vring_packed_event_read(vdev, &caches->driver, &e);
> >+
> >+ old = vq->signalled_used;
> >+ new = vq->signalled_used = vq->used_idx;
> >+ v = vq->signalled_used_valid;
> >+ vq->signalled_used_valid = true;
> >+
> >+ if (e.flags == RING_EVENT_FLAGS_DISABLE) {
> >+ return false;
> >+ } else if (e.flags == RING_EVENT_FLAGS_ENABLE) {
> >+ return true;
> >+ }
> >+
> >+ return !v || vring_packed_need_event(vq, e.off_wrap, new, old);
> >+}
> >+
> >+/* Called within rcu_read_lock(). */
> >+static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> >+{
> >+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> >+ return virtio_packed_should_notify(vdev, vq);
> >+ } else {
> >+ return virtio_split_should_notify(vdev, vq);
> >+ }
> >+}
> >+
> > void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
> > {
> > bool should_notify;
>
next prev parent reply other threads:[~2018-10-15 8:21 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-11 14:08 [Qemu-devel] [RFC v3 00/12] packed ring virtio-net userspace backend support wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 01/12] virtio: introduce packed ring definitions wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 02/12] virtio: redefine structure & memory cache for packed ring wexu
2018-10-15 3:03 ` Jason Wang
2018-10-15 7:26 ` Wei Xu
2018-10-15 8:03 ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 03/12] virtio: init " wexu
2018-10-15 3:10 ` Jason Wang
2018-10-15 7:09 ` Wei Xu
2018-10-15 7:54 ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 04/12] virtio: init wrap counter " wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 05/12] virtio: init and desc empty check " wexu
2018-10-15 3:18 ` Jason Wang
2018-10-15 7:04 ` Wei Xu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 06/12] virtio: get avail bytes " wexu
2018-10-15 3:47 ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 07/12] virtio: fill/flush/pop " wexu
2018-10-15 6:14 ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 08/12] virtio: event suppression support " wexu
2018-10-15 6:55 ` Jason Wang
2018-10-15 6:59 ` Jason Wang
2018-10-15 8:20 ` Wei Xu [this message]
2018-10-15 9:11 ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 09/12] virtio-net: fill head desc after done all in a chain wexu
2018-10-15 7:45 ` Jason Wang
2018-10-15 8:03 ` Wei Xu
2018-10-15 8:05 ` Jason Wang
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 10/12] virtio: packed ring feature bit for userspace backend wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 11/12] virtio: enable packed ring via a new command line wexu
2018-10-11 14:08 ` [Qemu-devel] [[RFC v3 12/12] virtio: feature vhost-net support for packed ring wexu
2018-10-15 7:50 ` Jason Wang
2018-10-15 8:11 ` Wei Xu
2018-11-21 13:03 ` Maxime Coquelin
2018-11-22 3:46 ` Wei Xu
2018-11-21 14:39 ` [Qemu-devel] [RFC v3 00/12] packed ring virtio-net userspace backend support Tiwei Bie
2018-11-22 3:43 ` Wei Xu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181015082050.GG27871@wei-ubt \
--to=wexu@redhat.com \
--cc=jasowang@redhat.com \
--cc=jfreimann@redhat.com \
--cc=maxime.coquelin@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=tiwei.bie@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).