From: Jason Wang <jasowang@redhat.com>
To: wexu@redhat.com, qemu-devel@nongnu.org
Cc: jfreimann@redhat.com, tiwei.bie@intel.com, mst@redhat.com
Subject: Re: [Qemu-devel] [RFC v2 7/8] virtio: event suppression for packed ring
Date: Wed, 6 Jun 2018 11:46:48 +0800
Message-ID: <ae3e3271-1391-09b8-0cc4-026f4f0243cd@redhat.com>
In-Reply-To: <1528225683-11413-8-git-send-email-wexu@redhat.com>
On 2018-06-06 03:08, wexu@redhat.com wrote:
> From: Wei Xu <wexu@redhat.com>
>
> Signed-off-by: Wei Xu <wexu@redhat.com>
> Signed-off-by: Wei Xu <wexu@redhat.com>
Duplicated.
> ---
> hw/virtio/virtio.c | 115 +++++++++++++++++++++++--
> include/standard-headers/linux/virtio_config.h | 13 +++
> 2 files changed, 119 insertions(+), 9 deletions(-)
>
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 6f2da83..4543974 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -226,6 +226,24 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
> virtio_tswap16s(vdev, &desc->next);
> }
>
> +static void vring_packed_event_read(VirtIODevice *vdev,
> + MemoryRegionCache *cache, VRingPackedDescEvent *e)
> +{
> + address_space_read_cached(cache, 0, e, sizeof(*e));
> + virtio_tswap16s(vdev, &e->off_wrap);
> + virtio_tswap16s(vdev, &e->flags);
You need to make sure flags is read before off_wrap.
> +}
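Something along these lines is what I mean (rough sketch only, reusing the helpers and the struct layout from this series, not tested):

static void vring_packed_event_read(VirtIODevice *vdev,
                                    MemoryRegionCache *cache,
                                    VRingPackedDescEvent *e)
{
    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);

    /* Load flags first ... */
    address_space_read_cached(cache, off_flags, &e->flags, sizeof(e->flags));
    /* ... then off_wrap, so we never pair a freshly updated flags with a
     * stale off_wrap written by the driver before it. */
    smp_rmb();
    address_space_read_cached(cache, off_off, &e->off_wrap, sizeof(e->off_wrap));

    virtio_tswap16s(vdev, &e->flags);
    virtio_tswap16s(vdev, &e->off_wrap);
}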
> +
> +static void vring_packed_event_write(VirtIODevice *vdev,
> + MemoryRegionCache *cache, VRingPackedDescEvent *e)
> +{
> + virtio_tswap16s(vdev, &e->off_wrap);
> + virtio_tswap16s(vdev, &e->flags);
> + address_space_write_cached(cache, 0, e, sizeof(*e));
You need to make sure off_wrap is written before flags here (the mirror of the ordering needed on the read side); see the sketch below.
> + address_space_cache_invalidate(cache, 0, sizeof(VRingUsedElem));
> +}
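Rough sketch, same assumptions as above (and the invalidate size probably wants to be sizeof(*e) rather than sizeof(VRingUsedElem)):

    virtio_tswap16s(vdev, &e->off_wrap);
    virtio_tswap16s(vdev, &e->flags);
    /* Publish off_wrap first ... */
    address_space_write_cached(cache, offsetof(VRingPackedDescEvent, off_wrap),
                               &e->off_wrap, sizeof(e->off_wrap));
    smp_wmb();
    /* ... then flags, so the driver never sees the new flags together with a
     * stale off_wrap. */
    address_space_write_cached(cache, offsetof(VRingPackedDescEvent, flags),
                               &e->flags, sizeof(e->flags));
    address_space_cache_invalidate(cache, 0, sizeof(*e));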
> +
> +
> static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
> {
> VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
> @@ -332,14 +350,8 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
> address_space_cache_invalidate(&caches->used, pa, sizeof(val));
> }
>
> -void virtio_queue_set_notification(VirtQueue *vq, int enable)
> +static void virtio_queue_set_notification_split(VirtQueue *vq, int enable)
> {
> - vq->notification = enable;
> -
> - if (!vq->vring.desc) {
> - return;
> - }
> -
> rcu_read_lock();
> if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
> vring_set_avail_event(vq, vring_avail_idx(vq));
> @@ -355,6 +367,38 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
> rcu_read_unlock();
> }
>
> +static void virtio_queue_set_notification_packed(VirtQueue *vq, int enable)
> +{
> + VRingPackedDescEvent e;
> + VRingMemoryRegionCaches *caches;
> +
> + rcu_read_lock();
> + caches = vring_get_region_caches(vq);
> + vring_packed_event_read(vq->vdev, &caches->device, &e);
Why do we need to read the structure back here? Writing just the flags field should be enough; see the sketch after this hunk.
> + if (enable) {
> + e.flags = RING_EVENT_FLAGS_ENABLE;
> + } else {
> + e.flags = RING_EVENT_FLAGS_DISABLE;
> + }
> + vring_packed_event_write(vq->vdev, &caches->device, &e);
> + rcu_read_unlock();
> +}
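E.g. something like this would avoid touching off_wrap at all (rough sketch, not tested, using only the helpers already used in this patch):

static void virtio_queue_set_notification_packed(VirtQueue *vq, int enable)
{
    VRingMemoryRegionCaches *caches;
    uint16_t flags;

    rcu_read_lock();
    caches = vring_get_region_caches(vq);
    flags = enable ? RING_EVENT_FLAGS_ENABLE : RING_EVENT_FLAGS_DISABLE;
    virtio_tswap16s(vq->vdev, &flags);
    /* Only the flags word is updated, so only that word is written back. */
    address_space_write_cached(&caches->device,
                               offsetof(VRingPackedDescEvent, flags),
                               &flags, sizeof(flags));
    address_space_cache_invalidate(&caches->device,
                                   offsetof(VRingPackedDescEvent, flags),
                                   sizeof(flags));
    rcu_read_unlock();
}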
> +
> +void virtio_queue_set_notification(VirtQueue *vq, int enable)
> +{
> + vq->notification = enable;
> +
> + if (!vq->vring.desc) {
> + return;
> + }
> +
> + if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
> + virtio_queue_set_notification_packed(vq, enable);
> + } else {
> + virtio_queue_set_notification_split(vq, enable);
> + }
> +}
> +
> int virtio_queue_ready(VirtQueue *vq)
> {
> return vq->vring.avail != 0;
> @@ -2059,8 +2103,7 @@ static void virtio_set_isr(VirtIODevice *vdev, int value)
> }
> }
>
> -/* Called within rcu_read_lock(). */
> -static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> +static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> {
> uint16_t old, new;
> bool v;
> @@ -2083,6 +2126,60 @@ static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> return !v || vring_need_event(vring_get_used_event(vq), new, old);
> }
>
> +static bool vring_packed_need_event(VirtQueue *vq, uint16_t off_wrap,
> + uint16_t new, uint16_t old)
> +{
> + bool wrap = vq->used_wrap_counter;
> + int off = off_wrap & ~(1 << 15);
> +
> + if (new < old) {
> + new += vq->vring.num;
> + wrap ^= 1;
> + }
> +
> + if (wrap != off_wrap >> 15) {
> + off += vq->vring.num;
> + }
> +
> + return vring_need_event(off, new, old);
> +}
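Just to write down a concrete case to check my understanding of the math (numbers made up): vring.num = 256, old = 250, new = used_idx = 5 with used_wrap_counter now 0 (we wrapped since the last signal). Since new < old, new becomes 261 and wrap becomes 1. If the driver asked for an event at off_wrap = (1 << 15) | 252, the wrap bits match, off stays 252, and vring_need_event(252, 261, 250) compares (261 - 252 - 1) = 8 against (261 - 250) = 11, i.e. notify, which is what we want since slot 252 was used after the last notification.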
> +
> +static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> +{
> + VRingPackedDescEvent e;
> + uint16_t old, new;
> + bool v;
> + VRingMemoryRegionCaches *caches;
> +
> + caches = vring_get_region_caches(vq);
> + vring_packed_event_read(vdev, &caches->driver, &e);
> +
> + /* Make sure we see the updated flags */
> + smp_mb();
As I've mentioned several times, why do we need a memory barrier here?
> + if (e.flags == RING_EVENT_FLAGS_DISABLE) {
> + return false;
> + } else if (e.flags == RING_EVENT_FLAGS_ENABLE) {
> + return true;
> + }
> +
> + v = vq->signalled_used_valid;
> + vq->signalled_used_valid = true;
> + old = vq->signalled_used;
> + new = vq->signalled_used = vq->used_idx;
> +
> + return !v || vring_packed_need_event(vq, e.off_wrap, new, old);
> +}
> +
> +/* Called within rcu_read_lock(). */
> +static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
> +{
> + if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
> + return virtio_packed_should_notify(vdev, vq);
> + } else {
> + return virtio_split_should_notify(vdev, vq);
> + }
> +}
> +
> void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
> {
> bool should_notify;
> diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h
> index 6ee5529..53e5c83 100644
> --- a/include/standard-headers/linux/virtio_config.h
> +++ b/include/standard-headers/linux/virtio_config.h
> @@ -73,4 +73,17 @@
> #define VIRTIO_F_IOMMU_PLATFORM 33
>
> #define VIRTIO_F_RING_PACKED 34
> +
> +/* Enable events */
> +#define RING_EVENT_FLAGS_ENABLE 0x0
> +/* Disable events */
> +#define RING_EVENT_FLAGS_DISABLE 0x1
> +/*
> + * * Enable events for a specific descriptor
> + * * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
> + * * Only valid if VIRTIO_F_RING_EVENT_IDX has been negotiated.
> + * */
> +#define RING_EVENT_FLAGS_DESC 0x2
> +/* The value 0x3 is reserved */
> +
> #endif /* _LINUX_VIRTIO_CONFIG_H */
This could be done in a separate headers-sync patch.
Thanks