From: Greg Kurz <gkurz@linux.vnet.ibm.com>
To: Thomas Huth <thuth@linux.vnet.ibm.com>
Cc: kwolf@redhat.com, peter.maydell@linaro.org, stefanha@redhat.com,
mst@redhat.com, marc.zyngier@arm.com, rusty@rustcorp.com.au,
agraf@suse.de, qemu-devel@nongnu.org, anthony@codemonkey.ws,
cornelia.huck@de.ibm.com, pbonzini@redhat.com, afaerber@suse.de
Subject: Re: [Qemu-devel] [PATCH v6 2/8] virtio: allow byte swapping for vring and config access
Date: Fri, 28 Mar 2014 18:02:47 +0100
Message-ID: <20140328180247.49df7692@bahia.local>
In-Reply-To: <20140328170731.3966498c@oc7435384737.ibm.com>
On Fri, 28 Mar 2014 17:07:31 +0100
Thomas Huth <thuth@linux.vnet.ibm.com> wrote:
> On Fri, 28 Mar 2014 11:57:25 +0100
> Greg Kurz <gkurz@linux.vnet.ibm.com> wrote:
>
> > From: Rusty Russell <rusty@rustcorp.com.au>
> >
> > This is based on a simpler patch by Anthony Liguori, which only handled
> > the vring accesses. We also need some drivers to access these helpers,
> > eg. for data which contains headers.
> >
> > Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
> > [ ldq_phys() API change,
> > use per-device needs_byteswap flag,
> > Greg Kurz <gkurz@linux.vnet.ibm.com> ]
> > Signed-off-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
> > ---
> > hw/virtio/virtio.c | 93 ++++++++++++++++++++++++++++++----------------------
> > 1 file changed, 53 insertions(+), 40 deletions(-)
> >
> > diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> > index 24b565f..1877b46 100644
> > --- a/hw/virtio/virtio.c
> > +++ b/hw/virtio/virtio.c
> > @@ -102,53 +102,57 @@ static void virtqueue_init(VirtQueue *vq)
> > vq->vring.align);
> > }
> >
> > -static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
> > +static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i,
> > + struct VirtIODevice *vdev)
> > {
> > hwaddr pa;
> > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
> > - return ldq_phys(&address_space_memory, pa);
> > + return virtio_ldq_phys(&address_space_memory, pa, vdev);
> > }
> >
> > -static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
> > +static inline uint32_t vring_desc_len(hwaddr desc_pa, int i,
> > + struct VirtIODevice *vdev)
> > {
> > hwaddr pa;
> > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
> > - return ldl_phys(&address_space_memory, pa);
> > + return virtio_ldl_phys(&address_space_memory, pa, vdev);
> > }
> >
> > -static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
> > +static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i,
> > + struct VirtIODevice *vdev)
> > {
> > hwaddr pa;
> > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
> > - return lduw_phys(&address_space_memory, pa);
> > + return virtio_lduw_phys(&address_space_memory, pa, vdev);
> > }
> >
> > -static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
> > +static inline uint16_t vring_desc_next(hwaddr desc_pa, int i,
> > + struct VirtIODevice *vdev)
> > {
> > hwaddr pa;
> > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
> > - return lduw_phys(&address_space_memory, pa);
> > + return virtio_lduw_phys(&address_space_memory, pa, vdev);
> > }
> >
> > static inline uint16_t vring_avail_flags(VirtQueue *vq)
> > {
> > hwaddr pa;
> > pa = vq->vring.avail + offsetof(VRingAvail, flags);
> > - return lduw_phys(&address_space_memory, pa);
> > + return virtio_lduw_phys(&address_space_memory, pa, vq->vdev);
> > }
> >
> > static inline uint16_t vring_avail_idx(VirtQueue *vq)
> > {
> > hwaddr pa;
> > pa = vq->vring.avail + offsetof(VRingAvail, idx);
> > - return lduw_phys(&address_space_memory, pa);
> > + return virtio_lduw_phys(&address_space_memory, pa, vq->vdev);
> > }
> >
> > static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
> > {
> > hwaddr pa;
> > pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
> > - return lduw_phys(&address_space_memory, pa);
> > + return virtio_lduw_phys(&address_space_memory, pa, vq->vdev);
> > }
> >
> > static inline uint16_t vring_used_event(VirtQueue *vq)
> > @@ -160,44 +164,46 @@ static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
> > {
> > hwaddr pa;
> > pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
> > - stl_phys(&address_space_memory, pa, val);
> > + virtio_stl_phys(&address_space_memory, pa, val, vq->vdev);
> > }
> >
> > static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
> > {
> > hwaddr pa;
> > pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
> > - stl_phys(&address_space_memory, pa, val);
> > + virtio_stl_phys(&address_space_memory, pa, val, vq->vdev);
> > }
> >
> > static uint16_t vring_used_idx(VirtQueue *vq)
> > {
> > hwaddr pa;
> > pa = vq->vring.used + offsetof(VRingUsed, idx);
> > - return lduw_phys(&address_space_memory, pa);
> > + return virtio_lduw_phys(&address_space_memory, pa, vq->vdev);
> > }
> >
> > static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
> > {
> > hwaddr pa;
> > pa = vq->vring.used + offsetof(VRingUsed, idx);
> > - stw_phys(&address_space_memory, pa, val);
> > + virtio_stw_phys(&address_space_memory, pa, val, vq->vdev);
> > }
> >
> > static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
> > {
> > hwaddr pa;
> > pa = vq->vring.used + offsetof(VRingUsed, flags);
> > - stw_phys(&address_space_memory,
> > - pa, lduw_phys(&address_space_memory, pa) | mask);
> > + virtio_stw_phys(&address_space_memory, pa,
> > + virtio_lduw_phys(&address_space_memory, pa,
> > + vq->vdev) | mask, vq->vdev);
> > }
> >
> > static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
> > {
> > hwaddr pa;
> > pa = vq->vring.used + offsetof(VRingUsed, flags);
> > - stw_phys(&address_space_memory,
> > - pa, lduw_phys(&address_space_memory, pa) & ~mask);
> > + virtio_stw_phys(&address_space_memory, pa,
> > + virtio_lduw_phys(&address_space_memory, pa,
> > + vq->vdev) & ~mask, vq->vdev);
> > }
> >
> > static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
> > @@ -207,7 +213,7 @@ static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
> > return;
> > }
> > pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
> > - stw_phys(&address_space_memory, pa, val);
> > + virtio_stw_phys(&address_space_memory, pa, val, vq->vdev);
> > }
> >
> > void virtio_queue_set_notification(VirtQueue *vq, int enable)
> > @@ -325,16 +331,18 @@ static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
> > }
> >
> > static unsigned virtqueue_next_desc(hwaddr desc_pa,
> > - unsigned int i, unsigned int max)
> > + unsigned int i, unsigned int max,
> > + struct VirtIODevice *vdev)
> > {
> > unsigned int next;
> >
> > /* If this descriptor says it doesn't chain, we're done. */
> > - if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
> > + if (!(vring_desc_flags(desc_pa, i, vdev) & VRING_DESC_F_NEXT)) {
> > return max;
> > + }
> >
> > /* Check they're not leading us off end of descriptors. */
> > - next = vring_desc_next(desc_pa, i);
> > + next = vring_desc_next(desc_pa, i, vdev);
> > /* Make sure compiler knows to grab that: we don't want it changing! */
> > smp_wmb();
> >
> > @@ -366,8 +374,9 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
> > i = virtqueue_get_head(vq, idx++);
> > desc_pa = vq->vring.desc;
> >
> > - if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
> > - if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
> > + if (vring_desc_flags(desc_pa, i, vq->vdev)
> > + & VRING_DESC_F_INDIRECT) {
>
> I think the above if statement would still fit on one line within the
> 80-column limit.
>
> > + if (vring_desc_len(desc_pa, i, vq->vdev) % sizeof(VRingDesc)) {
> > error_report("Invalid size for indirect buffer table");
> > exit(1);
> > }
> > @@ -380,8 +389,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
> >
> > /* loop over the indirect descriptor table */
> > indirect = 1;
> > - max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
> > - desc_pa = vring_desc_addr(desc_pa, i);
> > + max = vring_desc_len(desc_pa, i, vq->vdev) / sizeof(VRingDesc);
> > + desc_pa = vring_desc_addr(desc_pa, i, vq->vdev);
> > num_bufs = i = 0;
> > }
> >
> > @@ -392,15 +401,17 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
> > exit(1);
> > }
> >
> > - if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
> > - in_total += vring_desc_len(desc_pa, i);
> > + if (vring_desc_flags(desc_pa, i, vq->vdev)
> > + & VRING_DESC_F_WRITE) {
>
> Again, no need for two lines?
>
> > + in_total += vring_desc_len(desc_pa, i, vq->vdev);
> > } else {
> > - out_total += vring_desc_len(desc_pa, i);
> > + out_total += vring_desc_len(desc_pa, i, vq->vdev);
> > }
> > if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
> > goto done;
> > }
> > - } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
> > + } while ((i = virtqueue_next_desc(desc_pa, i, max, vq->vdev))
> > + != max);
>
> Doesn't that while statement also still fit into 80 columns?
>
> > if (!indirect)
> > total_bufs = num_bufs;
> > @@ -459,15 +470,15 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
> > vring_avail_event(vq, vring_avail_idx(vq));
> > }
> >
> > - if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
> > - if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
> > + if (vring_desc_flags(desc_pa, i, vq->vdev) & VRING_DESC_F_INDIRECT) {
> > + if (vring_desc_len(desc_pa, i, vq->vdev) % sizeof(VRingDesc)) {
> > error_report("Invalid size for indirect buffer table");
> > exit(1);
> > }
> >
> > /* loop over the indirect descriptor table */
> > - max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
> > - desc_pa = vring_desc_addr(desc_pa, i);
> > + max = vring_desc_len(desc_pa, i, vq->vdev) / sizeof(VRingDesc);
> > + desc_pa = vring_desc_addr(desc_pa, i, vq->vdev);
> > i = 0;
> > }
> >
> > @@ -475,30 +486,32 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
> > do {
> > struct iovec *sg;
> >
> > - if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
> > + if (vring_desc_flags(desc_pa, i, vq->vdev) & VRING_DESC_F_WRITE) {
> > if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
> > error_report("Too many write descriptors in indirect table");
> > exit(1);
> > }
> > - elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
> > + elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i,
> > + vq->vdev);
>
> That one seems to be pretty close, but could also still fit into 80
> columns?
>
> > sg = &elem->in_sg[elem->in_num++];
> > } else {
> > if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
> > error_report("Too many read descriptors in indirect table");
> > exit(1);
> > }
> > - elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
> > + elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i,
> > + vq->vdev);
> > sg = &elem->out_sg[elem->out_num++];
> > }
> >
> > - sg->iov_len = vring_desc_len(desc_pa, i);
> > + sg->iov_len = vring_desc_len(desc_pa, i, vq->vdev);
> >
> > /* If we've got too many, that implies a descriptor loop. */
> > if ((elem->in_num + elem->out_num) > max) {
> > error_report("Looped descriptor");
> > exit(1);
> > }
> > - } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
> > + } while ((i = virtqueue_next_desc(desc_pa, i, max, vq->vdev)) != max);
> >
> > /* Now map what we have collected */
> > virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
>
> Apart from the cosmetic nits, patch looks fine to me.
>
Heh... those are leftovers from a first attempt where I was passing
vdev->needs_byteswap around and went over the 80-column limit. :)
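
For context, the virtio_*_phys accessors used throughout the diff come from
patch 1/8 and are not shown here. Roughly, they boil down to something like
the sketch below, keyed off the per-device needs_byteswap flag mentioned in
the commit message (a simplified illustration, not a verbatim copy of what
patch 1/8 adds):

/* Rough sketch only -- see patch 1/8 for the real helpers.  Assumes
 * "hw/virtio/virtio.h", "exec/memory.h" and "qemu/bswap.h" are included
 * and that VirtIODevice carries a bool needs_byteswap field. */
static inline uint16_t virtio_lduw_phys(AddressSpace *as, hwaddr pa,
                                        VirtIODevice *vdev)
{
    uint16_t val = lduw_phys(as, pa);

    /* Byte-swap only when the device was flagged as needing it. */
    return vdev->needs_byteswap ? bswap16(val) : val;
}

static inline void virtio_stw_phys(AddressSpace *as, hwaddr pa,
                                   uint16_t val, VirtIODevice *vdev)
{
    /* Mirror of the load: swap the value before storing if needed. */
    stw_phys(as, pa, vdev->needs_byteswap ? bswap16(val) : val);
}

The point of keeping the decision per device is that each virtio device can
follow its guest's endianness instead of the target default, which is what
a bi-endian target needs.
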
> Reviewed-by: Thomas Huth <thuth@linux.vnet.ibm.com>
>
>
>
Thanks!
--
Gregory Kurz kurzgreg@fr.ibm.com
gkurz@linux.vnet.ibm.com
Software Engineer @ IBM/Meiosys http://www.ibm.com
Tel +33 (0)562 165 496
"Anarchy is about taking complete responsibility for yourself."
Alan Moore.