From: Rusty Russell <rusty@rustcorp.com.au>
To: qemu-devel@nongnu.org
Cc: Rusty Russell <rusty@rustcorp.com.au>
Subject: [Qemu-devel] [PATCH 3/8] virtio: allow byte swapping for vring and config access
Date: Mon, 12 Aug 2013 17:29:23 +0930
Message-ID: <1376294363-4650-4-git-send-email-rusty@rustcorp.com.au>
In-Reply-To: <1376294363-4650-1-git-send-email-rusty@rustcorp.com.au>
This is based on a simpler patch by Anthony Liguori, which only handled
the vring accesses. We also need some drivers to access these helpers,
e.g. for data which contains headers.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
hw/virtio/virtio.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
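(Note for readers, not part of the patch: the virtio_*_phys() wrappers
come from virtio-access.h, which is introduced elsewhere in this series
and is not shown in this diff. A minimal sketch of what such a wrapper
pair could look like follows; virtio_byteswap_needed() is an assumed
placeholder name for the series' endianness predicate, not a real
function in the tree.)

    /* Hypothetical sketch of hw/virtio/virtio-access.h helpers.
     * Assumes the usual QEMU headers that declare lduw_phys()/stw_phys(),
     * plus "qemu/bswap.h" for bswap16().  virtio_byteswap_needed() stands
     * in for whatever predicate decides that the guest's virtio endianness
     * differs from the host's. */
    static inline uint16_t virtio_lduw_phys(hwaddr pa)
    {
        if (virtio_byteswap_needed()) {
            return bswap16(lduw_phys(pa));
        }
        return lduw_phys(pa);
    }

    static inline void virtio_stw_phys(hwaddr pa, uint16_t value)
    {
        if (virtio_byteswap_needed()) {
            value = bswap16(value);
        }
        stw_phys(pa, value);
    }

The point of the patch below is that every vring access already funnels
through a handful of accessors in virtio.c, so the byte-swap decision
can be made in one place rather than in each device model.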
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 09f62c6..178647b 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -18,6 +18,7 @@
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/virtio-access.h"
/*
* The alignment to use between consumer and producer parts of vring.
@@ -104,49 +105,49 @@ static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
- return ldq_phys(pa);
+ return virtio_ldq_phys(pa);
}
static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
- return ldl_phys(pa);
+ return virtio_ldl_phys(pa);
}
static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
- return lduw_phys(pa);
+ return virtio_lduw_phys(pa);
}
static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
hwaddr pa;
pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
- return lduw_phys(pa);
+ return virtio_lduw_phys(pa);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, flags);
- return lduw_phys(pa);
+ return virtio_lduw_phys(pa);
}
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, idx);
- return lduw_phys(pa);
+ return virtio_lduw_phys(pa);
}
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
hwaddr pa;
pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
- return lduw_phys(pa);
+ return virtio_lduw_phys(pa);
}
static inline uint16_t vring_used_event(VirtQueue *vq)
@@ -158,42 +159,42 @@ static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
- stl_phys(pa, val);
+ virtio_stl_phys(pa, val);
}
static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
- stl_phys(pa, val);
+ virtio_stl_phys(pa, val);
}
static uint16_t vring_used_idx(VirtQueue *vq)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, idx);
- return lduw_phys(pa);
+ return virtio_lduw_phys(pa);
}
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, idx);
- stw_phys(pa, val);
+ virtio_stw_phys(pa, val);
}
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, flags);
- stw_phys(pa, lduw_phys(pa) | mask);
+ virtio_stw_phys(pa, virtio_lduw_phys(pa) | mask);
}
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
hwaddr pa;
pa = vq->vring.used + offsetof(VRingUsed, flags);
- stw_phys(pa, lduw_phys(pa) & ~mask);
+ virtio_stw_phys(pa, virtio_lduw_phys(pa) & ~mask);
}
static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
@@ -203,7 +204,7 @@ static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
return;
}
pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
- stw_phys(pa, val);
+ virtio_stw_phys(pa, val);
}
void virtio_queue_set_notification(VirtQueue *vq, int enable)
--
1.8.1.2
Thread overview: 19+ messages
2013-08-12 7:59 [Qemu-devel] [PATCH 0/8] virtio for endian curious guests Take #2 Rusty Russell
2013-08-12 7:59 ` [Qemu-devel] [PATCH 1/8] virtio_get_byteswap: function for endian-ambivalent targets using virtio Rusty Russell
2013-08-12 9:28 ` Benjamin Herrenschmidt
2013-08-12 9:39 ` Peter Maydell
2013-08-12 9:43 ` Benjamin Herrenschmidt
2013-08-12 9:45 ` Peter Maydell
2013-08-12 9:50 ` Benjamin Herrenschmidt
2013-08-12 9:52 ` Peter Maydell
2013-08-12 9:56 ` Benjamin Herrenschmidt
2013-08-12 10:36 ` Peter Maydell
2013-08-12 12:56 ` Anthony Liguori
2013-08-13 4:20 ` Rusty Russell
2013-08-13 5:30 ` Benjamin Herrenschmidt
2013-08-14 0:03 ` Rusty Russell
2013-09-06 2:27 ` Rusty Russell
2013-08-12 7:59 ` [Qemu-devel] [PATCH 2/8] target-ppc: ppc64 target's virtio can be either endian Rusty Russell
2013-08-12 7:59 ` [Qemu-devel] [PATCH 3/8] virtio: allow byte swapping for vring and config access Rusty Russell [this message]
2013-09-09 12:44 ` [Qemu-devel] [PATCH] hw/9pfs/virtio_9p_device: use virtio wrappers to access headers Greg Kurz
2013-09-10 5:21 ` Rusty Russell