From: Jean Guyader <jean.guyader@citrix.com>
To: xen-devel@lists.xen.org
Cc: Jean Guyader <jean.guyader@citrix.com>
Subject: [PATCH 4/5] xen: Add V4V implementation
Date: Thu, 28 Jun 2012 17:26:25 +0100 [thread overview]
Message-ID: <1340900786-21802-5-git-send-email-jean.guyader@citrix.com> (raw)
In-Reply-To: <1340900786-21802-1-git-send-email-jean.guyader@citrix.com>
[-- Attachment #1: Type: text/plain, Size: 946 bytes --]
Set up v4v when a domain gets created and clean up
when a domain dies. Wire up the v4v hypercall.
Include v4v internal and public headers.
Signed-off-by: Jean Guyader <jean.guyader@citrix.com>
---
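A minimal sketch of guest-side ring registration, for reviewers; the
v4v_hypercall() wrapper and the MY_PORT, NPAGES and RING_LEN constants
below are illustrative assumptions, not part of this patch:

    v4v_ring_t *ring;          /* NPAGES contiguous, zeroed guest pages */
    v4v_pfn_t pfns[NPAGES];    /* frame numbers backing those pages */
    int rc;

    ring->magic = V4V_RING_MAGIC;
    ring->id.addr.port = MY_PORT;       /* hypothetical port number */
    ring->id.partner = V4V_DOMID_ANY;   /* accept any sending domain */
    ring->len = RING_LEN;               /* a multiple of 16 that fits,
                                           together with the header,
                                           inside the NPAGES pages */
    ring->rx_ptr = ring->tx_ptr = 0;

    rc = v4v_hypercall(V4VOP_register_ring, ring, pfns, NULL, NPAGES, 0);
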
xen/arch/x86/hvm/hvm.c | 9 +-
xen/arch/x86/x86_32/entry.S | 2 +
xen/arch/x86/x86_64/compat/entry.S | 2 +
xen/arch/x86/x86_64/entry.S | 2 +
xen/common/Makefile | 1 +
xen/common/domain.c | 11 +-
xen/common/v4v.c | 1755 ++++++++++++++++++++++++++++++++++++
xen/include/public/v4v.h | 240 +++++
xen/include/public/xen.h | 2 +-
xen/include/xen/sched.h | 5 +
xen/include/xen/v4v.h | 187 ++++
11 files changed, 2211 insertions(+), 5 deletions(-)
create mode 100644 xen/common/v4v.c
create mode 100644 xen/include/public/v4v.h
create mode 100644 xen/include/xen/v4v.h
[-- Attachment #2: 0004-xen-Add-V4V-implementation.patch --]
[-- Type: text/x-patch; name="0004-xen-Add-V4V-implementation.patch", Size: 66368 bytes --]
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e0d495d..6f2d70e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3124,7 +3124,8 @@ static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
HYPERCALL(set_timer_op),
HYPERCALL(hvm_op),
HYPERCALL(sysctl),
- HYPERCALL(tmem_op)
+ HYPERCALL(tmem_op),
+ HYPERCALL(v4v_op)
};
#else /* defined(__x86_64__) */
@@ -3209,7 +3210,8 @@ static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
HYPERCALL(set_timer_op),
HYPERCALL(hvm_op),
HYPERCALL(sysctl),
- HYPERCALL(tmem_op)
+ HYPERCALL(tmem_op),
+ HYPERCALL(v4v_op)
};
#define COMPAT_CALL(x) \
@@ -3226,7 +3228,8 @@ static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
COMPAT_CALL(set_timer_op),
HYPERCALL(hvm_op),
HYPERCALL(sysctl),
- HYPERCALL(tmem_op)
+ HYPERCALL(tmem_op),
+ HYPERCALL(v4v_op)
};
#endif /* defined(__x86_64__) */
diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S
index 2982679..b3e0da4 100644
--- a/xen/arch/x86/x86_32/entry.S
+++ b/xen/arch/x86/x86_32/entry.S
@@ -700,6 +700,7 @@ ENTRY(hypercall_table)
.long do_domctl
.long do_kexec_op
.long do_tmem_op
+ .long do_v4v_op
.rept __HYPERVISOR_arch_0-((.-hypercall_table)/4)
.long do_ni_hypercall
.endr
@@ -748,6 +749,7 @@ ENTRY(hypercall_args_table)
.byte 1 /* do_domctl */
.byte 2 /* do_kexec_op */
.byte 1 /* do_tmem_op */
+ .byte 6 /* do_v4v_op */
.rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index f49ff2d..28615f9 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -414,6 +414,7 @@ ENTRY(compat_hypercall_table)
.quad do_domctl
.quad compat_kexec_op
.quad do_tmem_op
+ .quad do_v4v_op
.rept __HYPERVISOR_arch_0-((.-compat_hypercall_table)/8)
.quad compat_ni_hypercall
.endr
@@ -462,6 +463,7 @@ ENTRY(compat_hypercall_args_table)
.byte 1 /* do_domctl */
.byte 2 /* compat_kexec_op */
.byte 1 /* do_tmem_op */
+ .byte 6 /* do_v4v_op */
.rept __HYPERVISOR_arch_0-(.-compat_hypercall_args_table)
.byte 0 /* compat_ni_hypercall */
.endr
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 3836260..918fa59 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -699,6 +699,7 @@ ENTRY(hypercall_table)
.quad do_domctl
.quad do_kexec_op
.quad do_tmem_op
+ .quad do_v4v_op
.rept __HYPERVISOR_arch_0-((.-hypercall_table)/8)
.quad do_ni_hypercall
.endr
@@ -747,6 +748,7 @@ ENTRY(hypercall_args_table)
.byte 1 /* do_domctl */
.byte 2 /* do_kexec */
.byte 1 /* do_tmem_op */
+ .byte 6 /* do_v4v_op */
.rept __HYPERVISOR_arch_0-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
diff --git a/xen/common/Makefile b/xen/common/Makefile
index 9eba8bc..fe3c72c 100644
--- a/xen/common/Makefile
+++ b/xen/common/Makefile
@@ -45,6 +45,7 @@ obj-y += tmem_xen.o
obj-y += radix-tree.o
obj-y += rbtree.o
obj-y += lzo.o
+obj-y += v4v.o
obj-bin-$(CONFIG_X86) += $(foreach n,decompress bunzip2 unxz unlzma unlzo,$(n).init.o)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 8840202..9539d88 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -195,7 +195,8 @@ struct domain *domain_create(
{
struct domain *d, **pd;
enum { INIT_xsm = 1u<<0, INIT_watchdog = 1u<<1, INIT_rangeset = 1u<<2,
- INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 };
+ INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5,
+ INIT_v4v = 1u<<6 };
int init_status = 0;
int poolid = CPUPOOLID_NONE;
@@ -219,6 +220,7 @@ struct domain *domain_create(
spin_lock_init(&d->hypercall_deadlock_mutex);
INIT_PAGE_LIST_HEAD(&d->page_list);
INIT_PAGE_LIST_HEAD(&d->xenpage_list);
+ rwlock_init(&d->v4v_lock);
spin_lock_init(&d->node_affinity_lock);
d->node_affinity = NODE_MASK_ALL;
@@ -274,6 +276,10 @@ struct domain *domain_create(
goto fail;
init_status |= INIT_gnttab;
+ if ( v4v_init(d) != 0 )
+ goto fail;
+ init_status |= INIT_v4v;
+
poolid = 0;
d->mem_event = xzalloc(struct mem_event_per_domain);
@@ -313,6 +319,8 @@ struct domain *domain_create(
xfree(d->mem_event);
if ( init_status & INIT_arch )
arch_domain_destroy(d);
+ if ( init_status & INIT_v4v )
+ v4v_destroy(d);
if ( init_status & INIT_gnttab )
grant_table_destroy(d);
if ( init_status & INIT_evtchn )
@@ -466,6 +474,7 @@ int domain_kill(struct domain *d)
domain_pause(d);
d->is_dying = DOMDYING_dying;
spin_barrier(&d->domain_lock);
+ v4v_destroy(d);
evtchn_destroy(d);
gnttab_release_mappings(d);
tmem_destroy(d->tmem);
diff --git a/xen/common/v4v.c b/xen/common/v4v.c
new file mode 100644
index 0000000..e589fda
--- /dev/null
+++ b/xen/common/v4v.c
@@ -0,0 +1,1755 @@
+/******************************************************************************
+ * V4V
+ *
+ * Version 2 of v2v (Virtual-to-Virtual)
+ *
+ * Copyright (c) 2010, Citrix Systems
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <xen/config.h>
+#include <xen/mm.h>
+#include <xen/compat.h>
+#include <xen/init.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <xen/domain.h>
+#include <xen/v4v.h>
+#include <xen/event.h>
+#include <xen/guest_access.h>
+#include <asm/paging.h>
+#include <asm/p2m.h>
+#include <xen/keyhandler.h>
+#include <xen/v4v_utils.h>
+
+#ifdef V4V_DEBUG
+#define MY_FILE "v4v.c"
+#define v4v_dprintk(format, args...) \
+ do { \
+ printk("%s:%d " format, \
+ MY_FILE, __LINE__, ## args ); \
+ } while ( 0 )
+#else
+#define v4v_dprintk(format, ... ) (void)0
+#endif
+
+
+
+DEFINE_XEN_GUEST_HANDLE (uint8_t);
+static struct v4v_ring_info *v4v_ring_find_info (struct domain *d,
+ struct v4v_ring_id *id);
+
+static struct v4v_ring_info *v4v_ring_find_info_by_addr (struct domain *d,
+ struct v4v_addr *a,
+ domid_t p);
+
+/*
+ * locks
+ */
+
+/*
+ * locking is organized as follows:
+ *
+ * the global lock v4v_lock: L1 protects the v4v elements
+ * of all struct domain *d in the system; it does not
+ * protect any of the elements of d->v4v, just their
+ * addresses. By extension, since the destruction of
+ * a domain with a non-NULL d->v4v will need to free
+ * the d->v4v pointer, holding this lock guarantees
+ * that no domain pointers in which v4v is interested
+ * become invalid whilst this lock is held.
+ */
+
+static DEFINE_RWLOCK (v4v_lock); /* L1 */
+
+/*
+ * the lock d->v4v->lock: L2: a read lock protects the hash table and
+ * the elements in the hash table d->v4v->ring_hash, and
+ * the node and id fields in struct v4v_ring_info in the
+ * hash table. A write lock on L2 protects all of the elements of
+ * struct v4v_ring_info. To take L2 you must already hold R(L1);
+ * W(L1) implies W(L2) and L3.
+ *
+ * the lock v4v_ring_info *ringinfo; ringinfo->lock: L3:
+ * protects len, tx_ptr, the guest ring, the
+ * guest ring_data and the pending list. To take L3 you must
+ * already hold R(L2). W(L2) implies L3.
+ */
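+
+/*
+ * Illustrative nesting of the three levels (a sketch of the intended
+ * order, not a code path in this file):
+ *
+ *     read_lock (&v4v_lock);            R(L1)
+ *     read_lock (&d->v4v->lock);        R(L2)
+ *     spin_lock (&ring_info->lock);     L3
+ *     ... access ring contents ...
+ *     spin_unlock (&ring_info->lock);
+ *     read_unlock (&d->v4v->lock);
+ *     read_unlock (&v4v_lock);
+ */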
+
+
+/*
+ * Debugs
+ */
+
+#ifdef V4V_DEBUG
+static void
+v4v_hexdump (void *_p, int len)
+{
+ uint8_t *buf = (uint8_t *) _p;
+ int i, j;
+
+ for (i = 0; i < len; i += 16)
+ {
+ printk (KERN_ERR "%p:", &buf[i]);
+ for (j = 0; j < 16; ++j)
+ {
+ int k = i + j;
+ if (k < len)
+ printk (" %02x", buf[k]);
+ else
+ printk (" ");
+ }
+ printk (" ");
+
+ for (j = 0; j < 16; ++j)
+ {
+ int k = i + j;
+ if (k < len)
+ printk ("%c", ((buf[k] > 32) && (buf[k] < 127)) ? buf[k] : '.');
+ else
+ printk (" ");
+ }
+ printk ("\n");
+ }
+}
+#endif
+
+
+/*
+ * Event channel
+ */
+
+static void
+v4v_signal_domain (struct domain *d)
+{
+ v4v_dprintk("send guest VIRQ_V4V domid:%d\n", d->domain_id);
+ send_guest_vcpu_virq (d->vcpu[0], VIRQ_V4V);
+}
+
+static void
+v4v_signal_domid (domid_t id)
+{
+ struct domain *d = get_domain_by_id (id);
+ if (!d)
+ return;
+ v4v_signal_domain (d);
+ put_domain (d);
+}
+
+
+/*
+ * ring buffer
+ */
+
+/* caller must have L3 */
+static void
+v4v_ring_unmap (struct v4v_ring_info *ring_info)
+{
+ int i;
+ for (i = 0; i < ring_info->npage; ++i)
+ {
+ if (!ring_info->mfn_mapping[i])
+ continue;
+ v4v_dprintk("");
+ v4v_dprintk("unmapping page %p from %p\n",
+ (void*) mfn_x (ring_info->mfns[i]),
+ ring_info->mfn_mapping[i]);
+
+ unmap_domain_page (ring_info->mfn_mapping[i]);
+ ring_info->mfn_mapping[i] = NULL;
+ }
+}
+
+/* caller must have L3 */
+static uint8_t *
+v4v_ring_map_page (struct v4v_ring_info *ring_info, int i)
+{
+ if (i >= ring_info->npage)
+ return NULL;
+ if (ring_info->mfn_mapping[i])
+ return ring_info->mfn_mapping[i];
+ ring_info->mfn_mapping[i] = map_domain_page (mfn_x (ring_info->mfns[i]));
+
+ v4v_dprintk("mapping page %p to %p\n",
+ (void *) mfn_x (ring_info->mfns[i]),
+ ring_info->mfn_mapping[i]);
+ return ring_info->mfn_mapping[i];
+}
+
+/* caller must have L3 */
+static int
+v4v_memcpy_from_guest_ring (void *_dst, struct v4v_ring_info *ring_info,
+ uint32_t offset, uint32_t len)
+{
+ int page = offset >> PAGE_SHIFT;
+ uint8_t *src;
+ uint8_t *dst = _dst;
+
+ offset &= PAGE_SIZE - 1;
+
+ while ((offset + len) > PAGE_SIZE)
+ {
+ src = v4v_ring_map_page (ring_info, page);
+
+ if (!src)
+ {
+ return -EFAULT;
+ }
+
+ v4v_dprintk("memcpy(%p,%p+%d,%d)\n",
+ dst, src, offset,
+ (int) (PAGE_SIZE - offset));
+ memcpy (dst, src + offset, PAGE_SIZE - offset);
+
+ page++;
+ len -= PAGE_SIZE - offset;
+ dst += PAGE_SIZE - offset;
+ offset = 0;
+ }
+
+ src = v4v_ring_map_page (ring_info, page);
+ if (!src)
+ {
+ return -EFAULT;
+ }
+
+ v4v_dprintk("memcpy(%p,%p+%d,%d)\n", dst, src, offset, len);
+ memcpy (dst, src + offset, len);
+
+ return 0;
+}
+
+
+/* caller must have L3 */
+static int
+v4v_update_tx_ptr (struct v4v_ring_info *ring_info, uint32_t tx_ptr)
+{
+ uint8_t *dst = v4v_ring_map_page (ring_info, 0);
+ volatile uint32_t *p;
+
+ if (!dst)
+ return -EFAULT;
+
+ p = (volatile uint32_t *)(dst + offsetof (v4v_ring_t, tx_ptr));
+ *p = tx_ptr;
+ return 0;
+}
+
+/* caller must have L3 */
+static int
+v4v_memcpy_to_guest_ring (struct v4v_ring_info *ring_info, uint32_t offset,
+ void *_src, uint32_t len)
+{
+ int page = offset >> PAGE_SHIFT;
+ uint8_t *dst;
+ uint8_t *src = _src;
+
+ offset &= PAGE_SIZE - 1;
+
+ while ((offset + len) > PAGE_SIZE)
+ {
+ dst = v4v_ring_map_page (ring_info, page);
+
+ if (!dst)
+ {
+ v4v_dprintk("!dst\n");
+ return -EFAULT;
+ }
+
+#ifdef V4V_DEBUG
+ v4v_dprintk("memcpy(%p+%d,%p,%d)\n",
+ dst, offset, src,
+ (int) (PAGE_SIZE - offset));
+ v4v_hexdump (src, PAGE_SIZE - offset);
+ v4v_hexdump (dst + offset, PAGE_SIZE - offset);
+#endif
+ memcpy (dst + offset, src, PAGE_SIZE - offset);
+
+ page++;
+ len -= (PAGE_SIZE - offset);
+ src += (PAGE_SIZE - offset);
+ offset = 0;
+ }
+
+ dst = v4v_ring_map_page (ring_info, page);
+
+ if (!dst)
+ {
+ v4v_dprintk("attempted to map page %d of %d\n", page, ring_info->npage);
+ return -EFAULT;
+ }
+
+#ifdef V4V_DEBUG
+ v4v_dprintk("memcpy(%p+%d,%p,%d)\n",
+ dst, offset, src, len);
+ v4v_hexdump (src, len);
+ v4v_hexdump (dst + offset, len);
+#endif
+ memcpy (dst + offset, src, len);
+
+ return 0;
+}
+
+/* caller must have L3 */
+static int
+v4v_memcpy_to_guest_ring_from_guest(struct v4v_ring_info *ring_info,
+ uint32_t offset,
+ XEN_GUEST_HANDLE (uint8_t) src_hnd,
+ uint32_t len)
+{
+ int page = offset >> PAGE_SHIFT;
+ uint8_t *dst;
+
+ offset &= PAGE_SIZE - 1;
+
+ while ( (offset + len) > PAGE_SIZE )
+ {
+ dst = v4v_ring_map_page (ring_info, page);
+
+ if ( !dst )
+ {
+ v4v_dprintk("!dst\n");
+ return -EFAULT;
+ }
+
+ v4v_dprintk("copy_from_guest(%p+%d,%p,%d)\n",
+ dst, offset, (void *) src_hnd.p,
+ (int) (PAGE_SIZE - offset));
+ if ( copy_from_guest ((dst + offset), src_hnd, PAGE_SIZE - offset) )
+ {
+ v4v_dprintk("copy_from_guest failed\n");
+ return -EFAULT;
+ }
+
+ page++;
+ len -= PAGE_SIZE - offset;
+ guest_handle_add_offset (src_hnd, PAGE_SIZE - offset);
+ offset = 0;
+ }
+
+ dst = v4v_ring_map_page (ring_info, page);
+ if (!dst)
+ {
+ v4v_dprintk("v4v_ring_map failed\n");
+ return -EFAULT;
+ }
+
+ v4v_dprintk("copy_from_guest(%p+%d,%p,%d)\n",
+ dst, offset, (void *) src_hnd.p, len);
+ if ( copy_from_guest ((dst + offset), src_hnd, len) )
+ {
+ v4v_dprintk("copy_from_guest failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+v4v_ringbuf_get_rx_ptr (struct domain *d, struct v4v_ring_info *ring_info,
+ uint32_t * rx_ptr)
+{
+ v4v_ring_t *ringp;
+
+ if ( ring_info->npage == 0 )
+ return -1;
+
+ ringp = map_domain_page (mfn_x (ring_info->mfns[0]));
+ if ( !ringp )
+ return -1;
+
+ v4v_dprintk("v4v_ringbuf_get_rx_ptr: mapped %p to %p\n",
+ (void *) mfn_x (ring_info->mfns[0]), ringp);
+
+ *rx_ptr = *(volatile uint32_t *) &ringp->rx_ptr;
+
+ unmap_domain_page (ringp);
+ return 0;
+}
+
+uint32_t
+v4v_ringbuf_payload_space (struct domain * d, struct v4v_ring_info * ring_info)
+{
+ v4v_ring_t ring;
+ int32_t ret;
+
+ ring.tx_ptr = ring_info->tx_ptr;
+ ring.len = ring_info->len;
+
+ if ( v4v_ringbuf_get_rx_ptr (d, ring_info, &ring.rx_ptr) )
+ return 0;
+
+ v4v_dprintk("v4v_ringbuf_payload_space:tx_ptr=%d rx_ptr=%d\n",
+ (int) ring.tx_ptr, (int) ring.rx_ptr);
+ if ( ring.rx_ptr == ring.tx_ptr )
+ return ring.len - sizeof (struct v4v_ring_message_header);
+
+ ret = ring.rx_ptr - ring.tx_ptr;
+ if ( ret < 0 )
+ ret += ring.len;
+
+ ret -= sizeof (struct v4v_ring_message_header);
+ ret -= V4V_ROUNDUP (1);
+
+ return (ret < 0) ? 0 : ret;
+}
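+
+/*
+ * Worked example (illustrative): with ring.len = 4096, tx_ptr = 1024
+ * and rx_ptr = 512, the free space wraps: 512 - 1024 + 4096 = 3584
+ * bytes, less sizeof (struct v4v_ring_message_header) and the 16 byte
+ * V4V_ROUNDUP (1) slack kept so that tx_ptr can never catch rx_ptr.
+ */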
+
+/*caller must have L3*/
+static ssize_t
+v4v_ringbuf_insert (struct domain *d,
+ struct v4v_ring_info *ring_info,
+ struct v4v_ring_id *src_id, uint32_t proto,
+ XEN_GUEST_HANDLE (void) buf_hnd_void, uint32_t len)
+{
+ XEN_GUEST_HANDLE (uint8_t) buf_hnd =
+ guest_handle_cast (buf_hnd_void, uint8_t);
+ v4v_ring_t ring;
+ struct v4v_ring_message_header mh = { 0 };
+ int32_t sp;
+ int32_t happy_ret = len;
+ int32_t ret = 0;
+
+ if ( (V4V_ROUNDUP (len) + sizeof (struct v4v_ring_message_header)) >=
+ ring_info->len )
+ {
+ v4v_dprintk("EMSGSIZE\n");
+ return -EMSGSIZE;
+ }
+
+ do
+ {
+ if ( (ret = v4v_memcpy_from_guest_ring (&ring, ring_info,
+ 0, sizeof (ring))) )
+ break;
+
+ ring.tx_ptr = ring_info->tx_ptr;
+ ring.len = ring_info->len;
+
+ v4v_dprintk("ring.tx_ptr=%d ring.rx_ptr=%d ring.len=%d ring_info->tx_ptr=%d\n",
+ ring.tx_ptr, ring.rx_ptr, ring.len, ring_info->tx_ptr);
+
+ if ( ring.rx_ptr == ring.tx_ptr )
+ sp = ring_info->len;
+ else
+ {
+ sp = ring.rx_ptr - ring.tx_ptr;
+ if (sp < 0)
+ sp += ring.len;
+ }
+
+ if ( (V4V_ROUNDUP (len) + sizeof (struct v4v_ring_message_header)) >= sp )
+ {
+ v4v_dprintk("EAGAIN\n");
+ ret = -EAGAIN;
+ break;
+ }
+
+ mh.len = len + sizeof (struct v4v_ring_message_header);
+ mh.source = src_id->addr;
+ mh.protocol = proto;
+
+ if ( (ret = v4v_memcpy_to_guest_ring (ring_info,
+ ring.tx_ptr + sizeof (v4v_ring_t),
+ &mh, sizeof (mh))) )
+ break;
+
+ ring.tx_ptr += sizeof (mh);
+ if ( ring.tx_ptr == ring_info->len )
+ ring.tx_ptr = 0;
+
+ sp = ring.len - ring.tx_ptr;
+
+ if ( len > sp )
+ {
+ if ((ret = v4v_memcpy_to_guest_ring_from_guest (ring_info,
+ ring.tx_ptr + sizeof (v4v_ring_t),
+ buf_hnd, sp)))
+ break;
+
+ ring.tx_ptr = 0;
+ len -= sp;
+ guest_handle_add_offset (buf_hnd, sp);
+ }
+
+ if ( (ret = v4v_memcpy_to_guest_ring_from_guest (ring_info,
+ ring.tx_ptr + sizeof (v4v_ring_t),
+ buf_hnd, len)) )
+ break;
+
+ ring.tx_ptr += V4V_ROUNDUP (len);
+
+ if ( ring.tx_ptr == ring_info->len )
+ ring.tx_ptr = 0;
+
+ mb ();
+ ring_info->tx_ptr = ring.tx_ptr;
+
+ if ( (ret = v4v_update_tx_ptr(ring_info, ring.tx_ptr)) )
+ break;
+
+ }
+ while ( 0 );
+
+ v4v_ring_unmap (ring_info);
+
+ return ret ? ret : happy_ret;
+
+}
+
+static ssize_t
+v4v_iov_count (XEN_GUEST_HANDLE (v4v_iov_t) iovs, int niov)
+{
+ v4v_iov_t iov;
+ size_t ret = 0;
+
+ while ( niov-- )
+ {
+ if ( copy_from_guest (&iov, iovs, 1) )
+ return -EFAULT;
+
+ ret += iov.iov_len;
+ guest_handle_add_offset (iovs, 1);
+ }
+
+ return ret;
+}
+
+/*caller must have L3*/
+static ssize_t
+v4v_ringbuf_insertv (struct domain *d,
+ struct v4v_ring_info *ring_info,
+ struct v4v_ring_id *src_id, uint32_t proto,
+ XEN_GUEST_HANDLE (v4v_iov_t) iovs, uint32_t niov,
+ uint32_t len)
+{
+ v4v_ring_t ring;
+ struct v4v_ring_message_header mh = { 0 };
+ int32_t sp;
+ int32_t happy_ret;
+ int32_t ret = 0;
+
+ happy_ret = len;
+
+ if ( (V4V_ROUNDUP (len) + sizeof (struct v4v_ring_message_header) ) >=
+ ring_info->len)
+ return -EMSGSIZE;
+
+ do
+ {
+ if ( (ret = v4v_memcpy_from_guest_ring (&ring, ring_info, 0,
+ sizeof (ring))) )
+ break;
+
+ ring.tx_ptr = ring_info->tx_ptr;
+ ring.len = ring_info->len;
+
+ v4v_dprintk("ring.tx_ptr=%d ring.rx_ptr=%d ring.len=%d ring_info->tx_ptr=%d\n",
+ ring.tx_ptr, ring.rx_ptr, ring.len, ring_info->tx_ptr);
+
+ if ( ring.rx_ptr == ring.tx_ptr )
+ sp = ring_info->len;
+ else
+ {
+ sp = ring.rx_ptr - ring.tx_ptr;
+ if (sp < 0)
+ sp += ring.len;
+ }
+
+ if ( (V4V_ROUNDUP (len) + sizeof (struct v4v_ring_message_header) ) >= sp)
+ {
+ v4v_dprintk("EAGAIN\n");
+ ret = -EAGAIN;
+ break;
+ }
+
+ mh.len = len + sizeof (struct v4v_ring_message_header);
+ mh.source = src_id->addr;
+ mh.protocol = proto;
+
+ if ( (ret = v4v_memcpy_to_guest_ring (ring_info,
+ ring.tx_ptr + sizeof (v4v_ring_t),
+ &mh, sizeof (mh))) )
+ break;
+
+ ring.tx_ptr += sizeof (mh);
+ if ( ring.tx_ptr == ring_info->len )
+ ring.tx_ptr = 0;
+
+ while ( niov-- )
+ {
+ XEN_GUEST_HANDLE (uint8_t) buf_hnd;
+ v4v_iov_t iov;
+
+ if ( copy_from_guest (&iov, iovs, 1) )
+ {
+ ret = -EFAULT;
+ break;
+ }
+
+ buf_hnd.p = (uint8_t *) iov.iov_base; //FIXME
+ len = iov.iov_len;
+
+ if ( unlikely (!guest_handle_okay (buf_hnd, len)) )
+ {
+ ret = -EFAULT;
+ break;
+ }
+
+ sp = ring.len - ring.tx_ptr;
+
+ if ( len > sp )
+ {
+ if ( (ret = v4v_memcpy_to_guest_ring_from_guest (ring_info,
+ ring.tx_ptr +
+ sizeof (v4v_ring_t),
+ buf_hnd, sp)) )
+ break;
+
+ ring.tx_ptr = 0;
+ len -= sp;
+ guest_handle_add_offset (buf_hnd, sp);
+ }
+
+ if ( (ret = v4v_memcpy_to_guest_ring_from_guest (ring_info,
+ ring.tx_ptr +
+ sizeof (v4v_ring_t),
+ buf_hnd, len)) )
+ break;
+
+ ring.tx_ptr += len;
+
+ if (ring.tx_ptr == ring_info->len)
+ ring.tx_ptr = 0;
+
+ guest_handle_add_offset (iovs, 1);
+ }
+ if ( ret )
+ break;
+
+ ring.tx_ptr = V4V_ROUNDUP (ring.tx_ptr);
+
+ if ( ring.tx_ptr >= ring_info->len )
+ ring.tx_ptr -= ring_info->len;
+
+ mb ();
+ ring_info->tx_ptr = ring.tx_ptr;
+ if ( (ret = v4v_update_tx_ptr(ring_info, ring.tx_ptr)) )
+ break;
+ }
+ while ( 0 );
+
+ v4v_ring_unmap (ring_info);
+
+ return ret ? ret : happy_ret;
+}
+
+
+
+/* pending */
+static void
+v4v_pending_remove_ent (struct v4v_pending_ent *ent)
+{
+ hlist_del (&ent->node);
+ xfree (ent);
+}
+
+/*caller must have L3 */
+static void
+v4v_pending_remove_all (struct v4v_ring_info *info)
+{
+ struct hlist_node *node, *next;
+ struct v4v_pending_ent *pending_ent;
+
+ hlist_for_each_entry_safe (pending_ent, node, next, &info->pending,
+ node) v4v_pending_remove_ent (pending_ent);
+}
+
+/*Caller must hold L1 */
+static void
+v4v_pending_notify (struct domain *caller_d, struct hlist_head *to_notify)
+{
+ struct hlist_node *node, *next;
+ struct v4v_pending_ent *pending_ent;
+
+ hlist_for_each_entry_safe (pending_ent, node, next, to_notify, node)
+ {
+ hlist_del (&pending_ent->node);
+ v4v_signal_domid (pending_ent->id);
+ xfree (pending_ent);
+ }
+
+}
+
+/*caller must have R(L2) */
+static void
+v4v_pending_find (struct v4v_ring_info *ring_info, uint32_t payload_space,
+ struct hlist_head *to_notify)
+{
+ struct hlist_node *node, *next;
+ struct v4v_pending_ent *ent;
+
+ spin_lock (&ring_info->lock);
+ hlist_for_each_entry_safe (ent, node, next, &ring_info->pending, node)
+ {
+ if (payload_space >= ent->len)
+ {
+ hlist_del (&ent->node);
+ hlist_add_head (&ent->node, to_notify);
+ }
+ }
+ spin_unlock (&ring_info->lock);
+}
+
+/*caller must have L3 */
+static int
+v4v_pending_queue (struct v4v_ring_info *ring_info, domid_t src_id, int len)
+{
+ struct v4v_pending_ent *ent = xmalloc (struct v4v_pending_ent);
+
+ if ( !ent )
+ {
+ v4v_dprintk("ENOMEM\n");
+ return -ENOMEM;
+ }
+
+ ent->len = len;
+ ent->id = src_id;
+
+ hlist_add_head (&ent->node, &ring_info->pending);
+
+ return 0;
+}
+
+/* L3 */
+static int
+v4v_pending_requeue (struct v4v_ring_info *ring_info, domid_t src_id, int len)
+{
+ struct hlist_node *node;
+ struct v4v_pending_ent *ent;
+
+ hlist_for_each_entry (ent, node, &ring_info->pending, node)
+ {
+ if ( ent->id == src_id )
+ {
+ if ( ent->len < len )
+ ent->len = len;
+ return 0;
+ }
+ }
+
+ return v4v_pending_queue (ring_info, src_id, len);
+}
+
+
+/* L3 */
+static void
+v4v_pending_cancel (struct v4v_ring_info *ring_info, domid_t src_id)
+{
+ struct hlist_node *node, *next;
+ struct v4v_pending_ent *ent;
+
+ hlist_for_each_entry_safe (ent, node, next, &ring_info->pending, node)
+ {
+ if ( ent->id == src_id)
+ {
+ hlist_del (&ent->node);
+ xfree (ent);
+ }
+ }
+}
+
+/*
+ * ring data
+ */
+
+/*Caller should hold R(L1)*/
+static int
+v4v_fill_ring_data (struct domain *src_d,
+ XEN_GUEST_HANDLE (v4v_ring_data_ent_t) data_ent_hnd)
+{
+ v4v_ring_data_ent_t ent;
+ struct domain *dst_d;
+ struct v4v_ring_info *ring_info;
+
+ if ( copy_from_guest (&ent, data_ent_hnd, 1) )
+ {
+ v4v_dprintk("EFAULT\n");
+ return -EFAULT;
+ }
+
+ v4v_dprintk("v4v_fill_ring_data: ent.ring.domain=%d,ent.ring.port=%u\n",
+ (int) ent.ring.domain, (int) ent.ring.port);
+
+ ent.flags = 0;
+
+ dst_d = get_domain_by_id (ent.ring.domain);
+
+ if ( dst_d && dst_d->v4v )
+ {
+ read_lock (&dst_d->v4v->lock);
+ ring_info = v4v_ring_find_info_by_addr (dst_d, &ent.ring,
+ src_d->domain_id);
+
+ if ( ring_info )
+ {
+ uint32_t space_avail;
+
+ ent.flags |= V4V_RING_DATA_F_EXISTS;
+ ent.max_message_size =
+ ring_info->len - sizeof (struct v4v_ring_message_header) -
+ V4V_ROUNDUP (1);
+ spin_lock (&ring_info->lock);
+
+ space_avail = v4v_ringbuf_payload_space (dst_d, ring_info);
+
+ if ( space_avail >= ent.space_required )
+ {
+ v4v_pending_cancel (ring_info, src_d->domain_id);
+ ent.flags |= V4V_RING_DATA_F_SUFFICIENT;
+ }
+ else
+ {
+ v4v_pending_requeue (ring_info, src_d->domain_id,
+ ent.space_required);
+ ent.flags |= V4V_RING_DATA_F_PENDING;
+ }
+
+ spin_unlock (&ring_info->lock);
+
+ if ( space_avail == ent.max_message_size )
+ ent.flags |= V4V_RING_DATA_F_EMPTY;
+
+ }
+ read_unlock (&dst_d->v4v->lock);
+ }
+
+ if ( dst_d )
+ put_domain (dst_d);
+
+ if ( copy_field_to_guest (data_ent_hnd, &ent, flags) )
+ {
+ v4v_dprintk("EFAULT\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/* Caller should hold no more than R(L1) */
+static int
+v4v_fill_ring_datas (struct domain *d, int nent,
+ XEN_GUEST_HANDLE (v4v_ring_data_ent_t) data_ent_hnd)
+{
+ int ret = 0;
+
+ read_lock (&v4v_lock);
+ while ( !ret && nent-- )
+ {
+ ret = v4v_fill_ring_data (d, data_ent_hnd);
+ guest_handle_add_offset (data_ent_hnd, 1);
+ }
+ read_unlock (&v4v_lock);
+ return ret;
+}
+
+/*
+ * ring
+ */
+static int
+v4v_find_ring_mfns (struct domain *d, struct v4v_ring_info *ring_info,
+ uint32_t npage, XEN_GUEST_HANDLE (v4v_pfn_t) pfn_hnd)
+{
+ int i,j;
+ mfn_t *mfns;
+ uint8_t **mfn_mapping;
+ unsigned long mfn;
+ struct page_info *page;
+ int ret = 0;
+
+ if ((npage << PAGE_SHIFT) < ring_info->len)
+ {
+ v4v_dprintk("EINVAL\n");
+ return -EINVAL;
+ }
+
+ mfns = xmalloc_array (mfn_t, npage);
+ if ( !mfns )
+ {
+ v4v_dprintk("ENOMEM\n");
+ return -ENOMEM;
+ }
+
+ mfn_mapping = xmalloc_array (uint8_t *, npage);
+ if ( !mfn_mapping )
+ {
+ xfree (mfns);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < npage; ++i)
+ {
+ unsigned long pfn;
+ p2m_type_t p2mt;
+
+ if ( copy_from_guest_offset (&pfn, pfn_hnd, i, 1) )
+ {
+ ret = -EFAULT;
+ v4v_dprintk("EFAULT\n");
+ break;
+ }
+
+ mfn = mfn_x(get_gfn(d, pfn, &p2mt));
+ if ( !mfn_valid(mfn) )
+ {
+ printk(KERN_ERR "v4v domain %d passed invalid mfn %"PRI_mfn" ring %p seq %d\n",
+ d->domain_id, mfn, ring_info, i);
+ ret = -EINVAL;
+ break;
+ }
+ page = mfn_to_page(mfn);
+ if ( !get_page_and_type(page, d, PGT_writable_page) )
+ {
+ printk(KERN_ERR "v4v domain %d passed wrong type mfn %"PRI_mfn" ring %p seq %d\n",
+ d->domain_id, mfn, ring_info, i);
+ ret = -EINVAL;
+ break;
+ }
+ mfns[i] = _mfn(mfn);
+ v4v_dprintk("v4v_find_ring_mfns: %d: %lx -> %lx\n",
+ i, (unsigned long) pfn, (unsigned long) mfn_x (mfns[i]));
+ mfn_mapping[i] = NULL;
+ put_gfn(d, pfn);
+ }
+
+ if ( !ret )
+ {
+ ring_info->npage = npage;
+ ring_info->mfns = mfns;
+ ring_info->mfn_mapping = mfn_mapping;
+ }
+ else
+ {
+ j = i;
+ for ( i = 0; i < j; ++i )
+ if ( mfn_x(mfns[i]) != 0 )
+ put_page_and_type(mfn_to_page(mfn_x(mfns[i])));
+ xfree (mfn_mapping);
+ xfree (mfns);
+ v4v_dprintk("");
+ }
+ return ret;
+}
+
+
+/* caller must hold R(L2) */
+static struct v4v_ring_info *
+v4v_ring_find_info (struct domain *d, struct v4v_ring_id *id)
+{
+ uint16_t hash;
+ struct hlist_node *node;
+ struct v4v_ring_info *ring_info;
+
+ hash = v4v_hash_fn (id);
+
+ v4v_dprintk("ring_find_info: d->v4v=%p, d->v4v->ring_hash[%d]=%p id=%p\n",
+ d->v4v, (int) hash, d->v4v->ring_hash[hash].first, id);
+ v4v_dprintk("ring_find_info: id.addr.port=%d id.addr.domain=%d id.addr.partner=%d\n",
+ id->addr.port, id->addr.domain, id->partner);
+
+ hlist_for_each_entry (ring_info, node, &d->v4v->ring_hash[hash], node)
+ {
+ if ( !memcmp (id, &ring_info->id, sizeof (*id)) )
+ {
+ v4v_dprintk("ring_find_info: ring_info=%p\n", ring_info);
+ return ring_info;
+ }
+ }
+ v4v_dprintk("ring_find_info: no ring_info found\n");
+ return NULL;
+}
+
+/* caller must hold R(L2) */
+static struct v4v_ring_info *
+v4v_ring_find_info_by_addr (struct domain *d, struct v4v_addr *a, domid_t p)
+{
+ struct v4v_ring_id id;
+ struct v4v_ring_info *ret;
+
+ if ( !a )
+ return NULL;
+
+ id.addr.port = a->port;
+ id.addr.domain = d->domain_id;
+ id.partner = p;
+
+ ret = v4v_ring_find_info (d, &id);
+ if ( ret )
+ return ret;
+
+ id.partner = V4V_DOMID_NONE;
+
+ return v4v_ring_find_info (d, &id);
+}
+
+/*caller must hold W(L2) */
+static void v4v_ring_remove_mfns (struct v4v_ring_info *ring_info)
+{
+ int i;
+
+ if ( ring_info->mfns )
+ {
+ for ( i=0; i < ring_info->npage; ++i )
+ if (mfn_x(ring_info->mfns[i]) != 0)
+ put_page_and_type(mfn_to_page(mfn_x(ring_info->mfns[i])));
+ xfree (ring_info->mfns);
+ }
+ ring_info->mfns = NULL;
+}
+
+/*caller must hold W(L2) */
+static void
+v4v_ring_remove_info (struct v4v_ring_info *ring_info)
+{
+ v4v_pending_remove_all (ring_info);
+
+ hlist_del (&ring_info->node);
+ v4v_ring_remove_mfns(ring_info);
+ xfree (ring_info);
+}
+
+/* Call from guest to unpublish a ring */
+static long
+v4v_ring_remove (struct domain *d, XEN_GUEST_HANDLE (v4v_ring_t) ring_hnd)
+{
+ struct v4v_ring ring;
+ struct v4v_ring_info *ring_info;
+ int ret = 0;
+
+ read_lock (&v4v_lock);
+
+ do
+ {
+ if ( !d->v4v )
+ {
+ v4v_dprintk("EINVAL\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if ( copy_from_guest (&ring, ring_hnd, 1) )
+ {
+ v4v_dprintk("EFAULT\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ if ( ring.magic != V4V_RING_MAGIC )
+ {
+ v4v_dprintk("ring.magic(%lx) != V4V_RING_MAGIC(%lx), EINVAL\n",
+ ring.magic, V4V_RING_MAGIC);
+ ret = -EINVAL;
+ break;
+ }
+
+ ring.id.addr.domain = d->domain_id;
+
+ write_lock (&d->v4v->lock);
+ ring_info = v4v_ring_find_info (d, &ring.id);
+
+ if ( ring_info )
+ v4v_ring_remove_info (ring_info);
+
+ write_unlock (&d->v4v->lock);
+
+ if ( !ring_info )
+ {
+ v4v_dprintk( "ENOENT\n" );
+ ret = -ENOENT;
+ break;
+ }
+
+ }
+ while ( 0 );
+
+ read_unlock (&v4v_lock);
+ return ret;
+}
+
+/* call from guest to publish a ring */
+static long
+v4v_ring_add (struct domain *d, XEN_GUEST_HANDLE (v4v_ring_t) ring_hnd,
+ uint32_t npage, XEN_GUEST_HANDLE (v4v_pfn_t) pfn_hnd)
+{
+ struct v4v_ring ring;
+ struct v4v_ring_info *ring_info;
+ int need_to_insert = 0;
+ int ret = 0;
+
+ if ( (long) ring_hnd.p & (PAGE_SIZE - 1) )
+ {
+ v4v_dprintk("EINVAL\n");
+ return -EINVAL;
+ }
+
+ read_lock (&v4v_lock);
+ do
+ {
+ if ( !d->v4v )
+ {
+ v4v_dprintk(" !d->v4v, EINVAL\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if ( copy_from_guest (&ring, ring_hnd, 1) )
+ {
+ v4v_dprintk(" copy_from_guest failed, EFAULT\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ if ( ring.magic != V4V_RING_MAGIC )
+ {
+ v4v_dprintk("ring.magic(%lx) != V4V_RING_MAGIC(%lx), EINVAL\n",
+ ring.magic, V4V_RING_MAGIC);
+ ret = -EINVAL;
+ break;
+ }
+
+ if ( (ring.len <
+ (sizeof (struct v4v_ring_message_header) + V4V_ROUNDUP (1) +
+ V4V_ROUNDUP (1))) || (V4V_ROUNDUP (ring.len) != ring.len) )
+ {
+ v4v_dprintk("EINVAL\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ ring.id.addr.domain = d->domain_id;
+ if ( copy_field_to_guest (ring_hnd, &ring, id) )
+ {
+ v4v_dprintk("EFAULT\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * No need for a lock yet, because only we know about this ring.
+ * Set the tx pointer if it looks bogus (we don't reset it
+ * because this might be a re-register after S4).
+ */
+ if ( (ring.tx_ptr >= ring.len)
+ || (V4V_ROUNDUP (ring.tx_ptr) != ring.tx_ptr) )
+ {
+ ring.tx_ptr = ring.rx_ptr;
+ }
+ copy_field_to_guest (ring_hnd, &ring, tx_ptr);
+
+ read_lock (&d->v4v->lock);
+ ring_info = v4v_ring_find_info (d, &ring.id);
+
+ if ( !ring_info )
+ {
+ read_unlock (&d->v4v->lock);
+ ring_info = xmalloc (struct v4v_ring_info);
+ if ( !ring_info )
+ {
+ v4v_dprintk("ENOMEM\n");
+ ret = -ENOMEM;
+ break;
+ }
+ need_to_insert++;
+ spin_lock_init (&ring_info->lock);
+ INIT_HLIST_HEAD (&ring_info->pending);
+ ring_info->mfns = NULL;
+
+ }
+ else
+ {
+ /*
+ * Ring info already existed. If the mfn list was already
+ * populated, remove the MFNs from the list and then add
+ * the new list.
+ */
+ printk(KERN_INFO "v4v: dom%d re-registering existing ring, clearing MFN list\n",
+ current->domain->domain_id);
+ v4v_ring_remove_mfns(ring_info);
+ }
+
+ spin_lock (&ring_info->lock);
+ ring_info->id = ring.id;
+ ring_info->len = ring.len;
+ ring_info->tx_ptr = ring.tx_ptr;
+ ring_info->ring = ring_hnd;
+ if ( ring_info->mfns )
+ xfree (ring_info->mfns);
+ ret = v4v_find_ring_mfns (d, ring_info, npage, pfn_hnd);
+ spin_unlock (&ring_info->lock);
+ if ( ret )
+ break;
+
+ if ( !need_to_insert )
+ {
+ read_unlock (&d->v4v->lock);
+ }
+ else
+ {
+ uint16_t hash = v4v_hash_fn (&ring.id);
+ write_lock (&d->v4v->lock);
+ hlist_add_head (&ring_info->node, &d->v4v->ring_hash[hash]);
+ write_unlock (&d->v4v->lock);
+ }
+ }
+ while ( 0 );
+
+ read_unlock (&v4v_lock);
+ return ret;
+}
+
+
+/*
+ * io
+ */
+
+/* Caller must hold R(L1) and R(L2) */
+static void
+v4v_notify_ring (struct domain *d, struct v4v_ring_info *ring_info,
+ struct hlist_head *to_notify)
+{
+ uint32_t space;
+
+ spin_lock (&ring_info->lock);
+ space = v4v_ringbuf_payload_space (d, ring_info);
+ spin_unlock (&ring_info->lock);
+
+ v4v_pending_find (ring_info, space, to_notify);
+}
+
+/*notify hypercall*/
+static long
+v4v_notify (struct domain *d,
+ XEN_GUEST_HANDLE (v4v_ring_data_t) ring_data_hnd)
+{
+ v4v_ring_data_t ring_data;
+ HLIST_HEAD (to_notify);
+ int i;
+ int ret = 0;
+
+ read_lock (&v4v_lock);
+
+ if ( !d->v4v )
+ {
+ read_unlock (&v4v_lock);
+ v4v_dprintk("!d->v4v, ENODEV\n");
+ return -ENODEV;
+ }
+
+ read_lock (&d->v4v->lock);
+ for ( i = 0; i < V4V_HTABLE_SIZE; ++i )
+ {
+ struct hlist_node *node, *next;
+ struct v4v_ring_info *ring_info;
+
+ hlist_for_each_entry_safe (ring_info, node,
+ next, &d->v4v->ring_hash[i],
+ node)
+ {
+ v4v_notify_ring (d, ring_info, &to_notify);
+ }
+ }
+ read_unlock (&d->v4v->lock);
+
+ if ( !hlist_empty (&to_notify) )
+ {
+ v4v_pending_notify (d, &to_notify);
+ }
+
+ do
+ {
+ if ( !guest_handle_is_null (ring_data_hnd) )
+ {
+ /* Quick sanity check on ring_data_hnd */
+ if ( copy_field_from_guest (&ring_data, ring_data_hnd, magic) )
+ {
+ v4v_dprintk("copy_field_from_guest failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ if ( ring_data.magic != V4V_RING_DATA_MAGIC )
+ {
+ v4v_dprintk("ring.magic(%lx) != V4V_RING_MAGIC(%lx), EINVAL\n",
+ ring_data.magic, V4V_RING_MAGIC);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_from_guest (&ring_data, ring_data_hnd, 1))
+ {
+ v4v_dprintk("copy_from_guest failed\n");
+ ret = -EFAULT;
+ break;
+ }
+
+ {
+ XEN_GUEST_HANDLE (v4v_ring_data_ent_t) ring_data_ent_hnd;
+ ring_data_ent_hnd =
+ guest_handle_for_field(ring_data_hnd, v4v_ring_data_ent_t, ring[0]);
+ ret = v4v_fill_ring_datas (d, ring_data.nent, ring_data_ent_hnd);
+ }
+ }
+ }
+ while ( 0 );
+
+ read_unlock (&v4v_lock);
+
+ return ret;
+}
+
+
+
+/*Hypercall to do the send*/
+static ssize_t
+v4v_send (struct domain *src_d, v4v_addr_t * src_addr,
+ v4v_addr_t * dst_addr, uint32_t proto,
+ XEN_GUEST_HANDLE (void) buf, size_t len)
+{
+ struct domain *dst_d;
+ struct v4v_ring_id src_id;
+ struct v4v_ring_info *ring_info;
+ int ret = 0;
+
+ if ( !dst_addr )
+ {
+ v4v_dprintk("!dst_addr\n");
+ return -EINVAL;
+ }
+
+ read_lock (&v4v_lock);
+ if ( !src_d->v4v )
+ {
+ read_unlock (&v4v_lock);
+ v4v_dprintk("!src_d->v4v\n");
+ return -EINVAL;
+ }
+
+ src_id.addr.port = src_addr->port;
+ src_id.addr.domain = src_d->domain_id;
+ src_id.partner = dst_addr->domain;
+
+ dst_d = get_domain_by_id (dst_addr->domain);
+ if ( !dst_d )
+ {
+ read_unlock (&v4v_lock);
+ v4v_dprintk("!dst_d, ECONNREFUSED\n");
+ return -ECONNREFUSED;
+ }
+
+ do
+ {
+ if ( !dst_d->v4v )
+ {
+ ret = -ECONNREFUSED;
+ v4v_dprintk("!dst_d->v4v, ECONNREFUSED\n");
+ break;
+ }
+
+ read_lock (&dst_d->v4v->lock);
+ ring_info =
+ v4v_ring_find_info_by_addr (dst_d, dst_addr, src_addr->domain);
+
+ if ( !ring_info )
+ {
+ ret = -ECONNREFUSED;
+ v4v_dprintk("!ring_info\n");
+ }
+ else
+ {
+ spin_lock (&ring_info->lock);
+ ret =
+ v4v_ringbuf_insert (dst_d, ring_info, &src_id, proto, buf, len);
+ if ( ret == -EAGAIN )
+ {
+ v4v_dprintk("ret == EAGAIN\n");
+ /* Schedule a wake up on the event channel when space is there */
+ if (v4v_pending_requeue (ring_info, src_d->domain_id, len))
+ {
+ v4v_dprintk("v4v_pending_requeue failed, ENOMEM\n");
+ ret = -ENOMEM;
+ }
+ }
+ spin_unlock (&ring_info->lock);
+
+ if (ret >= 0)
+ {
+ v4v_signal_domain (dst_d);
+ }
+ }
+ read_unlock (&dst_d->v4v->lock);
+ }
+ while ( 0 );
+
+ put_domain (dst_d);
+ read_unlock (&v4v_lock);
+ return ret;
+}
+
+/* Hypercall to do the vectored send */
+static ssize_t
+v4v_sendv (struct domain *src_d, v4v_addr_t * src_addr,
+ v4v_addr_t * dst_addr, uint32_t proto,
+ XEN_GUEST_HANDLE (v4v_iov_t) iovs, size_t niov)
+{
+ struct domain *dst_d;
+ struct v4v_ring_id src_id;
+ struct v4v_ring_info *ring_info;
+ int ret = 0;
+
+ if ( !dst_addr )
+ {
+ v4v_dprintk("!dst_addr, EINVAL\n");
+ return -EINVAL;
+ }
+
+ read_lock (&v4v_lock);
+ if (!src_d->v4v)
+ {
+ read_unlock (&v4v_lock);
+ v4v_dprintk("!src_d->v4v, EINVAL\n");
+ return -EINVAL;
+ }
+
+ src_id.addr.port = src_addr->port;
+ src_id.addr.domain = src_d->domain_id;
+ src_id.partner = dst_addr->domain;
+
+ dst_d = get_domain_by_id (dst_addr->domain);
+ if (!dst_d)
+ {
+ read_unlock (&v4v_lock);
+ v4v_dprintk("!dst_d, ECONNREFUSED\n");
+ return -ECONNREFUSED;
+ }
+
+ do
+ {
+ if ( !dst_d->v4v )
+ {
+ v4v_dprintk("dst_d->v4v, ECONNREFUSED\n");
+ ret = -ECONNREFUSED;
+ break;
+ }
+
+ read_lock (&dst_d->v4v->lock);
+ ring_info =
+ v4v_ring_find_info_by_addr (dst_d, dst_addr, src_addr->domain);
+
+ if ( !ring_info )
+ {
+ ret = -ECONNREFUSED;
+ v4v_dprintk(" !ring_info, ECONNREFUSED\n");
+ }
+ else
+ {
+ ssize_t len = v4v_iov_count (iovs, niov);
+
+ if ( len < 0 )
+ {
+ ret = len;
+ read_unlock (&dst_d->v4v->lock);
+ break;
+ }
+
+ spin_lock (&ring_info->lock);
+ ret =
+ v4v_ringbuf_insertv (dst_d, ring_info, &src_id, proto, iovs,
+ niov, len);
+ if ( ret == -EAGAIN )
+ {
+ v4v_dprintk("v4v_ringbuf_insertv failed, EAGAIN\n");
+ /* Schedule a wake up on the event channel when space is there */
+ if (v4v_pending_requeue (ring_info, src_d->domain_id, len))
+ {
+ v4v_dprintk("v4v_pending_requeue failed, ENOMEM\n");
+ ret = -ENOMEM;
+ }
+ }
+ spin_unlock (&ring_info->lock);
+
+ if ( ret >= 0 )
+ {
+ v4v_signal_domain (dst_d);
+ }
+
+ }
+ read_unlock (&dst_d->v4v->lock);
+
+ }
+ while ( 0 );
+
+ put_domain (dst_d);
+ read_unlock (&v4v_lock);
+ return ret;
+}
+
+/*
+ * hypercall glue
+ */
+long
+do_v4v_op (int cmd, XEN_GUEST_HANDLE (void) arg1,
+ XEN_GUEST_HANDLE (void) arg2,
+ XEN_GUEST_HANDLE (void) arg3, uint32_t arg4, uint32_t arg5)
+{
+ struct domain *d = current->domain;
+ long rc = -EFAULT;
+
+ v4v_dprintk("->do_v4v_op(%d,%p,%p,%p,%d,%d)\n", cmd,
+ (void *) arg1.p, (void *) arg2.p, (void *) arg3.p,
+ (int) arg4, (int) arg5);
+
+ domain_lock (d);
+ switch (cmd)
+ {
+ case V4VOP_register_ring:
+ {
+ XEN_GUEST_HANDLE (v4v_ring_t) ring_hnd =
+ guest_handle_cast (arg1, v4v_ring_t);
+ XEN_GUEST_HANDLE (v4v_pfn_t) pfn_hnd =
+ guest_handle_cast (arg2, v4v_pfn_t);
+ uint32_t npage = arg4;
+ if ( unlikely (!guest_handle_okay (ring_hnd, 1)) )
+ goto out;
+ if ( unlikely (!guest_handle_okay (pfn_hnd, npage)) )
+ goto out;
+ rc = v4v_ring_add (d, ring_hnd, npage, pfn_hnd);
+ break;
+ }
+ case V4VOP_unregister_ring:
+ {
+ XEN_GUEST_HANDLE (v4v_ring_t) ring_hnd =
+ guest_handle_cast (arg1, v4v_ring_t);
+ if ( unlikely (!guest_handle_okay (ring_hnd, 1)) )
+ goto out;
+ rc = v4v_ring_remove (d, ring_hnd);
+ break;
+ }
+ case V4VOP_send:
+ {
+ v4v_addr_t src, dst;
+ uint32_t len = arg4;
+ uint32_t protocol = arg5;
+ XEN_GUEST_HANDLE (v4v_addr_t) src_hnd =
+ guest_handle_cast (arg1, v4v_addr_t);
+ XEN_GUEST_HANDLE (v4v_addr_t) dst_hnd =
+ guest_handle_cast (arg2, v4v_addr_t);
+
+ if ( unlikely (!guest_handle_okay (src_hnd, 1)) )
+ goto out;
+ if ( copy_from_guest (&src, src_hnd, 1) )
+ goto out;
+
+ if ( unlikely (!guest_handle_okay (dst_hnd, 1)) )
+ goto out;
+ if ( copy_from_guest (&dst, dst_hnd, 1) )
+ goto out;
+
+ rc = v4v_send (d, &src, &dst, protocol, arg3, len);
+ break;
+ }
+ case V4VOP_sendv:
+ {
+ v4v_addr_t src, dst;
+ uint32_t niov = arg4;
+ uint32_t protocol = arg5;
+ XEN_GUEST_HANDLE (v4v_addr_t) src_hnd =
+ guest_handle_cast (arg1, v4v_addr_t);
+ XEN_GUEST_HANDLE (v4v_addr_t) dst_hnd =
+ guest_handle_cast (arg2, v4v_addr_t);
+ XEN_GUEST_HANDLE (v4v_iov_t) iovs =
+ guest_handle_cast (arg3, v4v_iov_t);
+
+ if ( unlikely (!guest_handle_okay (src_hnd, 1)) )
+ goto out;
+ if ( copy_from_guest (&src, src_hnd, 1) )
+ goto out;
+
+ if ( unlikely (!guest_handle_okay (dst_hnd, 1)) )
+ goto out;
+ if ( copy_from_guest (&dst, dst_hnd, 1) )
+ goto out;
+
+ if ( unlikely (!guest_handle_okay (iovs, niov)) )
+ goto out;
+
+ rc = v4v_sendv (d, &src, &dst, protocol, iovs, niov);
+ break;
+ }
+ case V4VOP_notify:
+ {
+ XEN_GUEST_HANDLE (v4v_ring_data_t) ring_data_hnd =
+ guest_handle_cast (arg1, v4v_ring_data_t);
+ rc = v4v_notify (d, ring_data_hnd);
+ break;
+ }
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+out:
+ domain_unlock (d);
+ v4v_dprintk("<-do_v4v_op()=%d\n", (int) rc);
+ return rc;
+}
+
+/*
+ * init
+ */
+
+void
+v4v_destroy (struct domain *d)
+{
+ int i;
+
+ BUG_ON (!d->is_dying);
+ write_lock (&v4v_lock);
+
+ v4v_dprintk("d->v=%p\n", d->v4v);
+
+ if ( d->v4v )
+ {
+ for ( i = 0; i < V4V_HTABLE_SIZE; ++i )
+ {
+ struct hlist_node *node, *next;
+ struct v4v_ring_info *ring_info;
+
+ hlist_for_each_entry_safe (ring_info, node,
+ next, &d->v4v->ring_hash[i],
+ node)
+ {
+ v4v_ring_remove_info (ring_info);
+ }
+ }
+ }
+
+ xfree (d->v4v);
+ d->v4v = NULL;
+ write_unlock (&v4v_lock);
+}
+
+int
+v4v_init (struct domain *d)
+{
+ struct v4v_domain *v4v;
+ int i;
+
+ v4v = xmalloc (struct v4v_domain);
+ if ( !v4v )
+ return -ENOMEM;
+
+ rwlock_init (&v4v->lock);
+
+ for ( i = 0; i < V4V_HTABLE_SIZE; ++i )
+ {
+ INIT_HLIST_HEAD (&v4v->ring_hash[i]);
+ }
+
+ write_lock (&v4v_lock);
+ d->v4v = v4v;
+ write_unlock (&v4v_lock);
+
+ return 0;
+}
+
+
+/*
+ * debug
+ */
+
+static void
+dump_domain_ring (struct domain *d, struct v4v_ring_info *ring_info)
+{
+ uint32_t rx_ptr;
+
+ printk (KERN_ERR " ring: domid=%d port=0x%08x partner=%d npage=%d\n",
+ (int) d->domain_id, (int) ring_info->id.addr.port,
+ (int) ring_info->id.partner, (int) ring_info->npage);
+
+ if ( v4v_ringbuf_get_rx_ptr (d, ring_info, &rx_ptr) )
+ {
+ printk (KERN_ERR " Failed to read rx_ptr\n");
+ return;
+ }
+
+ printk (KERN_ERR " tx_ptr=%d rx_ptr=%d len=%d\n",
+ (int) ring_info->tx_ptr, (int) rx_ptr, (int) ring_info->len);
+}
+
+static void
+dump_domain_rings (struct domain *d)
+{
+ int i;
+
+ printk (KERN_ERR " domain %d:\n", (int) d->domain_id);
+
+ read_lock (&d->v4v->lock);
+
+ for ( i = 0; i < V4V_HTABLE_SIZE; ++i )
+ {
+ struct hlist_node *node;
+ struct v4v_ring_info *ring_info;
+
+ hlist_for_each_entry (ring_info, node, &d->v4v->ring_hash[i], node)
+ dump_domain_ring (d, ring_info);
+ }
+ read_unlock (&d->v4v->lock);
+
+ printk (KERN_ERR "\n");
+ v4v_signal_domain (d);
+}
+
+static void
+dump_rings (unsigned char key)
+{
+ struct domain *d;
+
+ printk (KERN_ERR "\n\nV4V ring dump:\n");
+ read_lock (&v4v_lock);
+
+ rcu_read_lock (&domlist_read_lock);
+
+ for_each_domain (d) dump_domain_rings (d);
+
+ rcu_read_unlock (&domlist_read_lock);
+
+ read_unlock (&v4v_lock);
+}
+
+struct keyhandler v4v_info_keyhandler = {
+ .diagnostic = 1,
+ .u.fn = dump_rings,
+ .desc = "dump v4v ring states and intterupt"
+};
+
+static int __init
+setup_dump_rings (void)
+{
+ register_keyhandler ('4', &v4v_info_keyhandler);
+ return 0;
+}
+
+__initcall (setup_dump_rings);
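+
+/*
+ * Usage note (illustrative): once registered, the dump can be triggered
+ * from dom0 with "xl debug-keys 4", or with the '4' debug key on the
+ * Xen console.
+ */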
+
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/v4v.h b/xen/include/public/v4v.h
new file mode 100644
index 0000000..197770e
--- /dev/null
+++ b/xen/include/public/v4v.h
@@ -0,0 +1,240 @@
+/******************************************************************************
+ * V4V
+ *
+ * Version 2 of v2v (Virtual-to-Virtual)
+ *
+ * Copyright (c) 2010, Citrix Systems
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __XEN_PUBLIC_V4V_H__
+#define __XEN_PUBLIC_V4V_H__
+
+#include "xen.h"
+
+/*
+ * Structure definitions
+ */
+
+#define V4V_PROTO_DGRAM 0x3c2c1db8
+#define V4V_PROTO_STREAM 0x70f6a8e5
+
+#ifdef __i386__
+# define V4V_RING_MAGIC 0xdf6977f231abd910ULL
+# define V4V_PFN_LIST_MAGIC 0x91dd6159045b302dULL
+#else
+# define V4V_RING_MAGIC 0xdf6977f231abd910
+# define V4V_PFN_LIST_MAGIC 0x91dd6159045b302d
+#endif
+#define V4V_DOMID_INVALID (0x7FFFU)
+#define V4V_DOMID_NONE V4V_DOMID_INVALID
+#define V4V_DOMID_ANY V4V_DOMID_INVALID
+#define V4V_PORT_NONE 0
+
+/*
+ * struct v4v_iov
+ * {
+ * 64 bits: iov_base
+ * 64 bits: iov_len
+ * }
+ */
+
+/*
+ * struct v4v_addr
+ * {
+ * 32 bits: port
+ * 16 bits: domid
+ * }
+ */
+
+/*
+ * v4v_ring_id
+ * {
+ * struct v4v_addr: addr
+ * 16 bits: partner
+ * }
+ */
+
+/*
+ * v4v_ring
+ * {
+ * 64 bits: magic
+ * v4v_ring_id: id
+ * 32 bits: len
+ * 32 bits: rx_ptr
+ * 32 bits: tx_ptr
+ * 64 bits: padding
+ * ... : ring
+ * }
+ *
+ * id:
+ * xen only looks at this during register/unregister
+ * and will fill in id.addr.domain
+ *
+ * rx_ptr: rx pointer, modified by domain
+ * tx_ptr: tx pointer, modified by xen
+ */
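+
+/*
+ * Illustrative occupancy rule: the ring is empty when rx_ptr == tx_ptr;
+ * otherwise the used region runs from rx_ptr up to tx_ptr, wrapping at
+ * len. The sender always leaves at least V4V_ROUNDUP(1) bytes free so
+ * that the two pointers never meet on a full ring.
+ */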
+
+#ifdef __i386__
+#define V4V_RING_DATA_MAGIC 0x4ce4d30fbc82e92aULL
+#else
+#define V4V_RING_DATA_MAGIC 0x4ce4d30fbc82e92a
+#endif
+
+#define V4V_RING_DATA_F_EMPTY (1U << 0) /* Ring is empty */
+#define V4V_RING_DATA_F_EXISTS (1U << 1) /* Ring exists */
+#define V4V_RING_DATA_F_PENDING (1U << 2) /* Pending interrupt exists - do not
+ rely on this field - for
+ profiling only */
+#define V4V_RING_DATA_F_SUFFICIENT (1U << 3) /* Sufficient space to queue
+ space_required bytes exists */
+
+/*
+ * v4v_ring_data_ent
+ * {
+ * v4v_addr: ring
+ * 16 bits: flags
+ * 16 bits: padding
+ * 32 bits: space_required
+ * 32 bits: max_message_size
+ * }
+ */
+
+/*
+ * v4v_ring_data
+ * {
+ * 64 bits: magic (V4V_RING_DATA_MAGIC)
+ * 32 bits: nent
+ * 32 bits: padding
+ * 256 bits: reserved
+ * ... : v4v_ring_data_ent
+ * }
+ */
+
+
+#define V4V_ROUNDUP(a) (((a) + 0xf) & ~0xf)
+/*
+ * Messages on the ring are padded to 128 bits
+ * Len here refers to the exact length of the data not including the
+ * 128 bit header. The message uses
+ * ((len +0xf) & ~0xf) + sizeof(v4v_ring_message_header) bytes
+ */
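+
+/*
+ * Example (illustrative): a 10 byte payload is padded to
+ * V4V_ROUNDUP(10) == 16 bytes of data, so it occupies
+ * 16 + sizeof(v4v_ring_message_header) bytes on the ring.
+ */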
+
+/*
+ * v4v_stream_header
+ * {
+ * 32 bits: flags
+ * 32 bits: conid
+ * }
+ */
+
+/*
+ * v4v_ring_message_header
+ * {
+ * 32 bits: len
+ * v4v_addr: source
+ * 32 bits: protocol
+ * ... : data
+ * }
+ */
+
+/*
+ * HYPERCALLS
+ */
+
+#define V4VOP_register_ring 1
+/*
+ * Registers a ring with Xen. If a ring with the same v4v_ring_id exists,
+ * this ring takes its place; registration will not change tx_ptr
+ * unless it is invalid.
+ *
+ * do_v4v_op(V4VOP_register_ring,
+ * v4v_ring, XEN_GUEST_HANDLE(v4v_pfn),
+ * NULL, npage, 0)
+ */
+
+
+#define V4VOP_unregister_ring 2
+/*
+ * Unregister a ring.
+ *
+ * v4v_hypercall(V4VOP_unregister_ring, v4v_ring, NULL, NULL, 0, 0)
+ */
+
+#define V4VOP_send 3
+/*
+ * Sends len bytes of buf to dst, giving src as the source address (xen will
+ * ignore src->domain and put your domain in the actual message); xen
+ * first looks for a ring with id.addr==dst and id.partner==sending_domain;
+ * if that fails it looks for id.addr==dst and id.partner==DOMID_ANY.
+ * protocol is the 32 bit protocol number used for the message,
+ * most likely V4V_PROTO_DGRAM or STREAM. If insufficient space exists
+ * it will return -EAGAIN and xen will raise VIRQ_V4V when
+ * sufficient space becomes available.
+ *
+ * v4v_hypercall(V4VOP_send,
+ * v4v_addr src,
+ * v4v_addr dst,
+ * void* buf,
+ * uint32_t len,
+ * uint32_t protocol)
+ */
+
+
+#define V4VOP_notify 4
+/* Asks xen for information about other rings in the system.
+ *
+ * ent->ring is the v4v_addr_t of the ring you want information on;
+ * the same matching rules are used as for V4VOP_send.
+ *
+ * ent->space_required: if this field is not null xen will check
+ * that there is space in the destination ring for this many bytes
+ * of payload. If there is, it will set V4V_RING_DATA_F_SUFFICIENT
+ * and CANCEL any pending interrupt for that ent->ring; if insufficient
+ * space is available it will schedule an interrupt and the flag will
+ * not be set.
+ *
+ * The flags are set by xen when notify replies
+ * V4V_RING_DATA_F_EMPTY ring is empty
+ * V4V_RING_DATA_F_PENDING interrupt is pending - don't rely on this
+ * V4V_RING_DATA_F_SUFFICIENT sufficient space for space_required is there
+ * V4V_RING_DATA_F_EXISTS ring exists
+ *
+ * v4v_hypercall(V4VOP_notify,
+ * XEN_GUEST_HANDLE(v4v_ring_data_ent) ent,
+ * NULL, NULL, nent, 0)
+ */
+
+
+#define V4VOP_sendv 5
+/*
+ * Identical to V4VOP_send except rather than buf and len it takes
+ * an array of v4v_iov and a length of the array.
+ *
+ * v4v_hypercall(V4VOP_sendv,
+ * v4v_addr src,
+ * v4v_addr dst,
+ * v4v_iov iov,
+ * uint32_t niov,
+ * uint32_t protocol)
+ */
+
+#endif /* __XEN_PUBLIC_V4V_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index 033cbba..dce0338 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -99,7 +99,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
#define __HYPERVISOR_domctl 36
#define __HYPERVISOR_kexec_op 37
#define __HYPERVISOR_tmem_op 38
-#define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */
+#define __HYPERVISOR_v4v_op 39
/* Architecture-specific hypercall definitions. */
#define __HYPERVISOR_arch_0 48
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 53804c8..457e3f2 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -23,6 +23,7 @@
#include <public/sysctl.h>
#include <public/vcpu.h>
#include <public/mem_event.h>
+#include <xen/v4v.h>
#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
@@ -350,6 +351,10 @@ struct domain
nodemask_t node_affinity;
unsigned int last_alloc_node;
spinlock_t node_affinity_lock;
+
+ /* v4v */
+ rwlock_t v4v_lock;
+ struct v4v_domain *v4v;
};
struct domain_setup_info
diff --git a/xen/include/xen/v4v.h b/xen/include/xen/v4v.h
new file mode 100644
index 0000000..641a6a8
--- /dev/null
+++ b/xen/include/xen/v4v.h
@@ -0,0 +1,187 @@
+/******************************************************************************
+ * V4V
+ *
+ * Version 2 of v2v (Virtual-to-Virtual)
+ *
+ * Copyright (c) 2010, Citrix Systems
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __V4V_PRIVATE_H__
+#define __V4V_PRIVATE_H__
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/spinlock.h>
+#include <xen/smp.h>
+#include <xen/shared.h>
+#include <xen/list.h>
+#include <public/v4v.h>
+
+#define V4V_HTABLE_SIZE 32
+
+#define V4V_PACKED __attribute__ ((packed))
+
+/*
+ * Structures
+ */
+
+typedef struct v4v_iov
+{
+ uint64_t iov_base;
+ uint64_t iov_len;
+} V4V_PACKED v4v_iov_t;
+DEFINE_XEN_GUEST_HANDLE (v4v_iov_t);
+
+typedef struct v4v_addr
+{
+ uint32_t port;
+ domid_t domain;
+} V4V_PACKED v4v_addr_t;
+DEFINE_XEN_GUEST_HANDLE (v4v_addr_t);
+
+typedef struct v4v_ring_id
+{
+ struct v4v_addr addr;
+ domid_t partner;
+} V4V_PACKED v4v_ring_id_t;
+
+typedef uint64_t v4v_pfn_t;
+DEFINE_XEN_GUEST_HANDLE (v4v_pfn_t);
+
+typedef struct v4v_ring
+{
+ uint64_t magic;
+ struct v4v_ring_id id;
+ uint32_t len;
+ uint32_t rx_ptr;
+ uint32_t tx_ptr;
+ uint64_t reserved[4];
+ uint8_t ring[0];
+} V4V_PACKED v4v_ring_t;
+DEFINE_XEN_GUEST_HANDLE (v4v_ring_t);
+
+typedef struct v4v_ring_data_ent
+{
+ struct v4v_addr ring;
+ uint16_t flags;
+ uint16_t pad0;
+ uint32_t space_required;
+ uint32_t max_message_size;
+} V4V_PACKED v4v_ring_data_ent_t;
+DEFINE_XEN_GUEST_HANDLE (v4v_ring_data_ent_t);
+
+typedef struct v4v_ring_data
+{
+ uint64_t magic;
+ uint32_t nent;
+ uint32_t padding;
+ uint64_t reserved[4];
+ v4v_ring_data_ent_t ring[0];
+} V4V_PACKED v4v_ring_data_t;
+DEFINE_XEN_GUEST_HANDLE (v4v_ring_data_t);
+
+struct v4v_stream_header
+{
+ uint32_t flags;
+ uint32_t conid;
+} V4V_PACKED;
+
+struct v4v_ring_message_header
+{
+ uint32_t len;
+ struct v4v_addr source;
+ uint32_t protocol;
+ uint8_t data[0];
+} V4V_PACKED;
+
+/*
+ * Helper functions
+ */
+
+
+static inline uint16_t
+v4v_hash_fn (struct v4v_ring_id *id)
+{
+ uint16_t ret;
+ ret = (uint16_t) (id->addr.port >> 16);
+ ret ^= (uint16_t) id->addr.port;
+ ret ^= id->addr.domain;
+ ret ^= id->partner;
+
+ ret &= (V4V_HTABLE_SIZE-1);
+
+ return ret;
+}
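+
+/*
+ * Example (illustrative): port 0x10002, domain 1, partner 0x7fff
+ * folds to 1 ^ 2 ^ 1 ^ 0x7fff = 0x7ffd, masked to bucket 29 of the
+ * V4V_HTABLE_SIZE (32) buckets.
+ */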
+
+struct v4v_pending_ent
+{
+ struct hlist_node node;
+ domid_t id;
+ uint32_t len;
+} V4V_PACKED;
+
+
+struct v4v_ring_info
+{
+ /* next node in the hash, protected by L2 */
+ struct hlist_node node;
+ /* this ring's id, protected by L2 */
+ struct v4v_ring_id id;
+ /* L3 */
+ spinlock_t lock;
+ /* cached length of the ring (from ring->len), protected by L3 */
+ uint32_t len;
+ uint32_t npage;
+ /* cached tx pointer location, protected by L3 */
+ uint32_t tx_ptr;
+ /* guest ring, protected by L3 */
+ XEN_GUEST_HANDLE(v4v_ring_t) ring;
+ /* mapped ring pages protected by L3*/
+ uint8_t **mfn_mapping;
+ /* list of mfns of guest ring */
+ mfn_t *mfns;
+ /* list of struct v4v_pending_ent for this ring, L3 */
+ struct hlist_head pending;
+} V4V_PACKED;
+
+/*
+ * The value of the v4v element in a struct domain is
+ * protected by the global lock L1
+ */
+struct v4v_domain
+{
+ /* L2 */
+ rwlock_t lock;
+ /* protected by L2 */
+ struct hlist_head ring_hash[V4V_HTABLE_SIZE];
+} V4V_PACKED;
+
+void v4v_destroy(struct domain *d);
+int v4v_init(struct domain *d);
+long do_v4v_op (int cmd,
+ XEN_GUEST_HANDLE (void) arg1,
+ XEN_GUEST_HANDLE (void) arg2,
+ XEN_GUEST_HANDLE (void) arg3,
+ uint32_t arg4,
+ uint32_t arg5);
+
+#endif /* __V4V_PRIVATE_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
[-- Attachment #3: Type: text/plain, Size: 126 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel