From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>, qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Joao Martins" <joao.m.martins@oracle.com>,
"Ankur Arora" <ankur.a.arora@oracle.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Thomas Huth" <thuth@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Juan Quintela" <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Claudio Fontana" <cfontana@suse.de>,
"Julien Grall" <julien@xen.org>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
armbru@redhat.com
Subject: [PATCH v10 52/59] hw/xen: Add basic ring handling to xenstore
Date: Wed, 1 Feb 2023 14:31:41 +0000 [thread overview]
Message-ID: <20230201143148.1744093-53-dwmw2@infradead.org> (raw)
In-Reply-To: <20230201143148.1744093-1-dwmw2@infradead.org>
From: David Woodhouse <dwmw@amazon.co.uk>
Extract requests, return ENOSYS to all of them. This is enough to allow
older Linux guests to boot, as they need *something* back but it doesn't
matter much what.
In the first instance we're likely to wire this up over a UNIX socket to
an actual xenstored implementation, but in the fullness of time it would
be nice to have a fully single-tenant "virtual" xenstore within qemu.
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/i386/kvm/xen_xenstore.c | 223 ++++++++++++++++++++++++++++++++++++-
1 file changed, 220 insertions(+), 3 deletions(-)
diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c
index 702f417633..2388842d15 100644
--- a/hw/i386/kvm/xen_xenstore.c
+++ b/hw/i386/kvm/xen_xenstore.c
@@ -188,18 +188,235 @@ uint16_t xen_xenstore_get_port(void)
return s->guest_port;
}
+/* True once a complete request (header plus declared payload) is buffered. */
+static bool req_pending(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *hdr = (struct xsd_sockmsg *)s->req_data;
+
+    return XENSTORE_HEADER_SIZE + hdr->len == s->req_offset;
+}
+
+/* Discard any partially-assembled request and reset for the next one. */
+static void reset_req(XenXenstoreState *s)
+{
+    memset(s->req_data, 0, sizeof(s->req_data));
+    s->req_offset = 0;
+}
+
+/* Clear the response buffer and the pending flag once it has been sent. */
+static void reset_rsp(XenXenstoreState *s)
+{
+    s->rsp_pending = false;
+
+    memset(s->rsp_data, 0, sizeof(s->rsp_data));
+    s->rsp_offset = 0;
+}
+
+/*
+ * Consume the fully-received request in s->req_data and build a response
+ * in s->rsp_data.  For now every operation is answered with an XS_ERROR
+ * carrying "ENOSYS", which is enough to let older guests boot.
+ */
+static void process_req(XenXenstoreState *s)
+{
+    struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    const char enosys[] = "ENOSYS";
+
+    assert(req_pending(s));
+    assert(!s->rsp_pending);
+
+    /* Echo the request's identifiers back in the error response. */
+    rsp->req_id = req->req_id;
+    rsp->tx_id = req->tx_id;
+    rsp->type = XS_ERROR;
+    rsp->len = sizeof(enosys);
+    /* The payload (including its NUL terminator) follows the header. */
+    memcpy(rsp + 1, enosys, sizeof(enosys));
+
+    s->rsp_pending = true;
+    reset_req(s);
+}
+
+/*
+ * Copy up to @len bytes from the request ring into @ptr.  Returns the
+ * number of bytes actually consumed (limited by what the guest has
+ * produced).  Sets s->fatal_error if the ring indexes are inconsistent.
+ */
+static unsigned int copy_from_ring(XenXenstoreState *s, uint8_t *ptr,
+                                   unsigned int len)
+{
+    if (!len) {
+        return 0;
+    }
+
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->req_prod);
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->req_cons);
+    unsigned int copied = 0;
+
+    /* Order the index reads before reading the ring contents. */
+    smp_mb();
+
+    while (len) {
+        /* Bytes available to read; the indexes are free-running. */
+        unsigned int avail = prod - cons;
+        unsigned int offset = MASK_XENSTORE_IDX(cons);
+        unsigned int copylen = avail;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0) {
+            break;
+        }
+
+        if (copylen > len) {
+            copylen = len;
+        }
+        /* Don't let a single memcpy wrap around the ring boundary. */
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(ptr, &s->xs->req[offset], copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        cons += copylen;
+    }
+
+    /* Finish reading the data before publishing the new consumer index. */
+    smp_mb();
+
+    qatomic_set(&s->xs->req_cons, cons);
+
+    return copied;
+}
+
+/*
+ * Copy up to @len bytes from @ptr into the response ring.  Returns the
+ * number of bytes actually written (limited by the space the guest has
+ * freed).  Sets s->fatal_error if the ring indexes are inconsistent.
+ */
+static unsigned int copy_to_ring(XenXenstoreState *s, uint8_t *ptr,
+                                 unsigned int len)
+{
+    if (!len) {
+        return 0;
+    }
+
+    XENSTORE_RING_IDX cons = qatomic_read(&s->xs->rsp_cons);
+    XENSTORE_RING_IDX prod = qatomic_read(&s->xs->rsp_prod);
+    unsigned int copied = 0;
+
+    /* Order the index reads before touching the ring contents. */
+    smp_mb();
+
+    while (len) {
+        /* Free space in the ring; the indexes are free-running. */
+        unsigned int avail = cons + XENSTORE_RING_SIZE - prod;
+        unsigned int offset = MASK_XENSTORE_IDX(prod);
+        unsigned int copylen = len;
+
+        if (avail > XENSTORE_RING_SIZE) {
+            error_report("XenStore ring handling error");
+            s->fatal_error = true;
+            break;
+        } else if (avail == 0) {
+            break;
+        }
+
+        if (copylen > avail) {
+            copylen = avail;
+        }
+        /* Don't let a single memcpy wrap around the ring boundary. */
+        if (copylen > XENSTORE_RING_SIZE - offset) {
+            copylen = XENSTORE_RING_SIZE - offset;
+        }
+
+        memcpy(&s->xs->rsp[offset], ptr, copylen);
+        copied += copylen;
+
+        ptr += copylen;
+        len -= copylen;
+
+        prod += copylen;
+    }
+
+    /* Make the new data visible before publishing the producer index. */
+    smp_mb();
+
+    qatomic_set(&s->xs->rsp_prod, prod);
+
+    return copied;
+}
+
+/*
+ * Pull request bytes off the ring into s->req_data: first complete the
+ * fixed-size header, then (once req->len is known and validated) the
+ * payload.  Returns the number of bytes consumed from the ring; sets
+ * s->fatal_error and returns 0 on a malformed request.
+ */
+static unsigned int get_req(XenXenstoreState *s)
+{
+    unsigned int copied = 0;
+
+    if (s->fatal_error) {
+        return 0;
+    }
+
+    assert(!req_pending(s));
+
+    if (s->req_offset < XENSTORE_HEADER_SIZE) {
+        void *ptr = s->req_data + s->req_offset;
+        /*
+         * Ask only for the *remaining* header bytes.  Requesting a full
+         * XENSTORE_HEADER_SIZE when part of the header was already
+         * buffered could consume payload bytes past the end of a short
+         * message, making the length calculation below underflow and
+         * swallowing bytes that belong to the next request.
+         */
+        unsigned int len = XENSTORE_HEADER_SIZE - s->req_offset;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    if (s->req_offset >= XENSTORE_HEADER_SIZE) {
+        struct xsd_sockmsg *req = (struct xsd_sockmsg *)s->req_data;
+
+        /* Reject guests claiming an over-sized payload before copying it. */
+        if (req->len > (uint32_t)XENSTORE_PAYLOAD_MAX) {
+            error_report("Illegal XenStore request");
+            s->fatal_error = true;
+            return 0;
+        }
+
+        void *ptr = s->req_data + s->req_offset;
+        unsigned int len = XENSTORE_HEADER_SIZE + req->len - s->req_offset;
+        unsigned int copylen = copy_from_ring(s, ptr, len);
+
+        copied += copylen;
+        s->req_offset += copylen;
+    }
+
+    return copied;
+}
+
+/*
+ * Push pending response bytes from s->rsp_data onto the ring.  Returns
+ * the number of bytes produced; clears the response state once the whole
+ * message (header + payload) has been written.
+ */
+static unsigned int put_rsp(XenXenstoreState *s)
+{
+    if (s->fatal_error) {
+        return 0;
+    }
+
+    assert(s->rsp_pending);
+
+    struct xsd_sockmsg *rsp = (struct xsd_sockmsg *)s->rsp_data;
+    assert(s->rsp_offset < XENSTORE_HEADER_SIZE + rsp->len);
+
+    void *ptr = s->rsp_data + s->rsp_offset;
+    unsigned int len = XENSTORE_HEADER_SIZE + rsp->len - s->rsp_offset;
+    unsigned int copylen = copy_to_ring(s, ptr, len);
+
+    s->rsp_offset += copylen;
+
+    /* Have we produced a complete response? */
+    if (s->rsp_offset == XENSTORE_HEADER_SIZE + rsp->len) {
+        reset_rsp(s);
+    }
+
+    return copylen;
+}
+
static void xen_xenstore_event(void *opaque)
{
XenXenstoreState *s = opaque;
evtchn_port_t port = xen_be_evtchn_pending(s->eh);
+ unsigned int copied_to, copied_from;
+ bool processed, notify = false;
+
if (port != s->be_port) {
return;
}
- printf("xenstore event\n");
+
/* We know this is a no-op. */
xen_be_evtchn_unmask(s->eh, port);
- qemu_hexdump(stdout, "", s->xs, sizeof(*s->xs));
- xen_be_evtchn_notify(s->eh, s->be_port);
+
+ do {
+ copied_to = copied_from = 0;
+ processed = false;
+
+ if (s->rsp_pending)
+ copied_to = put_rsp(s);
+
+ if (!req_pending(s))
+ copied_from = get_req(s);
+
+ if (req_pending(s) && !s->rsp_pending) {
+ process_req(s);
+ processed = true;
+ }
+
+ notify |= copied_to || copied_from;
+ } while (copied_to || copied_from || processed);
+
+ if (notify) {
+ xen_be_evtchn_notify(s->eh, s->be_port);
+ }
}
static void alloc_guest_port(XenXenstoreState *s)
--
2.39.0
next prev parent reply other threads:[~2023-02-01 14:57 UTC|newest]
Thread overview: 104+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-01 14:30 [PATCH v10 00/59] Xen HVM support under KVM David Woodhouse
2023-02-01 14:30 ` [PATCH v10 01/59] include: import Xen public headers to hw/xen/interface David Woodhouse
2023-02-01 14:30 ` [PATCH v10 02/59] xen: add CONFIG_XEN_BUS and CONFIG_XEN_EMU options for Xen emulation David Woodhouse
2023-02-01 14:30 ` [PATCH v10 03/59] xen: Add XEN_DISABLED mode and make it default David Woodhouse
2023-02-01 14:30 ` [PATCH v10 04/59] i386/kvm: Add xen-version KVM accelerator property and init KVM Xen support David Woodhouse
2023-02-01 14:30 ` [PATCH v10 05/59] i386/kvm: handle Xen HVM cpuid leaves David Woodhouse
2023-02-01 14:30 ` [PATCH v10 06/59] i386/hvm: Set Xen vCPU ID in KVM David Woodhouse
2023-02-01 14:30 ` [PATCH v10 07/59] xen-platform: exclude vfio-pci from the PCI platform unplug David Woodhouse
2023-02-01 14:30 ` [PATCH v10 08/59] xen-platform: allow its creation with XEN_EMULATE mode David Woodhouse
2023-02-01 14:30 ` [PATCH v10 09/59] i386/xen: handle guest hypercalls David Woodhouse
2023-02-01 14:30 ` [PATCH v10 10/59] i386/xen: implement HYPERVISOR_xen_version David Woodhouse
2023-02-01 14:31 ` [PATCH v10 11/59] i386/xen: implement HYPERVISOR_sched_op, SCHEDOP_shutdown David Woodhouse
2023-02-01 14:31 ` [PATCH v10 12/59] i386/xen: Implement SCHEDOP_poll and SCHEDOP_yield David Woodhouse
2023-02-01 14:31 ` [PATCH v10 13/59] hw/xen: Add xen_overlay device for emulating shared xenheap pages David Woodhouse
2023-02-01 14:31 ` [PATCH v10 14/59] xen: Permit --xen-domid argument when accel is KVM David Woodhouse
2023-02-01 14:31 ` [PATCH v10 15/59] i386/xen: add pc_machine_kvm_type to initialize XEN_EMULATE mode David Woodhouse
2023-02-01 14:31 ` [PATCH v10 16/59] i386/xen: manage and save/restore Xen guest long_mode setting David Woodhouse
2023-02-01 14:31 ` [PATCH v10 17/59] i386/xen: implement HYPERVISOR_memory_op David Woodhouse
2023-02-01 14:31 ` [PATCH v10 18/59] i386/xen: implement XENMEM_add_to_physmap_batch David Woodhouse
2023-02-01 14:31 ` [PATCH v10 19/59] i386/xen: implement HYPERVISOR_hvm_op David Woodhouse
2023-02-01 14:31 ` [PATCH v10 20/59] i386/xen: implement HYPERVISOR_vcpu_op David Woodhouse
2023-02-01 14:31 ` [PATCH v10 21/59] i386/xen: handle VCPUOP_register_vcpu_info David Woodhouse
2023-02-01 14:31 ` [PATCH v10 22/59] i386/xen: handle VCPUOP_register_vcpu_time_info David Woodhouse
2023-02-01 14:31 ` [PATCH v10 23/59] i386/xen: handle VCPUOP_register_runstate_memory_area David Woodhouse
2023-02-01 14:31 ` [PATCH v10 24/59] i386/xen: implement HYPERVISOR_event_channel_op David Woodhouse
2023-02-01 14:31 ` [PATCH v10 25/59] i386/xen: implement HVMOP_set_evtchn_upcall_vector David Woodhouse
2023-02-01 14:31 ` [PATCH v10 26/59] i386/xen: implement HVMOP_set_param David Woodhouse
2023-02-01 14:31 ` [PATCH v10 27/59] hw/xen: Add xen_evtchn device for event channel emulation David Woodhouse
2023-02-01 14:31 ` [PATCH v10 28/59] i386/xen: Add support for Xen event channel delivery to vCPU David Woodhouse
2023-02-01 14:31 ` [PATCH v10 29/59] hw/xen: Implement EVTCHNOP_status David Woodhouse
2023-02-01 14:31 ` [PATCH v10 30/59] hw/xen: Implement EVTCHNOP_close David Woodhouse
2023-02-10 13:25 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 31/59] hw/xen: Implement EVTCHNOP_unmask David Woodhouse
2023-02-10 13:33 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 32/59] hw/xen: Implement EVTCHNOP_bind_virq David Woodhouse
2023-02-10 13:48 ` Paul Durrant
2023-02-15 13:08 ` David Woodhouse
2023-02-01 14:31 ` [PATCH v10 33/59] hw/xen: Implement EVTCHNOP_bind_ipi David Woodhouse
2023-02-10 13:55 ` Paul Durrant
2023-02-10 14:15 ` David Woodhouse
2023-02-10 14:25 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 34/59] hw/xen: Implement EVTCHNOP_send David Woodhouse
2023-02-10 13:58 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 35/59] hw/xen: Implement EVTCHNOP_alloc_unbound David Woodhouse
2023-02-10 14:13 ` Paul Durrant
2023-02-10 14:17 ` David Woodhouse
2023-02-10 14:28 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 36/59] hw/xen: Implement EVTCHNOP_bind_interdomain David Woodhouse
2023-02-10 14:30 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 37/59] hw/xen: Implement EVTCHNOP_bind_vcpu David Woodhouse
2023-02-10 14:36 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 38/59] hw/xen: Implement EVTCHNOP_reset David Woodhouse
2023-02-10 14:44 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 39/59] i386/xen: add monitor commands to test event injection David Woodhouse
2023-02-10 14:54 ` Paul Durrant
2023-02-10 15:05 ` David Woodhouse
2023-02-10 15:12 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 40/59] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_GSI callback David Woodhouse
2023-02-10 15:24 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 41/59] hw/xen: Support HVM_PARAM_CALLBACK_TYPE_PCI_INTX callback David Woodhouse
2023-02-10 16:47 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 42/59] kvm/i386: Add xen-gnttab-max-frames property David Woodhouse
2023-02-13 15:17 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 43/59] hw/xen: Add xen_gnttab device for grant table emulation David Woodhouse
2023-02-13 15:21 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 44/59] hw/xen: Support mapping grant frames David Woodhouse
2023-02-13 15:31 ` Paul Durrant
2023-02-14 15:35 ` David Woodhouse
2023-02-14 15:40 ` Paul Durrant
2023-02-14 15:41 ` David Woodhouse
2023-02-01 14:31 ` [PATCH v10 45/59] i386/xen: Implement HYPERVISOR_grant_table_op and GNTTABOP_[gs]et_verson David Woodhouse
2023-02-14 9:59 ` Paul Durrant
2023-02-14 15:33 ` David Woodhouse
2023-02-01 14:31 ` [PATCH v10 46/59] hw/xen: Implement GNTTABOP_query_size David Woodhouse
2023-02-14 10:00 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 47/59] i386/xen: handle PV timer hypercalls David Woodhouse
2023-02-14 10:10 ` Paul Durrant
2023-02-14 15:37 ` David Woodhouse
2023-02-15 13:43 ` David Woodhouse
2023-02-01 14:31 ` [PATCH v10 48/59] i386/xen: Reserve Xen special pages for console, xenstore rings David Woodhouse
2023-02-14 15:35 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 49/59] i386/xen: handle HVMOP_get_param David Woodhouse
2023-02-14 15:47 ` Paul Durrant
2023-02-14 15:50 ` David Woodhouse
2023-02-01 14:31 ` [PATCH v10 50/59] hw/xen: Add backend implementation of interdomain event channel support David Woodhouse
2023-02-14 15:53 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 51/59] hw/xen: Add xen_xenstore device for xenstore emulation David Woodhouse
2023-02-14 15:55 ` Paul Durrant
2023-02-01 14:31 ` David Woodhouse [this message]
2023-02-14 16:02 ` [PATCH v10 52/59] hw/xen: Add basic ring handling to xenstore Paul Durrant
2023-02-01 14:31 ` [PATCH v10 53/59] hw/xen: Automatically add xen-platform PCI device for emulated Xen guests David Woodhouse
2023-02-14 16:04 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 54/59] i386/xen: Implement HYPERVISOR_physdev_op David Woodhouse
2023-02-14 16:06 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 55/59] hw/xen: Implement emulated PIRQ hypercall support David Woodhouse
2023-02-14 16:10 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 56/59] hw/xen: Support GSI mapping to PIRQ David Woodhouse
2023-02-14 16:12 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 57/59] hw/xen: Support MSI " David Woodhouse
2023-02-14 16:14 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 58/59] kvm/i386: Add xen-evtchn-max-pirq property David Woodhouse
2023-02-14 16:16 ` Paul Durrant
2023-02-01 14:31 ` [PATCH v10 59/59] i386/xen: Document Xen HVM emulation David Woodhouse
2023-02-14 16:20 ` Paul Durrant
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230201143148.1744093-53-dwmw2@infradead.org \
--to=dwmw2@infradead.org \
--cc=alex.bennee@linaro.org \
--cc=ankur.a.arora@oracle.com \
--cc=armbru@redhat.com \
--cc=cfontana@suse.de \
--cc=dgilbert@redhat.com \
--cc=joao.m.martins@oracle.com \
--cc=julien@xen.org \
--cc=marcel.apfelbaum@gmail.com \
--cc=mst@redhat.com \
--cc=paul@xen.org \
--cc=pbonzini@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=philmd@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=quintela@redhat.com \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).