From: "Edgar E. Iglesias" <edgar.iglesias@gmail.com>
To: qemu-devel@nongnu.org
Cc: sstabellini@kernel.org, anthony@xenproject.org, paul@xen.org,
peter.maydell@linaro.org, edgar.iglesias@amd.com,
xen-devel@lists.xenproject.org,
"Edgar E. Iglesias" <edgar.iglesias@gmail.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Richard Henderson <richard.henderson@linaro.org>,
Eduardo Habkost <eduardo@habkost.net>,
"Michael S. Tsirkin" <mst@redhat.com>,
Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Subject: [PULL v2 2/5] hw/xen: Expose handle_bufioreq in xen_register_ioreq
Date: Thu, 3 Oct 2024 20:56:51 +0200
Message-ID: <20241003185655.1480819-3-edgar.iglesias@gmail.com>
In-Reply-To: <20241003185655.1480819-1-edgar.iglesias@gmail.com>
From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Expose handle_bufioreq as a parameter of xen_register_ioreq() so that
machines can choose whether to enable buffered ioreqs.

No functional change: all existing callers still pass
HVM_IOREQSRV_BUFIOREQ_ATOMIC.

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
---
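
Not part of the commit message, just an illustration of the new
interface for reviewers: a minimal sketch of how a machine can now opt
out of buffered ioreqs. The machine init function and the empty memory
listener below are hypothetical placeholders; the headers and the
HVM_IOREQSRV_BUFIOREQ_* constants are the existing QEMU/Xen ones.

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "exec/memory.h"
#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen_native.h"

/* Placeholder listener; a real machine wires up region_add/region_del. */
static MemoryListener example_memory_listener = {
    .name = "example-xen-memory",
};

/* Hypothetical machine init, e.g. hooked up via MachineClass::init. */
static void example_xen_machine_init(MachineState *ms)
{
    XenIOState *state = g_new0(XenIOState, 1);

    /* Register an ioreq server with buffered ioreqs disabled. */
    xen_register_ioreq(state, ms->smp.max_cpus,
                       HVM_IOREQSRV_BUFIOREQ_OFF,
                       &example_memory_listener);
}

Patch 3/5 in this series ("hw/xen: xenpvh: Disable buffered IOREQs for
ARM") uses the new parameter along these lines for the ARM PVH machine.
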
hw/i386/xen/xen-hvm.c | 4 +-
hw/xen/xen-hvm-common.c | 100 ++++++++++++++++++++------------
hw/xen/xen-pvh-common.c | 4 +-
include/hw/xen/xen-hvm-common.h | 3 +
include/hw/xen/xen_native.h | 3 +-
5 files changed, 73 insertions(+), 41 deletions(-)
diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c
index 4f6446600c..d3df488c48 100644
--- a/hw/i386/xen/xen-hvm.c
+++ b/hw/i386/xen/xen-hvm.c
@@ -614,7 +614,9 @@ void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
state = g_new0(XenIOState, 1);
- xen_register_ioreq(state, max_cpus, &xen_memory_listener);
+ xen_register_ioreq(state, max_cpus,
+ HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+ &xen_memory_listener);
xen_is_stubdomain = xen_check_stubdomain(state->xenstore);
diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c
index 3a9d6f981b..7d2b72853b 100644
--- a/hw/xen/xen-hvm-common.c
+++ b/hw/xen/xen-hvm-common.c
@@ -667,6 +667,8 @@ static int xen_map_ioreq_server(XenIOState *state)
xen_pfn_t ioreq_pfn;
xen_pfn_t bufioreq_pfn;
evtchn_port_t bufioreq_evtchn;
+ unsigned long num_frames = 1;
+ unsigned long frame = 1;
int rc;
/*
@@ -675,59 +677,78 @@ static int xen_map_ioreq_server(XenIOState *state)
*/
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
+
+ if (state->has_bufioreq) {
+ frame = 0;
+ num_frames = 2;
+ }
state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
XENMEM_resource_ioreq_server,
- state->ioservid, 0, 2,
+ state->ioservid,
+ frame, num_frames,
&addr,
PROT_READ | PROT_WRITE, 0);
if (state->fres != NULL) {
trace_xen_map_resource_ioreq(state->ioservid, addr);
- state->buffered_io_page = addr;
- state->shared_page = addr + XC_PAGE_SIZE;
+ state->shared_page = addr;
+ if (state->has_bufioreq) {
+ state->buffered_io_page = addr;
+ state->shared_page = addr + XC_PAGE_SIZE;
+ }
} else if (errno != EOPNOTSUPP) {
error_report("failed to map ioreq server resources: error %d handle=%p",
errno, xen_xc);
return -1;
}
- rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
- (state->shared_page == NULL) ?
- &ioreq_pfn : NULL,
- (state->buffered_io_page == NULL) ?
- &bufioreq_pfn : NULL,
- &bufioreq_evtchn);
- if (rc < 0) {
- error_report("failed to get ioreq server info: error %d handle=%p",
- errno, xen_xc);
- return rc;
- }
+ /*
+ * If we fail to map the shared page with xenforeignmemory_map_resource()
+ * or if we're using buffered ioreqs, we need xen_get_ioreq_server_info()
+ * to provide the addresses to map the shared page and/or to get the
+ * event-channel port for buffered ioreqs.
+ */
+ if (state->shared_page == NULL || state->has_bufioreq) {
+ rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
+ (state->shared_page == NULL) ?
+ &ioreq_pfn : NULL,
+ (state->has_bufioreq &&
+ state->buffered_io_page == NULL) ?
+ &bufioreq_pfn : NULL,
+ &bufioreq_evtchn);
+ if (rc < 0) {
+ error_report("failed to get ioreq server info: error %d handle=%p",
+ errno, xen_xc);
+ return rc;
+ }
- if (state->shared_page == NULL) {
- trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
+ if (state->shared_page == NULL) {
+ trace_xen_map_ioreq_server_shared_page(ioreq_pfn);
- state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &ioreq_pfn, NULL);
+ state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &ioreq_pfn, NULL);
+ }
if (state->shared_page == NULL) {
error_report("map shared IO page returned error %d handle=%p",
errno, xen_xc);
}
- }
- if (state->buffered_io_page == NULL) {
- trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
+ if (state->has_bufioreq && state->buffered_io_page == NULL) {
+ trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);
- state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
- PROT_READ | PROT_WRITE,
- 1, &bufioreq_pfn,
- NULL);
- if (state->buffered_io_page == NULL) {
- error_report("map buffered IO page returned error %d", errno);
- return -1;
+ state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
+ PROT_READ | PROT_WRITE,
+ 1, &bufioreq_pfn,
+ NULL);
+ if (state->buffered_io_page == NULL) {
+ error_report("map buffered IO page returned error %d", errno);
+ return -1;
+ }
}
}
- if (state->shared_page == NULL || state->buffered_io_page == NULL) {
+ if (state->shared_page == NULL ||
+ (state->has_bufioreq && state->buffered_io_page == NULL)) {
return -1;
}
@@ -830,14 +851,15 @@ static void xen_do_ioreq_register(XenIOState *state,
state->ioreq_local_port[i] = rc;
}
- rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
- state->bufioreq_remote_port);
- if (rc == -1) {
- error_report("buffered evtchn bind error %d", errno);
- goto err;
+ if (state->has_bufioreq) {
+ rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
+ state->bufioreq_remote_port);
+ if (rc == -1) {
+ error_report("buffered evtchn bind error %d", errno);
+ goto err;
+ }
+ state->bufioreq_local_port = rc;
}
- state->bufioreq_local_port = rc;
-
/* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
xen_map_cache_init(xen_phys_offset_to_gaddr, state);
@@ -865,6 +887,7 @@ err:
}
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ uint8_t handle_bufioreq,
const MemoryListener *xen_memory_listener)
{
int rc;
@@ -883,7 +906,8 @@ void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
goto err;
}
- rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
+ state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF;
+ rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid);
if (!rc) {
xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
} else {
diff --git a/hw/xen/xen-pvh-common.c b/hw/xen/xen-pvh-common.c
index 28d7168446..08641fdcec 100644
--- a/hw/xen/xen-pvh-common.c
+++ b/hw/xen/xen-pvh-common.c
@@ -194,7 +194,9 @@ static void xen_pvh_init(MachineState *ms)
}
xen_pvh_init_ram(s, sysmem);
- xen_register_ioreq(&s->ioreq, ms->smp.max_cpus, &xen_memory_listener);
+ xen_register_ioreq(&s->ioreq, ms->smp.max_cpus,
+ HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+ &xen_memory_listener);
if (s->cfg.virtio_mmio_num) {
xen_create_virtio_mmio_devices(s);
diff --git a/include/hw/xen/xen-hvm-common.h b/include/hw/xen/xen-hvm-common.h
index 3d796235dc..0f586c4384 100644
--- a/include/hw/xen/xen-hvm-common.h
+++ b/include/hw/xen/xen-hvm-common.h
@@ -81,6 +81,8 @@ typedef struct XenIOState {
QLIST_HEAD(, XenPciDevice) dev_list;
DeviceListener device_listener;
+ bool has_bufioreq;
+
Notifier exit;
} XenIOState;
@@ -95,6 +97,7 @@ void xen_device_unrealize(DeviceListener *listener, DeviceState *dev);
void xen_hvm_change_state_handler(void *opaque, bool running, RunState rstate);
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
+ uint8_t handle_bufioreq,
const MemoryListener *xen_memory_listener);
void cpu_ioreq_pio(ioreq_t *req);
diff --git a/include/hw/xen/xen_native.h b/include/hw/xen/xen_native.h
index 1a5ad693a4..5caf91a616 100644
--- a/include/hw/xen/xen_native.h
+++ b/include/hw/xen/xen_native.h
@@ -464,10 +464,11 @@ static inline void xen_unmap_pcidev(domid_t dom,
}
static inline int xen_create_ioreq_server(domid_t dom,
+ int handle_bufioreq,
ioservid_t *ioservid)
{
int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
- HVM_IOREQSRV_BUFIOREQ_ATOMIC,
+ handle_bufioreq,
ioservid);
if (rc == 0) {
--
2.43.0
Thread overview: 9+ messages
2024-10-03 18:56 [PULL v2 0/5] Xen Queue Edgar E. Iglesias
2024-10-03 18:56 ` [PULL v2 1/5] hw/xen: Remove deadcode Edgar E. Iglesias
2024-10-03 18:56 ` Edgar E. Iglesias [this message]
2024-10-07 15:42 ` [PULL v2 2/5] hw/xen: Expose handle_bufioreq in xen_register_ioreq Peter Maydell
2024-10-08 16:15 ` Edgar E. Iglesias
2024-10-03 18:56 ` [PULL v2 3/5] hw/xen: xenpvh: Disable buffered IOREQs for ARM Edgar E. Iglesias
2024-10-03 18:56 ` [PULL v2 4/5] hw/xen: xenpvh: Add pci-intx-irq-base property Edgar E. Iglesias
2024-10-03 18:56 ` [PULL v2 5/5] hw/arm: xenpvh: Enable PCI for ARM PVH Edgar E. Iglesias
2024-10-04 16:07 ` [PULL v2 0/5] Xen Queue Peter Maydell