From: David Woodhouse <dwmw2@infradead.org>
To: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-devel@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
Paul Durrant <paul@xen.org>,
Joao Martins <joao.m.martins@oracle.com>,
Ankur Arora <ankur.a.arora@oracle.com>,
Stefano Stabellini <sstabellini@kernel.org>,
vikram.garhwal@amd.com,
Anthony Perard <anthony.perard@citrix.com>,
xen-devel@lists.xenproject.org,
Juan Quintela <quintela@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>
Subject: [PULL 15/27] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers
Date: Tue, 7 Mar 2023 18:26:55 +0000
Message-ID: <20230307182707.2298618-16-dwmw2@infradead.org>
In-Reply-To: <20230307182707.2298618-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

XC_PAGE_SIZE comes from the actual Xen libraries, while XEN_PAGE_SIZE is
provided by QEMU itself in xen_backend_ops.h. For backends which may be
built for emulation mode, use the latter.
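
For reference, a rough sketch of the QEMU-side definitions in
include/hw/xen/xen_backend_ops.h (paraphrased; the exact comments may
differ, but the value is fixed at 4KiB independent of the host page
size):

    #define XEN_PAGE_SHIFT  12
    #define XEN_PAGE_SIZE   (1UL << XEN_PAGE_SHIFT)   /* always 4KiB */
    #define XEN_PAGE_MASK   (~(XEN_PAGE_SIZE - 1))

Using these keeps the backends buildable even when the Xen headers that
provide XC_PAGE_SIZE are not present at all.
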
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
---
hw/block/dataplane/xen-block.c | 8 ++++----
hw/display/xenfb.c | 12 ++++++------
hw/net/xen_nic.c | 12 ++++++------
hw/usb/xen-usb.c | 8 ++++----
4 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index e55b713002..8322a1de82 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -101,9 +101,9 @@ static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
* re-use requests, allocate the memory once here. It will be freed
* xen_block_dataplane_destroy() when the request list is freed.
*/
- request->buf = qemu_memalign(XC_PAGE_SIZE,
+ request->buf = qemu_memalign(XEN_PAGE_SIZE,
BLKIF_MAX_SEGMENTS_PER_REQUEST *
- XC_PAGE_SIZE);
+ XEN_PAGE_SIZE);
dataplane->requests_total++;
qemu_iovec_init(&request->v, 1);
} else {
@@ -185,7 +185,7 @@ static int xen_block_parse_request(XenBlockRequest *request)
goto err;
}
if (request->req.seg[i].last_sect * dataplane->sector_size >=
- XC_PAGE_SIZE) {
+ XEN_PAGE_SIZE) {
error_report("error: page crossing");
goto err;
}
@@ -740,7 +740,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
dataplane->protocol = protocol;
- ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
+ ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
switch (dataplane->protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 2c4016fcbd..0074a9b6f8 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -489,13 +489,13 @@ static int xenfb_map_fb(struct XenFB *xenfb)
}
if (xenfb->pixels) {
- munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
+ munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE);
xenfb->pixels = NULL;
}
- xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE);
+ xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE);
n_fbdirs = xenfb->fbpages * mode / 8;
- n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE);
+ n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE);
pgmfns = g_new0(xen_pfn_t, n_fbdirs);
fbmfns = g_new0(xen_pfn_t, xenfb->fbpages);
@@ -528,8 +528,8 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
{
size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]);
size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz;
- size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz;
- size_t fb_len_max = fb_pages * XC_PAGE_SIZE;
+ size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz;
+ size_t fb_len_max = fb_pages * XEN_PAGE_SIZE;
int max_width, max_height;
if (fb_len_lim > fb_len_max) {
@@ -930,7 +930,7 @@ static void fb_disconnect(struct XenLegacyDevice *xendev)
* instead. This releases the guest pages and keeps qemu happy.
*/
qemu_xen_foreignmem_unmap(fb->pixels, fb->fbpages);
- fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE,
+ fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON,
-1, 0);
if (fb->pixels == MAP_FAILED) {
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 166d03787d..9bbf6599fc 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -145,7 +145,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
continue;
}
- if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
+ if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {
xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
net_tx_error(netdev, &txreq, rc);
continue;
@@ -171,7 +171,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
if (txreq.flags & NETTXF_csum_blank) {
/* have read-only mapping -> can't fill checksum in-place */
if (!tmpbuf) {
- tmpbuf = g_malloc(XC_PAGE_SIZE);
+ tmpbuf = g_malloc(XEN_PAGE_SIZE);
}
memcpy(tmpbuf, page + txreq.offset, txreq.size);
net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
@@ -243,9 +243,9 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
return 0;
}
- if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
+ if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {
xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
- (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
+ (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN);
return -1;
}
@@ -348,8 +348,8 @@ static int net_connect(struct XenLegacyDevice *xendev)
netdev->txs = NULL;
return -1;
}
- BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
- BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
+ BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);
+ BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);
xen_be_bind_evtchn(&netdev->xendev);
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index a770a64cb4..66cb3f7c24 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -161,7 +161,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
for (i = 0; i < nr_segs; i++) {
if ((unsigned)usbback_req->req.seg[i].offset +
- (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) {
+ (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) {
xen_pv_printf(xendev, 0, "segment crosses page boundary\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
seg = usbback_req->req.seg + i;
- addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset;
+ addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset;
qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length);
}
}
@@ -902,8 +902,8 @@ static int usbback_connect(struct XenLegacyDevice *xendev)
usbif->conn_ring_ref = conn_ring_ref;
urb_sring = usbif->urb_sring;
conn_sring = usbif->conn_sring;
- BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE);
- BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE);
+ BACK_RING_INIT(&usbif->urb_ring, urb_sring, XEN_PAGE_SIZE);
+ BACK_RING_INIT(&usbif->conn_ring, conn_sring, XEN_PAGE_SIZE);
xen_be_bind_evtchn(xendev);
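
(Note, not part of this patch: when QEMU is built against the real Xen
libraries, the two constants must of course agree. A hypothetical
compile-time guard using QEMU's existing build-assert macro could make
that explicit where both definitions are in scope:)

    /* Hypothetical sanity check: QEMU's notion of the Xen page size
     * must match the one from the Xen libraries. */
    QEMU_BUILD_BUG_ON(XC_PAGE_SIZE != XEN_PAGE_SIZE);
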
--
2.39.0