From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: Paolo Bonzini <pbonzini@redhat.com>, Paul Durrant <paul@xen.org>,
Joao Martins <joao.m.martins@oracle.com>,
Ankur Arora <ankur.a.arora@oracle.com>,
Stefano Stabellini <sstabellini@kernel.org>,
vikram.garhwal@amd.com,
Anthony Perard <anthony.perard@citrix.com>,
xen-devel@lists.xenproject.org
Subject: [RFC PATCH v1 15/25] hw/xen: Use XEN_PAGE_SIZE in PV backend drivers
Date: Thu, 2 Mar 2023 15:34:25 +0000
Message-ID: <20230302153435.1170111-16-dwmw2@infradead.org>
In-Reply-To: <20230302153435.1170111-1-dwmw2@infradead.org>
From: David Woodhouse <dwmw@amazon.co.uk>
XC_PAGE_SIZE comes from the actual Xen libraries, while XEN_PAGE_SIZE is
provided by QEMU itself in xen_backend_ops.h. For backends which may be
built for emulation mode, use the latter.
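For reference, the QEMU-provided constant looks roughly like the sketch
below (an assumed form, shown for illustration only; the authoritative
definitions live in xen_backend_ops.h). The point is that the Xen ABI
page size is fixed at 4 KiB regardless of the host page size, and the
constant is available even when the real Xen libraries are not linked in:

    /* Assumed sketch of the QEMU-side constants, not a verbatim copy. */
    #define XEN_PAGE_SHIFT  12
    #define XEN_PAGE_SIZE   (1 << XEN_PAGE_SHIFT)
    #define XEN_PAGE_MASK   (~(XEN_PAGE_SIZE - 1))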
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
---
hw/block/dataplane/xen-block.c | 8 ++++----
hw/display/xenfb.c | 12 ++++++------
hw/net/xen_nic.c | 12 ++++++------
hw/usb/xen-usb.c | 8 ++++----
4 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c
index e55b713002..8322a1de82 100644
--- a/hw/block/dataplane/xen-block.c
+++ b/hw/block/dataplane/xen-block.c
@@ -101,9 +101,9 @@ static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
* re-use requests, allocate the memory once here. It will be freed
* xen_block_dataplane_destroy() when the request list is freed.
*/
- request->buf = qemu_memalign(XC_PAGE_SIZE,
+ request->buf = qemu_memalign(XEN_PAGE_SIZE,
BLKIF_MAX_SEGMENTS_PER_REQUEST *
- XC_PAGE_SIZE);
+ XEN_PAGE_SIZE);
dataplane->requests_total++;
qemu_iovec_init(&request->v, 1);
} else {
@@ -185,7 +185,7 @@ static int xen_block_parse_request(XenBlockRequest *request)
goto err;
}
if (request->req.seg[i].last_sect * dataplane->sector_size >=
- XC_PAGE_SIZE) {
+ XEN_PAGE_SIZE) {
error_report("error: page crossing");
goto err;
}
@@ -740,7 +740,7 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
dataplane->protocol = protocol;
- ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
+ ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
switch (dataplane->protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 2c4016fcbd..0074a9b6f8 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -489,13 +489,13 @@ static int xenfb_map_fb(struct XenFB *xenfb)
}
if (xenfb->pixels) {
- munmap(xenfb->pixels, xenfb->fbpages * XC_PAGE_SIZE);
+ munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE);
xenfb->pixels = NULL;
}
- xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XC_PAGE_SIZE);
+ xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE);
n_fbdirs = xenfb->fbpages * mode / 8;
- n_fbdirs = DIV_ROUND_UP(n_fbdirs, XC_PAGE_SIZE);
+ n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE);
pgmfns = g_new0(xen_pfn_t, n_fbdirs);
fbmfns = g_new0(xen_pfn_t, xenfb->fbpages);
@@ -528,8 +528,8 @@ static int xenfb_configure_fb(struct XenFB *xenfb, size_t fb_len_lim,
{
size_t mfn_sz = sizeof_field(struct xenfb_page, pd[0]);
size_t pd_len = sizeof_field(struct xenfb_page, pd) / mfn_sz;
- size_t fb_pages = pd_len * XC_PAGE_SIZE / mfn_sz;
- size_t fb_len_max = fb_pages * XC_PAGE_SIZE;
+ size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz;
+ size_t fb_len_max = fb_pages * XEN_PAGE_SIZE;
int max_width, max_height;
if (fb_len_lim > fb_len_max) {
@@ -930,7 +930,7 @@ static void fb_disconnect(struct XenLegacyDevice *xendev)
* instead. This releases the guest pages and keeps qemu happy.
*/
qemu_xen_foreignmem_unmap(fb->pixels, fb->fbpages);
- fb->pixels = mmap(fb->pixels, fb->fbpages * XC_PAGE_SIZE,
+ fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON,
-1, 0);
if (fb->pixels == MAP_FAILED) {
diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c
index 166d03787d..9bbf6599fc 100644
--- a/hw/net/xen_nic.c
+++ b/hw/net/xen_nic.c
@@ -145,7 +145,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
continue;
}
- if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
+ if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {
xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
net_tx_error(netdev, &txreq, rc);
continue;
@@ -171,7 +171,7 @@ static void net_tx_packets(struct XenNetDev *netdev)
if (txreq.flags & NETTXF_csum_blank) {
/* have read-only mapping -> can't fill checksum in-place */
if (!tmpbuf) {
- tmpbuf = g_malloc(XC_PAGE_SIZE);
+ tmpbuf = g_malloc(XEN_PAGE_SIZE);
}
memcpy(tmpbuf, page + txreq.offset, txreq.size);
net_checksum_calculate(tmpbuf, txreq.size, CSUM_ALL);
@@ -243,9 +243,9 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size
if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
return 0;
}
- if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
+ if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {
xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
- (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
+ (unsigned long)size, XEN_PAGE_SIZE - NET_IP_ALIGN);
return -1;
}
@@ -348,8 +348,8 @@ static int net_connect(struct XenLegacyDevice *xendev)
netdev->txs = NULL;
return -1;
}
- BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
- BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);
+ BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);
+ BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);
xen_be_bind_evtchn(&netdev->xendev);
diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c
index a770a64cb4..66cb3f7c24 100644
--- a/hw/usb/xen-usb.c
+++ b/hw/usb/xen-usb.c
@@ -161,7 +161,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
for (i = 0; i < nr_segs; i++) {
if ((unsigned)usbback_req->req.seg[i].offset +
- (unsigned)usbback_req->req.seg[i].length > XC_PAGE_SIZE) {
+ (unsigned)usbback_req->req.seg[i].length > XEN_PAGE_SIZE) {
xen_pv_printf(xendev, 0, "segment crosses page boundary\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int usbback_gnttab_map(struct usbback_req *usbback_req)
for (i = 0; i < usbback_req->nr_buffer_segs; i++) {
seg = usbback_req->req.seg + i;
- addr = usbback_req->buffer + i * XC_PAGE_SIZE + seg->offset;
+ addr = usbback_req->buffer + i * XEN_PAGE_SIZE + seg->offset;
qemu_iovec_add(&usbback_req->packet.iov, addr, seg->length);
}
}
@@ -902,8 +902,8 @@ static int usbback_connect(struct XenLegacyDevice *xendev)
usbif->conn_ring_ref = conn_ring_ref;
urb_sring = usbif->urb_sring;
conn_sring = usbif->conn_sring;
- BACK_RING_INIT(&usbif->urb_ring, urb_sring, XC_PAGE_SIZE);
- BACK_RING_INIT(&usbif->conn_ring, conn_sring, XC_PAGE_SIZE);
+ BACK_RING_INIT(&usbif->urb_ring, urb_sring, XEN_PAGE_SIZE);
+ BACK_RING_INIT(&usbif->conn_ring, conn_sring, XEN_PAGE_SIZE);
xen_be_bind_evtchn(xendev);
--
2.39.0