From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: "Stefan Hajnoczi" <stefanha@redhat.com>,
"Kevin Wolf" <kwolf@redhat.com>, "Fam Zheng" <fam@euphon.net>,
"Hanna Reitz" <hreitz@redhat.com>,
qemu-block@nongnu.org, qemu-s390x@nongnu.org,
"Thomas Huth" <thuth@redhat.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Farhan Ali" <alifm@linux.ibm.com>
Subject: [PULL 2/3] include: Add a header to define host PCI MMIO functions
Date: Thu, 8 May 2025 10:22:33 -0400
Message-ID: <20250508142234.44974-3-stefanha@redhat.com>
In-Reply-To: <20250508142234.44974-1-stefanha@redhat.com>
From: Farhan Ali <alifm@linux.ibm.com>
Add a generic API for host PCI MMIO reads/writes
(e.g. Linux VFIO BAR accesses). The functions access
little-endian memory and return the result in
host CPU endianness.
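A minimal usage sketch (illustrative only, not part of this patch):
assume 'bar' points to a BAR region already mmap()ed from a VFIO
device fd, and 0x10 is a hypothetical little-endian 32-bit register
offset:

    #include "qemu/host-pci-mmio.h"

    /* Read a 32-bit LE register; the result is returned in host CPU
     * endianness. On s390x this routes through the s390x PCI MMIO
     * helpers, elsewhere it is a plain memory load. */
    uint32_t val = host_pci_ldl_le_p((uint8_t *)bar + 0x10);

    /* Write it back, converting from host endianness to LE. */
    host_pci_stl_le_p((uint8_t *)bar + 0x10, val);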
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Farhan Ali <alifm@linux.ibm.com>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Message-id: 20250430185012.2303-3-alifm@linux.ibm.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
include/qemu/host-pci-mmio.h | 136 +++++++++++++++++++++++++++++++++++
1 file changed, 136 insertions(+)
create mode 100644 include/qemu/host-pci-mmio.h
diff --git a/include/qemu/host-pci-mmio.h b/include/qemu/host-pci-mmio.h
new file mode 100644
index 0000000000..a8ed9938ac
--- /dev/null
+++ b/include/qemu/host-pci-mmio.h
@@ -0,0 +1,136 @@
+/*
+ * API for host PCI MMIO accesses (e.g. Linux VFIO BARs)
+ *
+ * Copyright 2025 IBM Corp.
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HOST_PCI_MMIO_H
+#define HOST_PCI_MMIO_H
+
+#include "qemu/bswap.h"
+#include "qemu/s390x_pci_mmio.h"
+
+static inline uint8_t host_pci_ldub_p(const void *ioaddr)
+{
+ uint8_t ret = 0;
+#ifdef __s390x__
+ ret = s390x_pci_mmio_read_8(ioaddr);
+#else
+ ret = ldub_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint16_t host_pci_lduw_le_p(const void *ioaddr)
+{
+ uint16_t ret = 0;
+#ifdef __s390x__
+ ret = le16_to_cpu(s390x_pci_mmio_read_16(ioaddr));
+#else
+ ret = lduw_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint32_t host_pci_ldl_le_p(const void *ioaddr)
+{
+ uint32_t ret = 0;
+#ifdef __s390x__
+ ret = le32_to_cpu(s390x_pci_mmio_read_32(ioaddr));
+#else
+ ret = ldl_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline uint64_t host_pci_ldq_le_p(const void *ioaddr)
+{
+ uint64_t ret = 0;
+#ifdef __s390x__
+ ret = le64_to_cpu(s390x_pci_mmio_read_64(ioaddr));
+#else
+ ret = ldq_le_p(ioaddr);
+#endif
+
+ return ret;
+}
+
+static inline void host_pci_stb_p(void *ioaddr, uint8_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_8(ioaddr, val);
+#else
+ stb_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stw_le_p(void *ioaddr, uint16_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_16(ioaddr, cpu_to_le16(val));
+#else
+ stw_le_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stl_le_p(void *ioaddr, uint32_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_32(ioaddr, cpu_to_le32(val));
+#else
+ stl_le_p(ioaddr, val);
+#endif
+}
+
+static inline void host_pci_stq_le_p(void *ioaddr, uint64_t val)
+{
+#ifdef __s390x__
+ s390x_pci_mmio_write_64(ioaddr, cpu_to_le64(val));
+#else
+ stq_le_p(ioaddr, val);
+#endif
+}
+
+static inline uint64_t host_pci_ldn_le_p(const void *ioaddr, int sz)
+{
+ switch (sz) {
+ case 1:
+ return host_pci_ldub_p(ioaddr);
+ case 2:
+ return host_pci_lduw_le_p(ioaddr);
+ case 4:
+ return host_pci_ldl_le_p(ioaddr);
+ case 8:
+ return host_pci_ldq_le_p(ioaddr);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline void host_pci_stn_le_p(void *ioaddr, int sz, uint64_t v)
+{
+ switch (sz) {
+ case 1:
+ host_pci_stb_p(ioaddr, v);
+ break;
+ case 2:
+ host_pci_stw_le_p(ioaddr, v);
+ break;
+ case 4:
+ host_pci_stl_le_p(ioaddr, v);
+ break;
+ case 8:
+ host_pci_stq_le_p(ioaddr, v);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#endif
--
2.49.0