From: Shunsuke Mie <mie@igel.co.jp>
To: "Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Rusty Russell <rusty@rustcorp.com.au>
Cc: kvm@vger.kernel.org, virtualization@lists.linux-foundation.org,
netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
Shunsuke Mie <mie@igel.co.jp>
Subject: [RFC PATCH v2 7/7] vringh: IOMEM support
Date: Thu, 2 Feb 2023 18:09:34 +0900 [thread overview]
Message-ID: <20230202090934.549556-8-mie@igel.co.jp> (raw)
In-Reply-To: <20230202090934.549556-1-mie@igel.co.jp>

This patch introduces a new memory accessor for vringh. The accessor goes
through ioread16()/iowrite16() and memcpy_fromio()/memcpy_toio(), so that
vringh can be used with virtio rings located in an I/O memory region.

Signed-off-by: Shunsuke Mie <mie@igel.co.jp>
---
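For context, a minimal, hypothetical usage sketch (not part of the patch):
a driver that has ioremapped a split ring living in a device BAR could set
up a vringh over it roughly as below. The helper name, bar_base, the
vring_init() layout, the 4096 alignment, features=0, GFP_KERNEL and
weak_barriers=false are all illustrative; only vringh_init_iomem() itself
comes from this series.

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/virtio_ring.h>
#include <linux/vringh.h>

static int example_init_iomem_vring(struct vringh *vrh,
				    void __iomem *bar_base, u16 num)
{
	struct vring vr;

	/* Compute a standard split-ring layout on top of the BAR mapping. */
	vring_init(&vr, num, (void __force *)bar_base, 4096);

	/*
	 * All subsequent ring accesses then go through the iomem accessors
	 * (ioread16()/iowrite16() and memcpy_{from,to}io()).
	 */
	return vringh_init_iomem(vrh, 0, num, false, GFP_KERNEL,
				 vr.desc, vr.avail, vr.used);
}
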
drivers/vhost/Kconfig | 6 ++++
drivers/vhost/vringh.c | 76 ++++++++++++++++++++++++++++++++++++++++++
include/linux/vringh.h | 8 +++++
3 files changed, 90 insertions(+)
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 587fbae06182..a79a4efbc817 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -6,6 +6,12 @@ config VHOST_IOTLB
This option is selected by any driver which needs to support
an IOMMU in software.
+config VHOST_IOMEM
+	tristate
+	select VHOST_RING
+	help
+	  Generic IOMEM implementation for vhost and vringh.
+
config VHOST_RING
tristate
select VHOST_IOTLB
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 46fb315483ed..e3d9c7281ad0 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -18,6 +18,9 @@
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
+#if IS_REACHABLE(CONFIG_VHOST_IOMEM)
+#include <linux/io.h>
+#endif
#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
@@ -1165,4 +1168,77 @@ EXPORT_SYMBOL(vringh_set_iotlb);
#endif
+#if IS_REACHABLE(CONFIG_VHOST_IOMEM)
+
+/* io-memory space access helpers. */
+static int getu16_iomem(const struct vringh *vrh, u16 *val, const __virtio16 *p)
+{
+	*val = vringh16_to_cpu(vrh, ioread16(p));
+	return 0;
+}
+
+static int putu16_iomem(const struct vringh *vrh, __virtio16 *p, u16 val)
+{
+	iowrite16(cpu_to_vringh16(vrh, val), p);
+	return 0;
+}
+
+static int copydesc_iomem(const struct vringh *vrh, void *dst, const void *src,
+			  size_t len)
+{
+	memcpy_fromio(dst, src, len);
+	return 0;
+}
+
+static int putused_iomem(const struct vringh *vrh, struct vring_used_elem *dst,
+			 const struct vring_used_elem *src, unsigned int num)
+{
+	memcpy_toio(dst, src, num * sizeof(*dst));
+	return 0;
+}
+
+static int xfer_from_iomem(const struct vringh *vrh, void *src, void *dst,
+			   size_t len)
+{
+	memcpy_fromio(dst, src, len);
+	return 0;
+}
+
+static int xfer_to_iomem(const struct vringh *vrh, void *dst, void *src,
+			 size_t len)
+{
+	memcpy_toio(dst, src, len);
+	return 0;
+}
+
+static struct vringh_ops iomem_vringh_ops = {
+	.getu16 = getu16_iomem,
+	.putu16 = putu16_iomem,
+	.xfer_from = xfer_from_iomem,
+	.xfer_to = xfer_to_iomem,
+	.putused = putused_iomem,
+	.copydesc = copydesc_iomem,
+	.range_check = no_range_check,
+	.getrange = NULL,
+};
+
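+/**
+ * vringh_init_iomem - initialize a vringh for a vring placed in IO memory.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @gfp: allocation flags, passed through to __vringh_init().
+ * @desc: the descriptor table in IO memory.
+ * @avail: the avail ring in IO memory.
+ * @used: the used ring in IO memory.
+ *
+ * Returns an error if num is invalid.
+ */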
+int vringh_init_iomem(struct vringh *vrh, u64 features, unsigned int num,
+		      bool weak_barriers, gfp_t gfp, struct vring_desc *desc,
+		      struct vring_avail *avail, struct vring_used *used)
+{
+	int err;
+
+	err = __vringh_init(vrh, features, num, weak_barriers, gfp, desc, avail,
+			    used);
+	if (err)
+		return err;
+
+	memcpy(&vrh->ops, &iomem_vringh_ops, sizeof(iomem_vringh_ops));
+
+	return 0;
+}
+EXPORT_SYMBOL(vringh_init_iomem);
+
+#endif
+
MODULE_LICENSE("GPL");
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 89c73605c85f..420c2d0ed398 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -265,4 +265,12 @@ int vringh_init_iotlb(struct vringh *vrh, u64 features,
#endif /* CONFIG_VHOST_IOTLB */
+#if IS_REACHABLE(CONFIG_VHOST_IOMEM)
+
+int vringh_init_iomem(struct vringh *vrh, u64 features, unsigned int num,
+		      bool weak_barriers, gfp_t gfp, struct vring_desc *desc,
+		      struct vring_avail *avail, struct vring_used *used);
+
+#endif /* CONFIG_VHOST_IOMEM */
+
#endif /* _LINUX_VRINGH_H */
--
2.25.1