From: Liang Chen <liangchen.linux@gmail.com>
To: jasowang@redhat.com, mst@redhat.com
Cc: xuanzhuo@linux.alibaba.com, netdev@vger.kernel.org,
linux-kernel@vger.kernel.org, alexander.duyck@gmail.com,
virtualization@lists.linux-foundation.org, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, davem@davemloft.net
Subject: [PATCH net-next 4/5] virtio_ring: Introduce DMA pre-handler
Date: Fri, 26 May 2023 13:46:20 +0800 [thread overview]
Message-ID: <20230526054621.18371-4-liangchen.linux@gmail.com> (raw)
In-Reply-To: <20230526054621.18371-1-liangchen.linux@gmail.com>
Currently, DMA operations on virtio devices' data buffers are encapsulated
within the underlying virtqueue implementation. DMA map/unmap operations
are performed for each data buffer attached to/detached from the virtqueue,
which is transparent and invisible to the higher-level virtio device
drivers. This encapsulation makes it infeasible for device drivers to
introduce certain mechanisms, such as page pool, that require explicit
management of DMA map/unmap. Therefore, this patch inserts a pre-handler
before the generic DMA map/unmap operations, giving virtio device drivers
the opportunity to participate in DMA operations.
Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
---
drivers/virtio/virtio_ring.c | 73 +++++++++++++++++++++++++++++++++---
include/linux/virtio.h | 18 +++++++++
2 files changed, 85 insertions(+), 6 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c5310eaf8b46..a99641260555 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -213,6 +213,9 @@ struct vring_virtqueue {
bool last_add_time_valid;
ktime_t last_add_time;
#endif
+
+ /* DMA mapping Pre-handler for virtio device driver */
+ struct virtqueue_pre_dma_ops *pre_dma_ops;
};
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
@@ -369,6 +372,19 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
return (dma_addr_t)sg_phys(sg);
}
+ /* Allow virtio drivers to perform customized mapping operation, and
+ * fallback to the generic path if it fails to handle the mapping.
+ */
+ if (vq->pre_dma_ops && vq->pre_dma_ops->map_page) {
+ dma_addr_t addr;
+
+ addr = vq->pre_dma_ops->map_page(vring_dma_dev(vq),
+ sg_page(sg), sg->offset, sg->length,
+ direction, 0);
+ if (addr)
+ return addr;
+ }
+
/*
* We can't use dma_map_sg, because we don't use scatterlists in
* the way it expects (we don't guarantee that the scatterlist
@@ -432,6 +448,15 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+ if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ return;
+ }
+
dma_unmap_page(vring_dma_dev(vq),
virtio64_to_cpu(vq->vq.vdev, desc->addr),
virtio32_to_cpu(vq->vq.vdev, desc->len),
@@ -456,14 +481,22 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
extra[i].len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- dma_unmap_page(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ goto out;
+ } else if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ goto out;
}
+ dma_unmap_page(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
out:
return extra[i].next;
}
@@ -1206,10 +1239,19 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ extra->addr,
+ extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ return;
+ }
dma_unmap_page(vring_dma_dev(vq),
extra->addr, extra->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
}
}
@@ -1223,6 +1265,15 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
flags = le16_to_cpu(desc->flags);
+ if (vq->pre_dma_ops && vq->pre_dma_ops->unmap_page) {
+ if (vq->pre_dma_ops->unmap_page(vring_dma_dev(vq),
+ le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE, 0))
+ return;
+ }
+
dma_unmap_page(vring_dma_dev(vq),
le64_to_cpu(desc->addr),
le32_to_cpu(desc->len),
@@ -2052,6 +2103,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->packed_ring = true;
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->pre_dma_ops = NULL;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2541,6 +2593,7 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
#endif
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->pre_dma_ops = NULL;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2945,4 +2998,12 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
+/* The virtio device driver can register its own DMA map/unmap pre-handler. */
+void virtqueue_register_pre_dma_ops(struct virtqueue *vq,
+ struct virtqueue_pre_dma_ops *pre_dma_ops)
+{
+ to_vvq(vq)->pre_dma_ops = pre_dma_ops;
+}
+EXPORT_SYMBOL_GPL(virtqueue_register_pre_dma_ops);
+
MODULE_LICENSE("GPL");
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b93238db94e3..1d5755b5e03f 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-map-ops.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -203,4 +204,21 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+/**
+ * struct virtqueue_pre_dma_ops - DMA pre-handler for virtio device driver
+ * @map_page: map a single page of memory for DMA
+ * @unmap_page: unmap a single page of memory for DMA
+ */
+struct virtqueue_pre_dma_ops {
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+ bool (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+};
+
+void virtqueue_register_pre_dma_ops(struct virtqueue *vq,
+ struct virtqueue_pre_dma_ops *pre_dma_ops);
+
#endif /* _LINUX_VIRTIO_H */
--
2.31.1
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
next prev parent reply other threads:[~2023-05-26 5:47 UTC|newest]
Thread overview: 54+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-26 5:46 [PATCH net-next 1/5] virtio_net: Fix an unsafe reference to the page chain Liang Chen
2023-05-26 5:46 ` [PATCH net-next 2/5] virtio_net: Add page_pool support to improve performance Liang Chen
2023-05-26 6:50 ` Jason Wang
2023-05-27 12:35 ` Liang Chen
2023-05-28 6:40 ` Michael S. Tsirkin
2023-05-29 7:28 ` Liang Chen
2023-05-31 3:10 ` Xuan Zhuo
2023-06-07 9:11 ` Liang Chen
2023-06-07 9:33 ` Xuan Zhuo
2023-05-26 16:11 ` kernel test robot
2023-05-28 6:27 ` Michael S. Tsirkin
2023-05-29 7:28 ` Liang Chen
2023-05-28 6:20 ` Michael S. Tsirkin
2023-05-29 7:27 ` Liang Chen
2023-05-29 9:55 ` Michael S. Tsirkin
2023-05-30 1:19 ` Liang Chen
2023-06-07 9:08 ` Liang Chen
2023-06-07 9:35 ` Xuan Zhuo
2023-06-07 13:58 ` Liang Chen
2023-06-07 20:17 ` Michael S. Tsirkin
2023-06-08 0:38 ` Jason Wang
2023-06-08 3:54 ` Xuan Zhuo
2023-06-09 2:57 ` Liang Chen
2023-07-05 5:41 ` Liang Chen
2023-07-05 6:04 ` Jason Wang
2023-07-06 1:01 ` Liang Chen
2023-06-09 2:57 ` Liang Chen
2023-11-29 14:50 ` Zhu Yanjun
2023-11-29 14:59 ` Michael S. Tsirkin
2023-11-29 15:22 ` Zhu Yanjun
2023-11-29 15:29 ` Zhu Yanjun
2023-11-30 2:34 ` Xuan Zhuo
2023-11-30 5:30 ` Zhu Yanjun
2023-12-01 1:38 ` Xuan Zhuo
2023-12-04 5:24 ` Zhu Yanjun
2023-11-30 7:17 ` Zhu Yanjun
2023-05-26 5:46 ` [PATCH net-next 3/5] virtio_net: Add page pool fragmentation support Liang Chen
2023-05-26 17:44 ` kernel test robot
[not found] ` <20230526082914.owofnszwdjgcjwhi@soft-dev3-1>
2023-05-27 12:36 ` Liang Chen
2023-05-28 6:25 ` Michael S. Tsirkin
2023-05-29 7:29 ` Liang Chen
[not found] ` <992eb402-43f9-0289-f95c-b41cb17f2e59@huawei.com>
2023-05-29 7:30 ` Liang Chen
2023-05-26 5:46 ` Liang Chen [this message]
2023-05-26 6:57 ` [PATCH net-next 4/5] virtio_ring: Introduce DMA pre-handler Jason Wang
2023-05-26 5:46 ` [PATCH net-next 5/5] virtio_net: Implement " Liang Chen
2023-05-26 7:06 ` Jason Wang
2023-05-27 12:35 ` Liang Chen
2023-05-26 17:34 ` kernel test robot
2023-05-26 6:38 ` [PATCH net-next 1/5] virtio_net: Fix an unsafe reference to the page chain Jason Wang
2023-05-27 12:33 ` Liang Chen
2023-05-28 6:29 ` Michael S. Tsirkin
2023-05-29 7:25 ` Liang Chen
2023-05-28 6:16 ` Michael S. Tsirkin
2023-05-29 7:25 ` Liang Chen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230526054621.18371-4-liangchen.linux@gmail.com \
--to=liangchen.linux@gmail.com \
--cc=alexander.duyck@gmail.com \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=jasowang@redhat.com \
--cc=kuba@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mst@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=xuanzhuo@linux.alibaba.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).