* [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Jason Wang @ 2024-11-11 2:55 UTC (permalink / raw)
To: mst, jasowang, xuanzhuo, eperezma, virtualization, linux-kernel
There's no need to sync DMA for CPU on mapping errors. So this patch
skips the CPU sync in the error handling path of DMA mapping.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/virtio/virtio_ring.c | 98 +++++++++++++++++++++---------------
1 file changed, 57 insertions(+), 41 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index be7309b1e860..b422b5fb22db 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -441,8 +441,10 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
*/
static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
- const struct vring_desc *desc)
+ const struct vring_desc *desc,
+ bool skip_sync)
{
+ unsigned long attrs = skip_sync ? DMA_ATTR_SKIP_CPU_SYNC : 0;
u16 flags;
if (!vq->do_unmap)
@@ -450,16 +452,18 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
- dma_unmap_page(vring_dma_dev(vq),
- virtio64_to_cpu(vq->vq.vdev, desc->addr),
- virtio32_to_cpu(vq->vq.vdev, desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_unmap_page_attrs(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ attrs);
}
static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
- unsigned int i)
+ unsigned int i, bool skip_sync)
{
+ unsigned long attrs = skip_sync ? DMA_ATTR_SKIP_CPU_SYNC : 0;
struct vring_desc_extra *extra = vq->split.desc_extra;
u16 flags;
@@ -469,20 +473,22 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
if (!vq->use_dma_api)
goto out;
- dma_unmap_single(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_unmap_single_attrs(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ attrs);
} else {
if (!vq->do_unmap)
goto out;
- dma_unmap_page(vring_dma_dev(vq),
- extra[i].addr,
- extra[i].len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_unmap_page_attrs(vring_dma_dev(vq),
+ extra[i].addr,
+ extra[i].len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ attrs);
}
out:
@@ -717,10 +723,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
if (i == err_idx)
break;
if (indirect) {
- vring_unmap_one_split_indirect(vq, &desc[i]);
+ vring_unmap_one_split_indirect(vq, &desc[i], true);
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
} else
- i = vring_unmap_one_split(vq, i);
+ i = vring_unmap_one_split(vq, i, true);
}
free_indirect:
@@ -775,12 +781,12 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
i = head;
while (vq->split.vring.desc[i].flags & nextflag) {
- vring_unmap_one_split(vq, i);
+ vring_unmap_one_split(vq, i, false);
i = vq->split.desc_extra[i].next;
vq->vq.num_free++;
}
- vring_unmap_one_split(vq, i);
+ vring_unmap_one_split(vq, i, false);
vq->split.desc_extra[i].next = vq->free_head;
vq->free_head = head;
@@ -804,7 +810,8 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
if (vq->do_unmap) {
for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ vring_unmap_one_split_indirect(vq,
+ &indir_desc[j], false);
}
kfree(indir_desc);
@@ -1221,8 +1228,10 @@ static u16 packed_last_used(u16 last_used_idx)
}
static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
- const struct vring_desc_extra *extra)
+ const struct vring_desc_extra *extra,
+ bool skip_sync)
{
+ unsigned long attrs = skip_sync ? DMA_ATTR_SKIP_CPU_SYNC : 0;
u16 flags;
flags = extra->flags;
@@ -1231,24 +1240,28 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
if (!vq->use_dma_api)
return;
- dma_unmap_single(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_unmap_single_attrs(vring_dma_dev(vq),
+ extra->addr, extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ attrs);
} else {
if (!vq->do_unmap)
return;
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_unmap_page_attrs(vring_dma_dev(vq),
+ extra->addr, extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ attrs);
}
}
static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
- const struct vring_packed_desc *desc)
+ const struct vring_packed_desc *desc,
+ bool skip_sync)
{
+ unsigned long attrs = skip_sync ? DMA_ATTR_SKIP_CPU_SYNC : 0;
u16 flags;
if (!vq->do_unmap)
@@ -1256,11 +1269,12 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
flags = le16_to_cpu(desc->flags);
- dma_unmap_page(vring_dma_dev(vq),
- le64_to_cpu(desc->addr),
- le32_to_cpu(desc->len),
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ dma_unmap_page_attrs(vring_dma_dev(vq),
+ le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ attrs);
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -1389,7 +1403,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
err_idx = i;
for (i = 0; i < err_idx; i++)
- vring_unmap_desc_packed(vq, &desc[i]);
+ vring_unmap_desc_packed(vq, &desc[i], true);
free_desc:
kfree(desc);
@@ -1539,7 +1553,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
+ vring_unmap_extra_packed(vq,
+ &vq->packed.desc_extra[curr], true);
curr = vq->packed.desc_extra[curr].next;
i++;
if (i >= vq->packed.vring.num)
@@ -1619,7 +1634,8 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
- &vq->packed.desc_extra[curr]);
+ &vq->packed.desc_extra[curr],
+ false);
curr = vq->packed.desc_extra[curr].next;
}
}
@@ -1636,7 +1652,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
len = vq->packed.desc_extra[id].len;
for (i = 0; i < len / sizeof(struct vring_packed_desc);
i++)
- vring_unmap_desc_packed(vq, &desc[i]);
+ vring_unmap_desc_packed(vq, &desc[i], false);
}
kfree(desc);
state->indir_desc = NULL;
--
2.31.1
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Xuan Zhuo @ 2024-11-11 2:59 UTC (permalink / raw)
To: Jason Wang; +Cc: mst, jasowang, eperezma, virtualization, linux-kernel
On Mon, 11 Nov 2024 10:55:38 +0800, Jason Wang <jasowang@redhat.com> wrote:
> There's no need to sync DMA for CPU on mapping errors. So this patch
> skips the CPU sync in the error handling path of DMA mapping.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Michael S. Tsirkin @ 2024-11-11 7:30 UTC (permalink / raw)
To: Jason Wang; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> There's no need to sync DMA for CPU on mapping errors. So this patch
> skips the CPU sync in the error handling path of DMA mapping.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
DMA sync is idempotent.
Extra work for slow path. Why do we bother?
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Jason Wang @ 2024-11-11 8:36 UTC (permalink / raw)
To: Michael S. Tsirkin; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Mon, Nov 11, 2024 at 3:30 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> > There's no need to sync DMA for CPU on mapping errors. So this patch
> > skips the CPU sync in the error handling path of DMA mapping.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
>
> DMA sync is idempotent.
> Extra work for slow path. Why do we bother?
dma_map_sg() already does this; since virtio currently hacks sg mappings
into per-page mappings, we lose that optimization.

Actually, this path is not necessarily a slow path in some setups such as
swiotlb or VDUSE.
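To spell out the dma_map_sg() point, here is a rough sketch of the pattern
I mean, paraphrased from my reading of the DMA core's sg mapping error path
(illustration only, not a verbatim copy; example_map_sg is a made-up name):

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = dma_map_page_attrs(dev, sg_page(sg),
                                                     sg->offset, sg->length,
                                                     dir, attrs);
                if (dma_mapping_error(dev, sg->dma_address))
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }
        return nents;

out_unmap:
        /*
         * Unwind the entries mapped so far; the device never touched
         * them, so the sync for the CPU is skipped.
         */
        dma_unmap_sg_attrs(dev, sgl, i, dir,
                           attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}

Since virtio open-codes the per-page mapping instead of calling
dma_map_sg(), it currently misses that skip on its own unwind, which is
what this patch adds.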
Thanks
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Michael S. Tsirkin @ 2024-11-12 21:45 UTC (permalink / raw)
To: Jason Wang; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Mon, Nov 11, 2024 at 04:36:52PM +0800, Jason Wang wrote:
> On Mon, Nov 11, 2024 at 3:30 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> > > There's no need to sync DMA for CPU on mapping errors. So this patch
> > > skips the CPU sync in the error handling path of DMA mapping.
> > >
> > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> >
> > DMA sync is idempotent.
> > Extra work for slow path. Why do we bother?
>
> dma_map_sg() already does this; since virtio currently hacks sg mappings
> into per-page mappings, we lose that optimization.
>
> Actually, this path is not necessarily a slow path in some setups such as
> swiotlb or VDUSE.
>
> Thanks
I don't get how it's not a slowpath. Example?
--
MST
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Jason Wang @ 2024-11-13 1:35 UTC (permalink / raw)
To: Michael S. Tsirkin; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Wed, Nov 13, 2024 at 5:46 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Nov 11, 2024 at 04:36:52PM +0800, Jason Wang wrote:
> > On Mon, Nov 11, 2024 at 3:30 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> > > > There's no need to sync DMA for CPU on mapping errors. So this patch
> > > > skips the CPU sync in the error handling path of DMA mapping.
> > > >
> > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > >
> > > DMA sync is idempotent.
> > > Extra work for slow path. Why do we bother?
> >
> > dma_map_sg() already does this; since virtio currently hacks sg mappings
> > into per-page mappings, we lose that optimization.
> >
> > Actually, this path is not necessarily a slow path in some setups such as
> > swiotlb or VDUSE.
> >
> > Thanks
>
> I don't get how it's not a slowpath. Example?
I'm not sure how to define a slow path, but a possible example is when we
reach the swiotlb limit; in that case vring_map_one_sg() can fail easily.
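To illustrate why the skipped sync is not free there (the helper below is
made up for illustration, it is not from the tree): with a bounce buffer
backend, unmapping a FROM_DEVICE buffer without DMA_ATTR_SKIP_CPU_SYNC
copies the bounce buffer back over the original pages, even though on a
mapping-error unwind the device never wrote anything.

/* Hypothetical unwind helper for descriptors mapped before the failure. */
static void unwind_mapped(struct device *dev, const dma_addr_t *addr,
                          const u32 *len, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                /*
                 * Dropping DMA_ATTR_SKIP_CPU_SYNC here would bounce
                 * len[i] bytes back to the CPU for nothing.
                 */
                dma_unmap_page_attrs(dev, addr[i], len[i],
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
        }
}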
Thanks
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Michael S. Tsirkin @ 2025-01-08 11:35 UTC (permalink / raw)
To: Jason Wang; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> There's no need to sync DMA for CPU on mapping errors. So this patch
> skips the CPU sync in the error handling path of DMA mapping.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
So as I said, I do not get why we are optimizing error paths.
The commit log at least needs to be improved to document
the motivation.
> ---
> drivers/virtio/virtio_ring.c | 98 +++++++++++++++++++++---------------
> 1 file changed, 57 insertions(+), 41 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index be7309b1e860..b422b5fb22db 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -441,8 +441,10 @@ static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
> */
>
> static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
> - const struct vring_desc *desc)
> + const struct vring_desc *desc,
> + bool skip_sync)
> {
> + unsigned long attrs = skip_sync ? DMA_ATTR_SKIP_CPU_SYNC : 0;
> u16 flags;
If you really feel we must do it, just pass attrs directly so
we do not get an extra branch. It also makes for more readable code.
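Something like the following, i.e. have the callers pass the attrs value
directly (a sketch only, reusing the names from your patch; the changed
signature is hypothetical, not something that exists in the tree):

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
                                           const struct vring_desc *desc,
                                           unsigned long attrs)
{
        u16 flags;

        if (!vq->do_unmap)
                return;

        flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

        dma_unmap_page_attrs(vring_dma_dev(vq),
                             virtio64_to_cpu(vq->vq.vdev, desc->addr),
                             virtio32_to_cpu(vq->vq.vdev, desc->len),
                             (flags & VRING_DESC_F_WRITE) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE,
                             attrs);
}

/*
 * Callers then spell the attribute out, e.g.:
 *   error-path unwind: vring_unmap_one_split_indirect(vq, &desc[i],
 *                                                     DMA_ATTR_SKIP_CPU_SYNC);
 *   normal detach:     vring_unmap_one_split_indirect(vq, &indir_desc[j], 0);
 */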
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Jason Wang @ 2025-01-10 3:32 UTC (permalink / raw)
To: Michael S. Tsirkin; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Wed, Jan 8, 2025 at 7:35 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> > There's no need to sync DMA for CPU on mapping errors. So this patch
> > skips the CPU sync in the error handling path of DMA mapping.
> >
> > Signed-off-by: Jason Wang <jasowang@redhat.com>
>
>
> So as I said, I do not get why we are optimizing error paths.
> The commit log at least needs to be improved to document
> the motivation.
As I replied before, does the following make more sense?

1) dma_map_sg() already does this.
2) When the driver tries to submit more buffers than SWIOTLB allows,
dma_map might fail; in these cases, bouncing is useless.
Thanks
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Michael S. Tsirkin @ 2025-01-10 8:32 UTC (permalink / raw)
To: Jason Wang; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Fri, Jan 10, 2025 at 11:32:46AM +0800, Jason Wang wrote:
> On Wed, Jan 8, 2025 at 7:35 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> > > There's no need to sync DMA for CPU on mapping errors. So this patch
> > > skips the CPU sync in the error handling path of DMA mapping.
> > >
> > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> >
> >
> > So as I said, I do not get why we are optimizing error paths.
> > The commit log at least needs to be improved to document
> > the motivation.
>
> As I replied before, does the following make more sense?
>
> 1) dma_map_sg() already does this.
> 2) When the driver tries to submit more buffers than SWIOTLB allows,
> dma_map might fail; in these cases, bouncing is useless.
>
> Thanks
About 2 - it's an error path. Is there a reason to think it's common?
About 1 - ok reasonable. You can say "while we have no data on how
common this is, it is consistent with what dma_map_sg does".
--
MST
* Re: [PATCH] virtio_ring: skip cpu sync when mapping fails
From: Jason Wang @ 2025-01-13 3:06 UTC (permalink / raw)
To: Michael S. Tsirkin; +Cc: xuanzhuo, eperezma, virtualization, linux-kernel
On Fri, Jan 10, 2025 at 4:32 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Fri, Jan 10, 2025 at 11:32:46AM +0800, Jason Wang wrote:
> > On Wed, Jan 8, 2025 at 7:35 PM Michael S. Tsirkin <mst@redhat.com> wrote:
> > >
> > > On Mon, Nov 11, 2024 at 10:55:38AM +0800, Jason Wang wrote:
> > > > There's no need to sync DMA for CPU on mapping errors. So this patch
> > > > skips the CPU sync in the error handling path of DMA mapping.
> > > >
> > > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > >
> > >
> > > So as I said, I do not get why we are optimizing error paths.
> > > The commit log at least needs to be improved to document
> > > the motivation.
> >
> > As I replied before, does the following make more sense?
> >
> > 1) dma_map_sg() already does this.
> > 2) When the driver tries to submit more buffers than SWIOTLB allows,
> > dma_map might fail; in these cases, bouncing is useless.
> >
> > Thanks
>
> About 2 - it's an error path. Is there a reason to think it's common?
For example, when running fio tests with 1M or larger requests while
SWIOTLB is enabled.
> About 1 - ok reasonable. You can say "while we have no data on how
> common this is, it is consistent with what dma_map_sg does".
Ok.
Thanks