From: "Michael S. Tsirkin" <mst@redhat.com>
To: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: virtualization@lists.linux-foundation.org,
Jason Wang <jasowang@redhat.com>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Jesper Dangaard Brouer <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
netdev@vger.kernel.org, bpf@vger.kernel.org
Subject: Re: [PATCH vhost v10 06/10] virtio_ring: packed-detach: support return dma info to driver
Date: Fri, 2 Jun 2023 07:40:46 -0400
Message-ID: <20230602073827-mutt-send-email-mst@kernel.org>
In-Reply-To: <20230602092206.50108-7-xuanzhuo@linux.alibaba.com>
On Fri, Jun 02, 2023 at 05:22:02PM +0800, Xuan Zhuo wrote:
> Under premapped mode, the driver is responsible for unmapping the DMA
> addresses after receiving a buffer. Since the virtio core records the
> DMA addresses, the driver needs a way to retrieve that DMA info from
> the virtio core.
>
> A straightforward approach is to pass an array to the virtio core when
> calling virtqueue_get_buf(). However, this is not feasible when the
> descriptor chain contains multiple DMA addresses and the required array
> size is not known in advance.
>
> To solve this problem, a helper is introduced. After calling
> virtqueue_get_buf(), the driver can call the helper to retrieve one
> entry of DMA info at a time. If the helper returns -EAGAIN, more DMA
> addresses remain to be processed, and the driver should call the
> helper again.
Please keep error codes for when an actual error occurs.
A common pattern would be:
<0 - error
0 - success, done
>0 - success, more to do
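With that convention the retrieval loop in the driver also gets
simpler. A sketch only - virtqueue_detach() is a placeholder for
whatever wrapper this series ends up exporting, and addr/len/dir/cursor
are assumed to be declared by the caller:

	do {
		ret = virtqueue_detach(vq, &cursor, &addr, &len, &dir);
		if (ret < 0)
			break;	/* an actual error, nothing to unmap */

		/* dev: the device used for mapping, e.g. from
		 * virtqueue_dma_dev() later in this series
		 */
		dma_unmap_single(dev, addr, len, dir);
	} while (ret > 0);	/* >0 - more entries left in the chain */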
> To keep track of the current
> position in the chain, a cursor, initialized by virtqueue_get_buf(),
> must be passed to the helper function.
>
> Some processing is done inside this helper, so it MUST only be called
> in premapped mode.
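So, if I read this right, the intended driver-side flow with the patch
as posted is roughly the following. This is an illustrative sketch, not
code from this patch: virtqueue_get_buf_premapped() and
virtqueue_detach() are placeholder names, since this patch only wires
the cursor through the internal packed helpers:

	buf = virtqueue_get_buf_premapped(vq, &len, &ctx, &cursor);
	if (buf) {
		do {
			ret = virtqueue_detach(vq, &cursor, &addr,
					       &dma_len, &dir);
			if (ret && ret != -EAGAIN)
				break;	/* cursor already exhausted */

			dma_unmap_single(dev, addr, dma_len, dir);
		} while (ret == -EAGAIN);
	}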
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
> drivers/virtio/virtio_ring.c | 105 ++++++++++++++++++++++++++++++++---
> include/linux/virtio.h | 9 ++-
> 2 files changed, 103 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index cdc4349f6066..cbc22daae7e1 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -1695,8 +1695,85 @@ static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
> return needs_kick;
> }
>
> +static void detach_cursor_init_packed(struct vring_virtqueue *vq,
> + struct virtqueue_detach_cursor *cursor, u16 id)
> +{
> + struct vring_desc_state_packed *state = NULL;
> + u32 len;
> +
> + state = &vq->packed.desc_state[id];
> +
> + /* Clear data ptr. */
> + state->data = NULL;
> +
> + vq->packed.desc_extra[state->last].next = vq->free_head;
> + vq->free_head = id;
> + vq->vq.num_free += state->num;
> +
> + /* init cursor */
> + cursor->curr = id;
> + cursor->done = 0;
> + cursor->pos = 0;
> +
> + if (vq->packed.desc_extra[id].flags & VRING_DESC_F_INDIRECT) {
> + len = vq->packed.desc_extra[id].len;
> +
> + cursor->num = len / sizeof(struct vring_packed_desc);
> + cursor->indirect = true;
> +
> + vring_unmap_extra_packed(vq, &vq->packed.desc_extra[id]);
> + } else {
> + cursor->num = state->num;
> + cursor->indirect = false;
> + }
> +}
> +
> +static int virtqueue_detach_packed(struct virtqueue *_vq, struct virtqueue_detach_cursor *cursor,
> + dma_addr_t *addr, u32 *len, enum dma_data_direction *dir)
> +{
> + struct vring_virtqueue *vq = to_vvq(_vq);
> +
> + if (unlikely(cursor->done))
> + return -EINVAL;
> +
> + if (!cursor->indirect) {
> + struct vring_desc_extra *extra;
> +
> + extra = &vq->packed.desc_extra[cursor->curr];
> + cursor->curr = extra->next;
> +
> + *addr = extra->addr;
> + *len = extra->len;
> + *dir = (extra->flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> +
> + if (++cursor->pos == cursor->num) {
> + cursor->done = true;
> + return 0;
> + }
> + } else {
> + struct vring_packed_desc *indir_desc, *desc;
> + u16 flags;
> +
> + indir_desc = vq->packed.desc_state[cursor->curr].indir_desc;
> + desc = &indir_desc[cursor->pos];
> +
> + flags = le16_to_cpu(desc->flags);
> + *addr = le64_to_cpu(desc->addr);
> + *len = le32_to_cpu(desc->len);
> + *dir = (flags & VRING_DESC_F_WRITE) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> +
> + if (++cursor->pos == cursor->num) {
> + kfree(indir_desc);
> + cursor->done = true;
> + return 0;
> + }
> + }
> +
> + return -EAGAIN;
> +}
> +
> static void detach_buf_packed(struct vring_virtqueue *vq,
> - unsigned int id, void **ctx)
> + unsigned int id)
> {
> struct vring_desc_state_packed *state = NULL;
> struct vring_packed_desc *desc;
> @@ -1736,8 +1813,6 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
> }
> kfree(desc);
> state->indir_desc = NULL;
> - } else if (ctx) {
> - *ctx = state->indir_desc;
> }
> }
>
> @@ -1768,7 +1843,8 @@ static bool more_used_packed(const struct vring_virtqueue *vq)
>
> static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
> unsigned int *len,
> - void **ctx)
> + void **ctx,
> + struct virtqueue_detach_cursor *cursor)
> {
> struct vring_virtqueue *vq = to_vvq(_vq);
> u16 last_used, id, last_used_idx;
> @@ -1808,7 +1884,14 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
>
> /* detach_buf_packed clears data, so grab it now. */
> ret = vq->packed.desc_state[id].data;
> - detach_buf_packed(vq, id, ctx);
> +
> + if (!vq->indirect && ctx)
> + *ctx = vq->packed.desc_state[id].indir_desc;
> +
> + if (vq->premapped)
> + detach_cursor_init_packed(vq, cursor, id);
> + else
> + detach_buf_packed(vq, id);
>
> last_used += vq->packed.desc_state[id].num;
> if (unlikely(last_used >= vq->packed.vring.num)) {
> @@ -1960,7 +2043,8 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
> return true;
> }
>
> -static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
> +static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq,
> + struct virtqueue_detach_cursor *cursor)
> {
> struct vring_virtqueue *vq = to_vvq(_vq);
> unsigned int i;
> @@ -1973,7 +2057,10 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
> continue;
> /* detach_buf clears data, so grab it now. */
> buf = vq->packed.desc_state[i].data;
> - detach_buf_packed(vq, i, NULL);
> + if (vq->premapped)
> + detach_cursor_init_packed(vq, cursor, i);
> + else
> + detach_buf_packed(vq, i);
> END_USE(vq);
> return buf;
> }
> @@ -2458,7 +2545,7 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
> {
> struct vring_virtqueue *vq = to_vvq(_vq);
>
> - return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
> + return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx, NULL) :
> virtqueue_get_buf_ctx_split(_vq, len, ctx, NULL);
> }
> EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
> @@ -2590,7 +2677,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
> {
> struct vring_virtqueue *vq = to_vvq(_vq);
>
> - return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
> + return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq, NULL) :
> virtqueue_detach_unused_buf_split(_vq, NULL);
> }
> EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> index eb4a4e4329aa..7f137c7a9034 100644
> --- a/include/linux/virtio.h
> +++ b/include/linux/virtio.h
> @@ -43,8 +43,13 @@ struct virtqueue_detach_cursor {
> unsigned done:1;
> unsigned hole:14;
>
> - /* for split head */
> - unsigned head:16;
> + union {
> + /* for split head */
> + unsigned head:16;
> +
> + /* for packed id */
> + unsigned curr:16;
> + };
> unsigned num:16;
> unsigned pos:16;
> };
> --
> 2.32.0.3.g01195cf9f