From: "Eugenio Pérez" <eperezma@redhat.com>
To: qemu-devel@nongnu.org
Cc: Cornelia Huck <cohuck@redhat.com>,
Si-Wei Liu <si-wei.liu@oracle.com>,
Parav Pandit <parav@mellanox.com>,
Jason Wang <jasowang@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Harpreet Singh Anand <hanand@xilinx.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Zhu Lingshan <lingshan.zhu@intel.com>, Cindy Lu <lulu@redhat.com>,
Gautam Dawar <gdawar@xilinx.com>,
Paolo Bonzini <pbonzini@redhat.com>, Eli Cohen <eli@mellanox.com>,
"Gonglei (Arei)" <arei.gonglei@huawei.com>,
Liuxiangdong <liuxiangdong5@huawei.com>,
Laurent Vivier <lvivier@redhat.com>,
Stefano Garzarella <sgarzare@redhat.com>
Subject: [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type
Date: Thu, 25 Aug 2022 21:03:51 +0200
Message-ID: <20220825190356.317527-2-eperezma@redhat.com>
In-Reply-To: <20220825190356.317527-1-eperezma@redhat.com>
Make the control virtqueue in buffer a typed virtio_net_ctrl_ack pointer instead of a void *. This lets us simplify the code that checks the device's ack, dropping the memcpy and cast. Rename the field to status while we're at it.
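To illustrate the simplification, here is a standalone sketch (not part of the patch; the type and helpers below are hypothetical minimal stand-ins for the definitions in standard-headers/linux/virtio_net.h and net/vhost-vdpa.c):

#include <stdint.h>
#include <string.h>

/* Minimal stand-ins so the sketch compiles on its own; the real
 * definitions come from the virtio-net standard headers. */
typedef uint8_t virtio_net_ctrl_ack;
#define VIRTIO_NET_OK  0
#define VIRTIO_NET_ERR 1

/* Before: the in buffer is an untyped void *, so reading the device's
 * ack requires copying (or casting) it into a local variable first. */
static virtio_net_ctrl_ack read_ack_untyped(const void *cvq_cmd_in_buffer)
{
    virtio_net_ctrl_ack status;

    memcpy(&status, cvq_cmd_in_buffer, sizeof(status));
    return status;
}

/* After: the field already has type virtio_net_ctrl_ack *, so the ack
 * is a plain dereference, as the updated vhost_vdpa_net_load() shows
 * below. */
static virtio_net_ctrl_ack read_ack_typed(const virtio_net_ctrl_ack *status)
{
    return *status;
}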
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
v3: Rename it to status.
---
net/vhost-vdpa.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 6ce68fcd3f..535315c1d0 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -35,7 +35,9 @@ typedef struct VhostVDPAState {
VHostNetState *vhost_net;
/* Control commands shadow buffers */
- void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer;
+ void *cvq_cmd_out_buffer;
+ virtio_net_ctrl_ack *status;
+
bool started;
} VhostVDPAState;
@@ -158,7 +160,7 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
struct vhost_dev *dev = &s->vhost_net->dev;
qemu_vfree(s->cvq_cmd_out_buffer);
- qemu_vfree(s->cvq_cmd_in_buffer);
+ qemu_vfree(s->status);
if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
}
@@ -310,7 +312,7 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
return r;
}
- r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer,
+ r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
vhost_vdpa_net_cvq_cmd_page_len(), true);
if (unlikely(r < 0)) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
@@ -327,7 +329,7 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
if (s->vhost_vdpa.shadow_vqs_enabled) {
vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
- vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer);
+ vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
}
}
@@ -340,7 +342,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
.iov_len = out_len,
};
const struct iovec in = {
- .iov_base = s->cvq_cmd_in_buffer,
+ .iov_base = s->status,
.iov_len = sizeof(virtio_net_ctrl_ack),
};
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
@@ -396,7 +398,7 @@ static int vhost_vdpa_net_load(NetClientState *nc)
return dev_written;
}
- return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
+ return *s->status != VIRTIO_NET_OK;
}
return 0;
@@ -491,8 +493,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
goto out;
}
- memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
- if (status != VIRTIO_NET_OK) {
+ if (*s->status != VIRTIO_NET_OK) {
return VIRTIO_NET_ERR;
}
@@ -549,9 +550,9 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
vhost_vdpa_net_cvq_cmd_page_len());
memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
- s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size(),
- vhost_vdpa_net_cvq_cmd_page_len());
- memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
+ s->status = qemu_memalign(qemu_real_host_page_size(),
+ vhost_vdpa_net_cvq_cmd_page_len());
+ memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s;
--
2.31.1
Thread overview: 7+ messages
2022-08-25 19:03 [PATCH v3 0/6] Vhost-vdpa Shadow Virtqueue multiqueue support Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 1/6] vdpa: Make VhostVDPAState cvq_cmd_in_buffer control ack type Eugenio Pérez [this message]
2022-08-25 19:03 ` [PATCH v3 2/6] vdpa: extract vhost_vdpa_net_load_mac from vhost_vdpa_net_load Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 3/6] vdpa: Add vhost_vdpa_net_load_mq Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 4/6] vdpa: validate MQ CVQ commands Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 5/6] virtio-net: Update virtio-net curr_queue_pairs in vdpa backends Eugenio Pérez
2022-08-25 19:03 ` [PATCH v3 6/6] vdpa: Allow MQ feature in SVQ Eugenio Pérez