* [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
@ 2020-03-31 17:59 Eugenio Pérez
2020-03-31 17:59 ` [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
` (7 more replies)
0 siblings, 8 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 17:59 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
Vhost did not properly reset the batched descriptors on a SET_VRING_BASE
event. Because of that, it was possible to return an invalid descriptor to
the guest.
This series amends this, resetting them every time the backend changes, and
creates a test to assert the correct behavior. To do that, it needs to
expose a new function in virtio_ring, virtqueue_reset_free_head, in test
code only.
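For example, once the series is applied and the vhost_test module is
loaded, the fix can be exercised from tools/virtio with (hypothetical
invocation; the flags are the ones added in patches 5-8):
./virtio_test --batch=random --reset=random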
Another useful thing would be to check whether the mutex is actually held
in the vq private_data accessors, similar to C++'s unique_lock::owns_lock;
I am not sure whether the mutex debugging code allows that. The accessors
do not acquire the mutex themselves because the caller holds it in order to
perform more actions.
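A minimal sketch of that check, assuming lockdep is enabled (the assertion
compiles away otherwise):
static inline void *vhost_vq_get_backend_opaque(struct vhost_virtqueue *vq)
{
	/* Warn if the caller does not hold vq->mutex. */
	lockdep_assert_held(&vq->mutex);
	return vq->private_data;
}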
v2:
* Squashed commits.
* Create vq private_data accessors (mst).
This is meant to be applied on top of
c4f1c41a6094582903c75c0dcfacb453c959d457 in
git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git.
Eugenio Pérez (5):
vhost: Create accessors for virtqueues private_data
tools/virtio: Add --batch option
tools/virtio: Add --batch=random option
tools/virtio: Add --reset=random
tools/virtio: Make --reset reset ring idx
Michael S. Tsirkin (3):
vhost: option to fetch descriptors through an independent struct
vhost: use batched version by default
vhost: batching fetches
drivers/vhost/net.c | 28 ++--
drivers/vhost/test.c | 59 +++++++-
drivers/vhost/test.h | 1 +
drivers/vhost/vhost.c | 271 +++++++++++++++++++++++------------
drivers/vhost/vhost.h | 45 +++++-
drivers/vhost/vsock.c | 14 +-
drivers/virtio/virtio_ring.c | 29 ++++
tools/virtio/linux/virtio.h | 2 +
tools/virtio/virtio_test.c | 123 ++++++++++++++--
9 files changed, 445 insertions(+), 127 deletions(-)
--
2.18.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
@ 2020-03-31 17:59 ` Eugenio Pérez
2020-03-31 18:14 ` Michael S. Tsirkin
2020-03-31 18:28 ` Michael S. Tsirkin
2020-03-31 18:00 ` [PATCH v2 2/8] vhost: option to fetch descriptors through an independent struct Eugenio Pérez
` (6 subsequent siblings)
7 siblings, 2 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 17:59 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/net.c | 28 +++++++++++++++-------------
drivers/vhost/vhost.h | 28 ++++++++++++++++++++++++++++
drivers/vhost/vsock.c | 14 +++++++-------
3 files changed, 50 insertions(+), 20 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e158159671fa..6c5e7a6f712c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -424,7 +424,7 @@ static void vhost_net_disable_vq(struct vhost_net *n,
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend_opaque(vq))
return;
vhost_poll_stop(poll);
}
@@ -437,7 +437,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock;
- sock = vq->private_data;
+ sock = vhost_vq_get_backend_opaque(vq);
if (!sock)
return 0;
@@ -524,7 +524,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
return;
vhost_disable_notify(&net->dev, vq);
- sock = rvq->private_data;
+ sock = vhost_vq_get_backend_opaque(rvq);
busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
tvq->busyloop_timeout;
@@ -570,8 +570,10 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */
- if (!vhost_sock_zcopy(tvq->private_data))
- vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
+ if (!vhost_sock_zcopy(vhost_vq_get_backend_opaque(tvq)))
+ vhost_tx_batch(net, tnvq,
+ vhost_vq_get_backend_opaque(tvq),
+ msghdr);
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
@@ -685,7 +687,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
- struct socket *sock = vq->private_data;
+ struct socket *sock = vhost_vq_get_backend_opaque(vq);
struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
@@ -952,7 +954,7 @@ static void handle_tx(struct vhost_net *net)
struct socket *sock;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend_opaque(vq);
if (!sock)
goto out;
@@ -1121,7 +1123,7 @@ static void handle_rx(struct vhost_net *net)
int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend_opaque(vq);
if (!sock)
goto out;
@@ -1344,9 +1346,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend_opaque(vq);
vhost_net_disable_vq(n, vq);
- vq->private_data = NULL;
+ vhost_vq_set_backend_opaque(vq, NULL);
vhost_net_buf_unproduce(nvq);
nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex);
@@ -1528,7 +1530,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
/* start polling new socket */
- oldsock = vq->private_data;
+ oldsock = vhost_vq_get_backend_opaque(vq);
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
@@ -1538,7 +1540,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
vhost_net_disable_vq(n, vq);
- vq->private_data = sock;
+ vhost_vq_set_backend_opaque(vq, sock);
vhost_net_buf_unproduce(nvq);
r = vhost_vq_init_access(vq);
if (r)
@@ -1575,7 +1577,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
return 0;
err_used:
- vq->private_data = oldsock;
+ vhost_vq_set_backend_opaque(vq, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a123fd70847e..0808188f7e8f 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -244,6 +244,34 @@ enum {
(1ULL << VIRTIO_F_VERSION_1)
};
+/**
+ * vhost_vq_set_backend_opaque - Set backend opaque.
+ *
+ * @vq: Virtqueue.
+ * @private_data: The private data.
+ *
+ * Context: Must be called with vq->mutex held.
+ */
+static inline void vhost_vq_set_backend_opaque(struct vhost_virtqueue *vq,
+ void *private_data)
+{
+ vq->private_data = private_data;
+}
+
+/**
+ * vhost_vq_get_backend_opaque - Get backend opaque.
+ *
+ * @vq: Virtqueue.
+ *
+ * Context: Must be called with vq->mutex held.
+ * Return: Opaque previously set with vhost_vq_set_backend_opaque.
+ */
+static inline void *vhost_vq_get_backend_opaque(struct vhost_virtqueue *vq)
+{
+ return vq->private_data;
+}
+
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
return vq->acked_features & (1ULL << bit);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index c2d7d57e98cf..6e20dbe14acd 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -91,7 +91,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend_opaque(vq))
goto out;
/* Avoid further vmexits, we're already processing the virtqueue */
@@ -440,7 +440,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend_opaque(vq))
goto out;
vhost_disable_notify(&vsock->dev, vq);
@@ -533,8 +533,8 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err_vq;
}
- if (!vq->private_data) {
- vq->private_data = vsock;
+ if (!vhost_vq_get_backend_opaque(vq)) {
+ vhost_vq_set_backend_opaque(vq, vsock);
ret = vhost_vq_init_access(vq);
if (ret)
goto err_vq;
@@ -547,14 +547,14 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
- vq->private_data = NULL;
+ vhost_vq_set_backend_opaque(vq, NULL);
mutex_unlock(&vq->mutex);
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend_opaque(vq, NULL);
mutex_unlock(&vq->mutex);
}
err:
@@ -577,7 +577,7 @@ static int vhost_vsock_stop(struct vhost_vsock *vsock)
struct vhost_virtqueue *vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend_opaque(vq, NULL);
mutex_unlock(&vq->mutex);
}
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 2/8] vhost: option to fetch descriptors through an independent struct
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
2020-03-31 17:59 ` [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 3/8] vhost: use batched version by default Eugenio Pérez
` (5 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
From: "Michael S. Tsirkin" <mst@redhat.com>
The idea is to support multiple ring formats by converting
to a format-independent array of descriptors.
This costs extra cycles, but we gain the ability
to fetch a batch of descriptors in one go, which
is good for code cache locality.
Used on its own, this causes a minor performance degradation;
it's been kept as simple as possible for ease of review.
A follow-up patch gets the performance back by adding batching.
To simplify benchmarking, I kept the old code around so one can switch
back and forth between old and new code. This will go away in the final
submission.
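As an illustration of the format independence, a hypothetical sketch (not
part of this series; translation between packed and split flag semantics is
elided) of filling the same struct vhost_desc from a VIRTIO 1.1 packed ring
descriptor:
static int push_packed_desc(struct vhost_virtqueue *vq,
			    const struct vring_packed_desc *desc)
{
	struct vhost_desc *h;

	if (unlikely(vq->ndescs >= vq->max_descs))
		return -EINVAL;
	h = &vq->descs[vq->ndescs++];
	/* Packed ring descriptors are always little-endian. */
	h->addr = le64_to_cpu(desc->addr);
	h->len = le32_to_cpu(desc->len);
	h->flags = le16_to_cpu(desc->flags);
	h->id = le16_to_cpu(desc->id);
	return 0;
}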
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/vhost.c | 297 +++++++++++++++++++++++++++++++++++++++++-
drivers/vhost/vhost.h | 16 +++
2 files changed, 312 insertions(+), 1 deletion(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f44340b41494..38e65a36465b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -302,6 +302,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
vq->num = 1;
+ vq->ndescs = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -372,6 +373,9 @@ static int vhost_worker(void *data)
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
+ kfree(vq->descs);
+ vq->descs = NULL;
+ vq->max_descs = 0;
kfree(vq->indirect);
vq->indirect = NULL;
kfree(vq->log);
@@ -388,6 +392,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
+ vq->max_descs = dev->iov_limit;
+ vq->descs = kmalloc_array(vq->max_descs,
+ sizeof(*vq->descs),
+ GFP_KERNEL);
vq->indirect = kmalloc_array(UIO_MAXIOV,
sizeof(*vq->indirect),
GFP_KERNEL);
@@ -395,7 +403,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
GFP_KERNEL);
vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
GFP_KERNEL);
- if (!vq->indirect || !vq->log || !vq->heads)
+ if (!vq->indirect || !vq->log || !vq->heads || !vq->descs)
goto err_nomem;
}
return 0;
@@ -2352,6 +2360,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
+static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq)
+{
+ BUG_ON(!vq->ndescs);
+ return &vq->descs[vq->ndescs - 1];
+}
+
+static void pop_split_desc(struct vhost_virtqueue *vq)
+{
+ BUG_ON(!vq->ndescs);
+ --vq->ndescs;
+}
+
+#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \
+ VRING_DESC_F_NEXT)
+static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, u16 id)
+{
+ struct vhost_desc *h;
+
+ if (unlikely(vq->ndescs >= vq->max_descs))
+ return -EINVAL;
+ h = &vq->descs[vq->ndescs++];
+ h->addr = vhost64_to_cpu(vq, desc->addr);
+ h->len = vhost32_to_cpu(vq, desc->len);
+ h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
+ h->id = id;
+
+ return 0;
+}
+
+static int fetch_indirect_descs(struct vhost_virtqueue *vq,
+ struct vhost_desc *indirect,
+ u16 head)
+{
+ struct vring_desc desc;
+ unsigned int i = 0, count, found = 0;
+ u32 len = indirect->len;
+ struct iov_iter from;
+ int ret;
+
+ /* Sanity check */
+ if (unlikely(len % sizeof desc)) {
+ vq_err(vq, "Invalid length in indirect descriptor: "
+ "len 0x%llx not multiple of 0x%zx\n",
+ (unsigned long long)len,
+ sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(vq, indirect->addr, len, vq->indirect,
+ UIO_MAXIOV, VHOST_ACCESS_RO);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ return ret;
+ }
+ iov_iter_init(&from, READ, vq->indirect, ret, len);
+
+ /* We will use the result as an address to read from, so most
+ * architectures only need a compiler barrier here. */
+ read_barrier_depends();
+
+ count = len / sizeof desc;
+ /* Buffers are chained via a 16 bit next field, so
+ * we can have at most 2^16 of these. */
+ if (unlikely(count > USHRT_MAX + 1)) {
+ vq_err(vq, "Indirect buffer length too big: %d\n",
+ indirect->len);
+ return -E2BIG;
+ }
+ if (unlikely(vq->ndescs + count > vq->max_descs)) {
+ vq_err(vq, "Too many indirect + direct descs: %d + %d\n",
+ vq->ndescs, indirect->len);
+ return -E2BIG;
+ }
+
+ do {
+ if (unlikely(++found > count)) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "indirect size %u\n",
+ i, count);
+ return -EINVAL;
+ }
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
+ vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+ if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
+ vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+
+ push_split_desc(vq, &desc, head);
+ } while ((i = next_desc(vq, &desc)) != -1);
+ return 0;
+}
+
+static int fetch_descs(struct vhost_virtqueue *vq)
+{
+ unsigned int i, head, found = 0;
+ struct vhost_desc *last;
+ struct vring_desc desc;
+ __virtio16 avail_idx;
+ __virtio16 ring_head;
+ u16 last_avail_idx;
+ int ret;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ last_avail_idx = vq->last_avail_idx;
+
+ if (vq->avail_idx == vq->last_avail_idx) {
+ if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
+ vq_err(vq, "Failed to access avail idx at %p\n",
+ &vq->avail->idx);
+ return -EFAULT;
+ }
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+
+ if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
+ vq_err(vq, "Guest moved used index from %u to %u",
+ last_avail_idx, vq->avail_idx);
+ return -EFAULT;
+ }
+
+ /* If there's nothing new since last we looked, return
+ * invalid.
+ */
+ if (vq->avail_idx == last_avail_idx)
+ return vq->num;
+
+ /* Only get avail ring entries after they have been
+ * exposed by guest.
+ */
+ smp_rmb();
+ }
+
+ /* Grab the next descriptor number they're advertising */
+ if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return -EFAULT;
+ }
+
+ head = vhost16_to_cpu(vq, ring_head);
+
+ /* If their number is silly, that's an error. */
+ if (unlikely(head >= vq->num)) {
+ vq_err(vq, "Guest says index %u > %u is available",
+ head, vq->num);
+ return -EINVAL;
+ }
+
+ i = head;
+ do {
+ if (unlikely(i >= vq->num)) {
+ vq_err(vq, "Desc index is %u > %u, head = %u",
+ i, vq->num, head);
+ return -EINVAL;
+ }
+ if (unlikely(++found > vq->num)) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "vq size %u head %u\n",
+ i, vq->num, head);
+ return -EINVAL;
+ }
+ ret = vhost_get_desc(vq, &desc, i);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
+ i, vq->desc + i);
+ return -EFAULT;
+ }
+ ret = push_split_desc(vq, &desc, head);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to save descriptor: idx %d\n", i);
+ return -EINVAL;
+ }
+ } while ((i = next_desc(vq, &desc)) != -1);
+
+ last = peek_split_desc(vq);
+ if (unlikely(last->flags & VRING_DESC_F_INDIRECT)) {
+ pop_split_desc(vq);
+ ret = fetch_indirect_descs(vq, last, head);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", head);
+ return ret;
+ }
+ }
+
+ /* Assume notifications from guest are disabled at this point,
+ * if they aren't we would need to update avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
+
+ /* On success, increment avail index. */
+ vq->last_avail_idx++;
+
+ return 0;
+}
+
+/* This looks in the virtqueue and for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error. */
+int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ int ret = fetch_descs(vq);
+ int i;
+
+ if (ret)
+ return ret;
+
+ /* Now convert to IOV */
+ /* When we start there are none of either input nor output. */
+ *out_num = *in_num = 0;
+ if (unlikely(log))
+ *log_num = 0;
+
+ for (i = 0; i < vq->ndescs; ++i) {
+ unsigned iov_count = *in_num + *out_num;
+ struct vhost_desc *desc = &vq->descs[i];
+ int access;
+
+ if (desc->flags & ~VHOST_DESC_FLAGS) {
+ vq_err(vq, "Unexpected flags: 0x%x at descriptor id 0x%x\n",
+ desc->flags, desc->id);
+ ret = -EINVAL;
+ goto err;
+ }
+ if (desc->flags & VRING_DESC_F_WRITE)
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+ ret = translate_desc(vq, desc->addr,
+ desc->len, iov + iov_count,
+ iov_size - iov_count, access);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
+ goto err;
+ }
+ if (access == VHOST_ACCESS_WO) {
+ /* If this is an input descriptor,
+ * increment that count. */
+ *in_num += ret;
+ if (unlikely(log && ret)) {
+ log[*log_num].addr = desc->addr;
+ log[*log_num].len = desc->len;
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (unlikely(*in_num)) {
+ vq_err(vq, "Descriptor has out after in: "
+ "idx %d\n", i);
+ ret = -EINVAL;
+ goto err;
+ }
+ *out_num += ret;
+ }
+
+ ret = desc->id;
+ }
+
+ vq->ndescs = 0;
+
+ return ret;
+
+err:
+ vhost_discard_vq_desc(vq, 1);
+ vq->ndescs = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc_batch);
+
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 0808188f7e8f..4dfb668620ca 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -80,6 +80,13 @@ enum vhost_uaddr_type {
VHOST_NUM_ADDRS = 3,
};
+struct vhost_desc {
+ u64 addr;
+ u32 len;
+ u16 flags; /* VRING_DESC_F_WRITE, VRING_DESC_F_NEXT */
+ u16 id;
+};
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -90,6 +97,11 @@ struct vhost_virtqueue {
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;
+
+ struct vhost_desc *descs;
+ int ndescs;
+ int max_descs;
+
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
struct eventfd_ctx *call_ctx;
@@ -191,6 +203,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
+int vhost_get_vq_desc_batch(struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_count,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
unsigned int *out_num, unsigned int *in_num,
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 3/8] vhost: use batched version by default
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
2020-03-31 17:59 ` [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 2/8] vhost: option to fetch descriptors through an independent struct Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 4/8] vhost: batching fetches Eugenio Pérez
` (4 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
From: "Michael S. Tsirkin" <mst@redhat.com>
As testing shows no performance change, switch to the batched version now.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/vhost.c | 251 +-----------------------------------------
drivers/vhost/vhost.h | 4 -
2 files changed, 2 insertions(+), 253 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38e65a36465b..56c5253056ee 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2113,253 +2113,6 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return next;
}
-static int get_indirect(struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num,
- struct vring_desc *indirect)
-{
- struct vring_desc desc;
- unsigned int i = 0, count, found = 0;
- u32 len = vhost32_to_cpu(vq, indirect->len);
- struct iov_iter from;
- int ret, access;
-
- /* Sanity check */
- if (unlikely(len % sizeof desc)) {
- vq_err(vq, "Invalid length in indirect descriptor: "
- "len 0x%llx not multiple of 0x%zx\n",
- (unsigned long long)len,
- sizeof desc);
- return -EINVAL;
- }
-
- ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
- UIO_MAXIOV, VHOST_ACCESS_RO);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Translation failure %d in indirect.\n", ret);
- return ret;
- }
- iov_iter_init(&from, READ, vq->indirect, ret, len);
-
- /* We will use the result as an address to read from, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
- count = len / sizeof desc;
- /* Buffers are chained via a 16 bit next field, so
- * we can have at most 2^16 of these. */
- if (unlikely(count > USHRT_MAX + 1)) {
- vq_err(vq, "Indirect buffer length too big: %d\n",
- indirect->len);
- return -E2BIG;
- }
-
- do {
- unsigned iov_count = *in_num + *out_num;
- if (unlikely(++found > count)) {
- vq_err(vq, "Loop detected: last one at %u "
- "indirect size %u\n",
- i, count);
- return -EINVAL;
- }
- if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
- vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
- i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
- return -EINVAL;
- }
- if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
- vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
- i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
- return -EINVAL;
- }
-
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
- access = VHOST_ACCESS_WO;
- else
- access = VHOST_ACCESS_RO;
-
- ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
- vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count, access);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Translation failure %d indirect idx %d\n",
- ret, i);
- return ret;
- }
- /* If this is an input descriptor, increment that count. */
- if (access == VHOST_ACCESS_WO) {
- *in_num += ret;
- if (unlikely(log && ret)) {
- log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
- log[*log_num].len = vhost32_to_cpu(vq, desc.len);
- ++*log_num;
- }
- } else {
- /* If it's an output descriptor, they're all supposed
- * to come before any input descriptors. */
- if (unlikely(*in_num)) {
- vq_err(vq, "Indirect descriptor "
- "has out after in: idx %d\n", i);
- return -EINVAL;
- }
- *out_num += ret;
- }
- } while ((i = next_desc(vq, &desc)) != -1);
- return 0;
-}
-
-/* This looks in the virtqueue and for the first available buffer, and converts
- * it to an iovec for convenient access. Since descriptors consist of some
- * number of output then some number of input descriptors, it's actually two
- * iovecs, but we pack them into one and note how many of each there were.
- *
- * This function returns the descriptor number found, or vq->num (which is
- * never a valid descriptor number) if none was found. A negative code is
- * returned on error. */
-int vhost_get_vq_desc(struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num)
-{
- struct vring_desc desc;
- unsigned int i, head, found = 0;
- u16 last_avail_idx;
- __virtio16 avail_idx;
- __virtio16 ring_head;
- int ret, access;
-
- /* Check it isn't doing very strange things with descriptor numbers. */
- last_avail_idx = vq->last_avail_idx;
-
- if (vq->avail_idx == vq->last_avail_idx) {
- if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
- vq_err(vq, "Failed to access avail idx at %p\n",
- &vq->avail->idx);
- return -EFAULT;
- }
- vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
-
- if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved used index from %u to %u",
- last_avail_idx, vq->avail_idx);
- return -EFAULT;
- }
-
- /* If there's nothing new since last we looked, return
- * invalid.
- */
- if (vq->avail_idx == last_avail_idx)
- return vq->num;
-
- /* Only get avail ring entries after they have been
- * exposed by guest.
- */
- smp_rmb();
- }
-
- /* Grab the next descriptor number they're advertising, and increment
- * the index we've seen. */
- if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
- vq_err(vq, "Failed to read head: idx %d address %p\n",
- last_avail_idx,
- &vq->avail->ring[last_avail_idx % vq->num]);
- return -EFAULT;
- }
-
- head = vhost16_to_cpu(vq, ring_head);
-
- /* If their number is silly, that's an error. */
- if (unlikely(head >= vq->num)) {
- vq_err(vq, "Guest says index %u > %u is available",
- head, vq->num);
- return -EINVAL;
- }
-
- /* When we start there are none of either input nor output. */
- *out_num = *in_num = 0;
- if (unlikely(log))
- *log_num = 0;
-
- i = head;
- do {
- unsigned iov_count = *in_num + *out_num;
- if (unlikely(i >= vq->num)) {
- vq_err(vq, "Desc index is %u > %u, head = %u",
- i, vq->num, head);
- return -EINVAL;
- }
- if (unlikely(++found > vq->num)) {
- vq_err(vq, "Loop detected: last one at %u "
- "vq size %u head %u\n",
- i, vq->num, head);
- return -EINVAL;
- }
- ret = vhost_get_desc(vq, &desc, i);
- if (unlikely(ret)) {
- vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
- i, vq->desc + i);
- return -EFAULT;
- }
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
- ret = get_indirect(vq, iov, iov_size,
- out_num, in_num,
- log, log_num, &desc);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Failure detected "
- "in indirect descriptor at idx %d\n", i);
- return ret;
- }
- continue;
- }
-
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
- access = VHOST_ACCESS_WO;
- else
- access = VHOST_ACCESS_RO;
- ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
- vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count, access);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Translation failure %d descriptor idx %d\n",
- ret, i);
- return ret;
- }
- if (access == VHOST_ACCESS_WO) {
- /* If this is an input descriptor,
- * increment that count. */
- *in_num += ret;
- if (unlikely(log && ret)) {
- log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
- log[*log_num].len = vhost32_to_cpu(vq, desc.len);
- ++*log_num;
- }
- } else {
- /* If it's an output descriptor, they're all supposed
- * to come before any input descriptors. */
- if (unlikely(*in_num)) {
- vq_err(vq, "Descriptor has out after in: "
- "idx %d\n", i);
- return -EINVAL;
- }
- *out_num += ret;
- }
- } while ((i = next_desc(vq, &desc)) != -1);
-
- /* On success, increment avail index. */
- vq->last_avail_idx++;
-
- /* Assume notifications from guest are disabled at this point,
- * if they aren't we would need to update avail_event index. */
- BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
- return head;
-}
-EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
-
static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq)
{
BUG_ON(!vq->ndescs);
@@ -2570,7 +2323,7 @@ static int fetch_descs(struct vhost_virtqueue *vq)
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error. */
-int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq,
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
@@ -2645,7 +2398,7 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq,
return ret;
}
-EXPORT_SYMBOL_GPL(vhost_get_vq_desc_batch);
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 4dfb668620ca..c1efd8256a88 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -203,10 +203,6 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
-int vhost_get_vq_desc_batch(struct vhost_virtqueue *,
- struct iovec iov[], unsigned int iov_count,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
unsigned int *out_num, unsigned int *in_num,
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 4/8] vhost: batching fetches
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (2 preceding siblings ...)
2020-03-31 18:00 ` [PATCH v2 3/8] vhost: use batched version by default Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 5/8] tools/virtio: Add --batch option Eugenio Pérez
` (3 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
From: "Michael S. Tsirkin" <mst@redhat.com>
With this patch applied, new and old code perform identically.
Lots of extra optimizations are now possible, e.g.
we can now fetch multiple heads with a single copy_from/to_user,
we can get rid of maintaining the log array, and so on.
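One hypothetical shape of such a multi-head fetch (not part of this series;
the IOTLB path and the wrap-around retry are left to the caller):
static int fetch_heads(struct vhost_virtqueue *vq, __virtio16 *heads, int n)
{
	u16 start = vq->last_avail_idx % vq->num;

	/* Read a contiguous run of avail ring heads in one access;
	 * stop at the end of the ring and let the caller retry. */
	n = min_t(int, n, vq->num - start);
	if (copy_from_user(heads, vq->avail->ring + start,
			   n * sizeof(*heads)))
		return -EFAULT;
	return n;
}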
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/test.c | 2 +-
drivers/vhost/vhost.c | 47 ++++++++++++++++++++++++++++++++++++++-----
drivers/vhost/vhost.h | 5 ++++-
3 files changed, 47 insertions(+), 7 deletions(-)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index e37c92d4d7ad..58da17af93e9 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -119,7 +119,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV + 64,
VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 56c5253056ee..1646b1ce312a 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -303,6 +303,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
{
vq->num = 1;
vq->ndescs = 0;
+ vq->first_desc = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -371,6 +372,11 @@ static int vhost_worker(void *data)
return 0;
}
+static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
+{
+ return vq->max_descs - UIO_MAXIOV;
+}
+
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->descs);
@@ -393,6 +399,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->max_descs = dev->iov_limit;
+ if (vhost_vq_num_batch_descs(vq) < 0) {
+ return -EINVAL;
+ }
vq->descs = kmalloc_array(vq->max_descs,
sizeof(*vq->descs),
GFP_KERNEL);
@@ -1643,6 +1652,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
+ vq->ndescs = vq->first_desc = 0;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
@@ -2211,7 +2221,7 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
return 0;
}
-static int fetch_descs(struct vhost_virtqueue *vq)
+static int fetch_buf(struct vhost_virtqueue *vq)
{
unsigned int i, head, found = 0;
struct vhost_desc *last;
@@ -2224,7 +2234,11 @@ static int fetch_descs(struct vhost_virtqueue *vq)
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (vq->avail_idx == vq->last_avail_idx) {
+ if (unlikely(vq->avail_idx == vq->last_avail_idx)) {
+ /* If we already have work to do, don't bother re-checking. */
+ if (likely(vq->ndescs))
+ return vq->num;
+
if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
@@ -2315,6 +2329,24 @@ static int fetch_descs(struct vhost_virtqueue *vq)
return 0;
}
+static int fetch_descs(struct vhost_virtqueue *vq)
+{
+ int ret = 0;
+
+ if (unlikely(vq->first_desc >= vq->ndescs)) {
+ vq->first_desc = 0;
+ vq->ndescs = 0;
+ }
+
+ if (vq->ndescs)
+ return 0;
+
+ while (!ret && vq->ndescs <= vhost_vq_num_batch_descs(vq))
+ ret = fetch_buf(vq);
+
+ return vq->ndescs ? 0 : ret;
+}
+
/* This looks in the virtqueue and for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
@@ -2340,7 +2372,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
if (unlikely(log))
*log_num = 0;
- for (i = 0; i < vq->ndescs; ++i) {
+ for (i = vq->first_desc; i < vq->ndescs; ++i) {
unsigned iov_count = *in_num + *out_num;
struct vhost_desc *desc = &vq->descs[i];
int access;
@@ -2386,14 +2418,19 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
ret = desc->id;
+
+ if (!(desc->flags & VRING_DESC_F_NEXT))
+ break;
}
- vq->ndescs = 0;
+ vq->first_desc = i + 1;
return ret;
err:
- vhost_discard_vq_desc(vq, 1);
+ for (i = vq->first_desc; i < vq->ndescs; ++i)
+ if (!(vq->descs[i].flags & VRING_DESC_F_NEXT))
+ vhost_discard_vq_desc(vq, 1);
vq->ndescs = 0;
return ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index c1efd8256a88..64a4ebab6dd2 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -100,6 +100,7 @@ struct vhost_virtqueue {
struct vhost_desc *descs;
int ndescs;
+ int first_desc;
int max_descs;
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
@@ -242,7 +243,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
#define vq_err(vq, fmt, ...) do { \
- pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ pr_err(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
eventfd_signal((vq)->error_ctx, 1);\
} while (0)
@@ -268,6 +269,8 @@ static inline void vhost_vq_set_backend_opaque(struct vhost_virtqueue *vq,
void *private_data)
{
vq->private_data = private_data;
+ vq->ndescs = 0;
+ vq->first_desc = 0;
}
/**
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 5/8] tools/virtio: Add --batch option
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (3 preceding siblings ...)
2020-03-31 18:00 ` [PATCH v2 4/8] vhost: batching fetches Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 6/8] tools/virtio: Add --batch=random option Eugenio Pérez
` (2 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
This allows testing vhost with more than one buffer in flight.
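For example, to keep at most 8 buffers in flight (hypothetical invocation,
with the vhost_test module loaded):
./virtio_test --batch=8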
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tools/virtio/virtio_test.c | 47 ++++++++++++++++++++++++++++++--------
1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index b427def67e7e..c30de9088f3c 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <getopt.h>
+#include <limits.h>
#include <string.h>
#include <poll.h>
#include <sys/eventfd.h>
@@ -152,11 +153,11 @@ static void wait_for_interrupt(struct vdev_info *dev)
}
static void run_test(struct vdev_info *dev, struct vq_info *vq,
- bool delayed, int bufs)
+ bool delayed, int batch, int bufs)
{
struct scatterlist sl;
long started = 0, completed = 0;
- long completed_before;
+ long completed_before, started_before;
int r, test = 1;
unsigned len;
long long spurious = 0;
@@ -165,28 +166,42 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
for (;;) {
virtqueue_disable_cb(vq->vq);
completed_before = completed;
+ started_before = started;
do {
- if (started < bufs) {
+ while (started < bufs &&
+ (started - completed) < batch) {
sg_init_one(&sl, dev->buf, dev->buf_size);
r = virtqueue_add_outbuf(vq->vq, &sl, 1,
dev->buf + started,
GFP_ATOMIC);
- if (likely(r == 0)) {
- ++started;
- if (unlikely(!virtqueue_kick(vq->vq)))
+ if (unlikely(r != 0)) {
+ if (r == -ENOSPC &&
+ started > started_before)
+ r = 0;
+ else
r = -1;
+ break;
}
- } else
+
+ ++started;
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (started >= bufs)
r = -1;
/* Flush out completed bufs if any */
- if (virtqueue_get_buf(vq->vq, &len)) {
+ while (virtqueue_get_buf(vq->vq, &len)) {
++completed;
r = 0;
}
} while (r == 0);
- if (completed == completed_before)
+ if (completed == completed_before && started == started_before)
++spurious;
assert(completed <= bufs);
assert(started <= bufs);
@@ -244,6 +259,11 @@ const struct option longopts[] = {
.name = "no-delayed-interrupt",
.val = 'd',
},
+ {
+ .name = "batch",
+ .val = 'b',
+ .has_arg = required_argument,
+ },
{
}
};
@@ -255,6 +275,7 @@ static void help(void)
" [--no-event-idx]"
" [--no-virtio-1]"
" [--delayed-interrupt]"
+ " [--batch=N]"
"\n");
}
@@ -263,6 +284,7 @@ int main(int argc, char **argv)
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+ long batch = 1;
int o;
bool delayed = false;
@@ -289,6 +311,11 @@ int main(int argc, char **argv)
case 'D':
delayed = true;
break;
+ case 'b':
+ batch = strtol(optarg, NULL, 10);
+ assert(batch > 0);
+ assert(batch < (long)INT_MAX + 1);
+ break;
default:
assert(0);
break;
@@ -298,6 +325,6 @@ int main(int argc, char **argv)
done:
vdev_info_init(&dev, features);
vq_info_add(&dev, 256);
- run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+ run_test(&dev, &dev.vqs[0], delayed, batch, 0x100000);
return 0;
}
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 6/8] tools/virtio: Add --batch=random option
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (4 preceding siblings ...)
2020-03-31 18:00 ` [PATCH v2 5/8] tools/virtio: Add --batch option Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 7/8] tools/virtio: Add --reset=random Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 8/8] tools/virtio: Make --reset reset ring idx Eugenio Pérez
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
So we can test with non-deterministic batches in flight.
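For example (hypothetical invocation):
./virtio_test --batch=random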
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tools/virtio/virtio_test.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index c30de9088f3c..4a2b9d11f287 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -19,6 +19,8 @@
#include <linux/virtio_ring.h>
#include "../../drivers/vhost/test.h"
+#define RANDOM_BATCH -1
+
/* Unused */
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
@@ -161,6 +163,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
int r, test = 1;
unsigned len;
long long spurious = 0;
+ const bool random_batch = batch == RANDOM_BATCH;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
for (;;) {
@@ -168,6 +171,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
completed_before = completed;
started_before = started;
do {
+ if (random_batch)
+ batch = (random() % vq->vring.num) + 1;
+
while (started < bufs &&
(started - completed) < batch) {
sg_init_one(&sl, dev->buf, dev->buf_size);
@@ -275,7 +281,7 @@ static void help(void)
" [--no-event-idx]"
" [--no-virtio-1]"
" [--delayed-interrupt]"
- " [--batch=N]"
+ " [--batch=random/N]"
"\n");
}
@@ -312,9 +318,13 @@ int main(int argc, char **argv)
delayed = true;
break;
case 'b':
- batch = strtol(optarg, NULL, 10);
- assert(batch > 0);
- assert(batch < (long)INT_MAX + 1);
+ if (0 == strcmp(optarg, "random")) {
+ batch = RANDOM_BATCH;
+ } else {
+ batch = strtol(optarg, NULL, 10);
+ assert(batch > 0);
+ assert(batch < (long)INT_MAX + 1);
+ }
break;
default:
assert(0);
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 7/8] tools/virtio: Add --reset=random
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (5 preceding siblings ...)
2020-03-31 18:00 ` [PATCH v2 6/8] tools/virtio: Add --batch=random option Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 8/8] tools/virtio: Make --reset reset ring idx Eugenio Pérez
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
Currently, it only removes and re-adds the backend, but it will also reset
the vq position in future commits.
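For example, combining it with random batching (hypothetical invocation):
./virtio_test --batch=16 --reset=random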
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/test.c | 57 ++++++++++++++++++++++++++++++++++++++
drivers/vhost/test.h | 1 +
tools/virtio/virtio_test.c | 44 ++++++++++++++++++++++++++---
3 files changed, 98 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 58da17af93e9..329a865227ff 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features)
return 0;
}
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+ static void *private_data;
+
+ const bool enable = fd != -1;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_TEST_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ if (!enable) {
+ vhost_poll_stop(&vq->poll);
+ private_data = vq->private_data;
+ vq->private_data = NULL;
+ } else {
+ r = vhost_vq_init_access(vq);
+ vq->private_data = private_data;
+ if (r == 0)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (enable) {
+ vhost_test_flush_vq(n, index);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
+ struct vhost_vring_file backend;
struct vhost_test *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
@@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&test, argp, sizeof test))
return -EFAULT;
return vhost_test_run(n, test);
+ case VHOST_TEST_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_test_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
index 7dd265bfdf81..822bc4bee03a 100644
--- a/drivers/vhost/test.h
+++ b/drivers/vhost/test.h
@@ -4,5 +4,6 @@
/* Start a given test on the virtio null device. 0 stops all tests. */
#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
#endif
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 4a2b9d11f287..93d81cd64ba0 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -20,6 +20,7 @@
#include "../../drivers/vhost/test.h"
#define RANDOM_BATCH -1
+#define RANDOM_RESET -1
/* Unused */
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
@@ -46,6 +47,9 @@ struct vdev_info {
struct vhost_memory *mem;
};
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+ backend = { .fd = 1 };
+
bool vq_notify(struct virtqueue *vq)
{
struct vq_info *info = vq->priv;
@@ -155,10 +159,10 @@ static void wait_for_interrupt(struct vdev_info *dev)
}
static void run_test(struct vdev_info *dev, struct vq_info *vq,
- bool delayed, int batch, int bufs)
+ bool delayed, int batch, int reset_n, int bufs)
{
struct scatterlist sl;
- long started = 0, completed = 0;
+ long started = 0, completed = 0, next_reset = reset_n;
long completed_before, started_before;
int r, test = 1;
unsigned len;
@@ -171,6 +175,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
completed_before = completed;
started_before = started;
do {
+ const bool reset = reset_n && completed > next_reset;
if (random_batch)
batch = (random() % vq->vring.num) + 1;
@@ -200,12 +205,26 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
if (started >= bufs)
r = -1;
+ if (reset) {
+ r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+ &no_backend);
+ assert(!r);
+ }
+
/* Flush out completed bufs if any */
while (virtqueue_get_buf(vq->vq, &len)) {
++completed;
r = 0;
}
+ if (reset) {
+ r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+ &backend);
+ assert(!r);
+
+ while (completed > next_reset)
+ next_reset += completed;
+ }
} while (r == 0);
if (completed == completed_before && started == started_before)
++spurious;
@@ -270,6 +289,11 @@ const struct option longopts[] = {
.val = 'b',
.has_arg = required_argument,
},
+ {
+ .name = "reset",
+ .val = 'r',
+ .has_arg = optional_argument,
+ },
{
}
};
@@ -282,6 +306,7 @@ static void help(void)
" [--no-virtio-1]"
" [--delayed-interrupt]"
" [--batch=random/N]"
+ " [--reset=random/N]"
"\n");
}
@@ -290,7 +315,7 @@ int main(int argc, char **argv)
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
- long batch = 1;
+ long batch = 1, reset = 0;
int o;
bool delayed = false;
@@ -326,6 +351,17 @@ int main(int argc, char **argv)
assert(batch < (long)INT_MAX + 1);
}
break;
+ case 'r':
+ if (!optarg) {
+ reset = 1;
+ } else if (0 == strcmp(optarg, "random")) {
+ reset = RANDOM_RESET;
+ } else {
+ reset = strtol(optarg, NULL, 10);
+ assert(reset >= 0);
+ assert(reset < (long)INT_MAX + 1);
+ }
+ break;
default:
assert(0);
break;
@@ -335,6 +371,6 @@ int main(int argc, char **argv)
done:
vdev_info_init(&dev, features);
vq_info_add(&dev, 256);
- run_test(&dev, &dev.vqs[0], delayed, batch, 0x100000);
+ run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
return 0;
}
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH v2 8/8] tools/virtio: Make --reset reset ring idx
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (6 preceding siblings ...)
2020-03-31 18:00 ` [PATCH v2 7/8] tools/virtio: Add --reset=random Eugenio Pérez
@ 2020-03-31 18:00 ` Eugenio Pérez
7 siblings, 0 replies; 13+ messages in thread
From: Eugenio Pérez @ 2020-03-31 18:00 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Eugenio Pérez, kvm list,
Halil Pasic, virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/virtio/virtio_ring.c | 29 +++++++++++++++++++++++++++++
tools/virtio/linux/virtio.h | 2 ++
tools/virtio/virtio_test.c | 28 +++++++++++++++++++++++++++-
3 files changed, 58 insertions(+), 1 deletion(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 867c7ebd3f10..01b322ee2e04 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1810,6 +1810,35 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
+#ifndef __KERNEL__
+
+/**
+ * virtqueue_reset_free_head - Reset the split ring bookkeeping to 0.
+ * @_vq: Virtqueue to reset.
+ *
+ * At the moment, this is only meant for debugging ring index changes; do
+ * not use it in production.
+ */
+void virtqueue_reset_free_head(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ // vq->last_used_idx = 0;
+ vq->num_added = 0;
+
+ vq->split.queue_size_in_bytes = 0;
+ vq->split.avail_flags_shadow = 0;
+ vq->split.avail_idx_shadow = 0;
+
+ memset(vq->split.desc_state, 0, vq->split.vring.num *
+ sizeof(struct vring_desc_state_split));
+
+ vq->free_head = 0;
+}
+EXPORT_SYMBOL_GPL(virtqueue_reset_free_head);
+
+#endif
+
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @_vq: the struct virtqueue
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index b751350d4ce8..5d33eab6b814 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -65,4 +65,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
+void virtqueue_reset_free_head(struct virtqueue *vq);
+
#endif
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 93d81cd64ba0..bf21ece30594 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -49,6 +49,7 @@ struct vdev_info {
static const struct vhost_vring_file no_backend = { .fd = -1 },
backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
bool vq_notify(struct virtqueue *vq)
{
@@ -218,10 +219,33 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
}
if (reset) {
+ struct vhost_vring_state s = { .index = 0 };
+ int i;
+ vq->vring.avail->idx = 0;
+ vq->vq->num_free = vq->vring.num;
+
+ // Put everything in free lists.
+ for (i = 0; i < vq->vring.num-1; i++)
+ vq->vring.desc[i].next =
+ cpu_to_virtio16(&dev->vdev,
+ i + 1);
+ vq->vring.desc[vq->vring.num-1].next = 0;
+ virtqueue_reset_free_head(vq->vq);
+
+ r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+ &s);
+ assert(!r);
+
+ r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+ &null_state);
+ assert(!r);
+
r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
&backend);
assert(!r);
+ started = completed;
while (completed > next_reset)
next_reset += completed;
}
@@ -243,7 +267,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
test = 0;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
- fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+ fprintf(stderr,
+ "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, started, completed);
}
const char optstring[] = "h";
--
2.18.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data
2020-03-31 17:59 ` [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
@ 2020-03-31 18:14 ` Michael S. Tsirkin
2020-03-31 19:29 ` Eugenio Perez Martin
2020-03-31 18:28 ` Michael S. Tsirkin
1 sibling, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2020-03-31 18:14 UTC (permalink / raw)
To: Eugenio Pérez
Cc: linux-kernel@vger.kernel.org, kvm list, Halil Pasic,
virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
On Tue, Mar 31, 2020 at 07:59:59PM +0200, Eugenio Pérez wrote:
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
> drivers/vhost/net.c | 28 +++++++++++++++-------------
> drivers/vhost/vhost.h | 28 ++++++++++++++++++++++++++++
> drivers/vhost/vsock.c | 14 +++++++-------
> 3 files changed, 50 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
> index e158159671fa..6c5e7a6f712c 100644
> --- a/drivers/vhost/net.c
> +++ b/drivers/vhost/net.c
> @@ -424,7 +424,7 @@ static void vhost_net_disable_vq(struct vhost_net *n,
> struct vhost_net_virtqueue *nvq =
> container_of(vq, struct vhost_net_virtqueue, vq);
> struct vhost_poll *poll = n->poll + (nvq - n->vqs);
> - if (!vq->private_data)
> + if (!vhost_vq_get_backend_opaque(vq))
> return;
> vhost_poll_stop(poll);
> }
> @@ -437,7 +437,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
> struct vhost_poll *poll = n->poll + (nvq - n->vqs);
> struct socket *sock;
>
> - sock = vq->private_data;
> + sock = vhost_vq_get_backend_opaque(vq);
> if (!sock)
> return 0;
>
> @@ -524,7 +524,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
> return;
>
> vhost_disable_notify(&net->dev, vq);
> - sock = rvq->private_data;
> + sock = vhost_vq_get_backend_opaque(rvq);
>
> busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
> tvq->busyloop_timeout;
> @@ -570,8 +570,10 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
>
> if (r == tvq->num && tvq->busyloop_timeout) {
> /* Flush batched packets first */
> - if (!vhost_sock_zcopy(tvq->private_data))
> - vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
> + if (!vhost_sock_zcopy(vhost_vq_get_backend_opaque(tvq)))
> + vhost_tx_batch(net, tnvq,
> + vhost_vq_get_backend_opaque(tvq),
> + msghdr);
>
> vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
>
> @@ -685,7 +687,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
> struct vhost_virtqueue *vq = &nvq->vq;
> struct vhost_net *net = container_of(vq->dev, struct vhost_net,
> dev);
> - struct socket *sock = vq->private_data;
> + struct socket *sock = vhost_vq_get_backend_opaque(vq);
> struct page_frag *alloc_frag = &net->page_frag;
> struct virtio_net_hdr *gso;
> struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
> @@ -952,7 +954,7 @@ static void handle_tx(struct vhost_net *net)
> struct socket *sock;
>
> mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
> - sock = vq->private_data;
> + sock = vhost_vq_get_backend_opaque(vq);
> if (!sock)
> goto out;
>
> @@ -1121,7 +1123,7 @@ static void handle_rx(struct vhost_net *net)
> int recv_pkts = 0;
>
> mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
> - sock = vq->private_data;
> + sock = vhost_vq_get_backend_opaque(vq);
> if (!sock)
> goto out;
>
> @@ -1344,9 +1346,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
> container_of(vq, struct vhost_net_virtqueue, vq);
>
> mutex_lock(&vq->mutex);
> - sock = vq->private_data;
> + sock = vhost_vq_get_backend_opaque(vq);
> vhost_net_disable_vq(n, vq);
> - vq->private_data = NULL;
> + vhost_vq_set_backend_opaque(vq, NULL);
> vhost_net_buf_unproduce(nvq);
> nvq->rx_ring = NULL;
> mutex_unlock(&vq->mutex);
> @@ -1528,7 +1530,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
> }
>
> /* start polling new socket */
> - oldsock = vq->private_data;
> + oldsock = vhost_vq_get_backend_opaque(vq);
> if (sock != oldsock) {
> ubufs = vhost_net_ubuf_alloc(vq,
> sock && vhost_sock_zcopy(sock));
> @@ -1538,7 +1540,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
> }
>
> vhost_net_disable_vq(n, vq);
> - vq->private_data = sock;
> + vhost_vq_set_backend_opaque(vq, sock);
> vhost_net_buf_unproduce(nvq);
> r = vhost_vq_init_access(vq);
> if (r)
> @@ -1575,7 +1577,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
> return 0;
>
> err_used:
> - vq->private_data = oldsock;
> + vhost_vq_set_backend_opaque(vq, oldsock);
> vhost_net_enable_vq(n, vq);
> if (ubufs)
> vhost_net_ubuf_put_wait_and_free(ubufs);
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index a123fd70847e..0808188f7e8f 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -244,6 +244,34 @@ enum {
> (1ULL << VIRTIO_F_VERSION_1)
> };
>
> +/**
> + * vhost_vq_set_backend_opaque - Set backend opaque.
> + *
> + * @vq: Virtqueue.
> + * @private_data: The private data.
> + *
> + * Context: Need to call with vq->mutex acquired.
> + */
> +static inline void vhost_vq_set_backend_opaque(struct vhost_virtqueue *vq,
> + void *private_data)
> +{
> + vq->private_data = private_data;
> +}
> +
> +/**
> + * vhost_vq_get_backend_opaque - Get backend opaque.
> + *
> + * @vq: Virtqueue.
> + *
> + * Context: Need to call with vq->mutex acquired.
> + * Return: Opaque previously set with vhost_vq_set_backend_opaque.
> + */
> +static inline void *vhost_vq_get_backend_opaque(struct vhost_virtqueue *vq)
> +{
> + return vq->private_data;
> +}
> +
> static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
> {
> return vq->acked_features & (1ULL << bit);
I think I prefer vhost_vq_get_backend and vhost_vq_set_backend.
"opaque" just means that it's void * that is clear from the signature
anyway.
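Something like this (sketch, untested):

static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}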
> diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
> index c2d7d57e98cf..6e20dbe14acd 100644
> --- a/drivers/vhost/vsock.c
> +++ b/drivers/vhost/vsock.c
> @@ -91,7 +91,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
>
> mutex_lock(&vq->mutex);
>
> - if (!vq->private_data)
> + if (!vhost_vq_get_backend_opaque(vq))
> goto out;
>
> /* Avoid further vmexits, we're already processing the virtqueue */
> @@ -440,7 +440,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
>
> mutex_lock(&vq->mutex);
>
> - if (!vq->private_data)
> + if (!vhost_vq_get_backend_opaque(vq))
> goto out;
>
> vhost_disable_notify(&vsock->dev, vq);
> @@ -533,8 +533,8 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
> goto err_vq;
> }
>
> - if (!vq->private_data) {
> - vq->private_data = vsock;
> + if (!vhost_vq_get_backend_opaque(vq)) {
> + vhost_vq_set_backend_opaque(vq, vsock);
> ret = vhost_vq_init_access(vq);
> if (ret)
> goto err_vq;
> @@ -547,14 +547,14 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
> return 0;
>
> err_vq:
> - vq->private_data = NULL;
> + vhost_vq_set_backend_opaque(vq, NULL);
> mutex_unlock(&vq->mutex);
>
> for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
> vq = &vsock->vqs[i];
>
> mutex_lock(&vq->mutex);
> - vq->private_data = NULL;
> + vhost_vq_set_backend_opaque(vq, NULL);
> mutex_unlock(&vq->mutex);
> }
> err:
> @@ -577,7 +577,7 @@ static int vhost_vsock_stop(struct vhost_vsock *vsock)
> struct vhost_virtqueue *vq = &vsock->vqs[i];
>
> mutex_lock(&vq->mutex);
> - vq->private_data = NULL;
> + vhost_vq_set_backend_opaque(vq, NULL);
> mutex_unlock(&vq->mutex);
> }
>
> --
> 2.18.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data
2020-03-31 17:59 ` [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
2020-03-31 18:14 ` Michael S. Tsirkin
@ 2020-03-31 18:28 ` Michael S. Tsirkin
2020-03-31 19:28 ` Eugenio Perez Martin
1 sibling, 1 reply; 13+ messages in thread
From: Michael S. Tsirkin @ 2020-03-31 18:28 UTC (permalink / raw)
To: Eugenio Pérez
Cc: linux-kernel@vger.kernel.org, kvm list, Halil Pasic,
virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
On Tue, Mar 31, 2020 at 07:59:59PM +0200, Eugenio Pérez wrote:
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
> drivers/vhost/net.c | 28 +++++++++++++++-------------
> drivers/vhost/vhost.h | 28 ++++++++++++++++++++++++++++
> drivers/vhost/vsock.c | 14 +++++++-------
Seems to be missing scsi and test.
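E.g. drivers/vhost/scsi.c pokes vq->private_data in the same way;
from memory (so double-check the exact sites) the conversion is the
same one-liner pattern:

-	vs_tpg = vq->private_data;
+	vs_tpg = vhost_vq_get_backend_opaque(vq);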
> 3 files changed, 50 insertions(+), 20 deletions(-)
>
> [...]
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index a123fd70847e..0808188f7e8f 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -244,6 +244,34 @@ enum {
> (1ULL << VIRTIO_F_VERSION_1)
> };
>
> +/**
> + * vhost_vq_set_backend_opaque - Set backend opaque.
> + *
> + * @vq: Virtqueue.
> + * @private_data: The private data.
> + *
> + * Context: Need to call with vq->mutex acquired.
> + */
> +static inline void vhost_vq_set_backend_opaque(struct vhost_virtqueue *vq,
> + void *private_data)
> +{
> + vq->private_data = private_data;
> +}
> +
> +/**
> + * vhost_vq_get_backend_opaque - Get backend opaque.
> + *
> + * @vq: Virtqueue.
> + *
> + * Context: Need to call with vq->mutex acquired.
> + * Return: Opaque previously set with vhost_vq_set_backend_opaque.
I prefer opaque -> private data in comments.
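I.e. something like:

 * Return: Private data previously set with vhost_vq_set_backend_opaque.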
> + */
> +static inline void *vhost_vq_get_backend_opaque(struct vhost_virtqueue *vq)
> +{
> + return vq->private_data;
> +}
> [...]
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data
2020-03-31 18:28 ` Michael S. Tsirkin
@ 2020-03-31 19:28 ` Eugenio Perez Martin
0 siblings, 0 replies; 13+ messages in thread
From: Eugenio Perez Martin @ 2020-03-31 19:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, kvm list, Halil Pasic,
virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
On Tue, Mar 31, 2020 at 8:29 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Mar 31, 2020 at 07:59:59PM +0200, Eugenio Pérez wrote:
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> > drivers/vhost/net.c | 28 +++++++++++++++-------------
> > drivers/vhost/vhost.h | 28 ++++++++++++++++++++++++++++
> > drivers/vhost/vsock.c | 14 +++++++-------
>
>
> Seems to be missing scsi and test.
Good point, changing them too!
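For drivers/vhost/test.c the change should be roughly (sketch, not
compiled yet):

-	private = vq->private_data;
+	private = vhost_vq_get_backend_opaque(vq);

and the teardown side:

-	vq->private_data = NULL;
+	vhost_vq_set_backend_opaque(vq, NULL);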
>
>
> > [...]
> > +/**
> > + * vhost_vq_get_backend_opaque - Get backend opaque.
> > + *
> > + * @vq: Virtqueue.
> > + *
> > + * Context: Need to call with vq->mutex acquired.
> > + * Return: Opaque previously set with vhost_vq_set_backend_opaque.
>
>
> I prefer opaque -> private data in comments.
>
Changing.
v3 sent.
Thanks!
> > [...]
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data
2020-03-31 18:14 ` Michael S. Tsirkin
@ 2020-03-31 19:29 ` Eugenio Perez Martin
0 siblings, 0 replies; 13+ messages in thread
From: Eugenio Perez Martin @ 2020-03-31 19:29 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, kvm list, Halil Pasic,
virtualization@lists.linux-foundation.org,
Linux Next Mailing List, Cornelia Huck, Stephen Rothwell,
Christian Borntraeger
On Tue, Mar 31, 2020 at 8:14 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, Mar 31, 2020 at 07:59:59PM +0200, Eugenio Pérez wrote:
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> > drivers/vhost/net.c | 28 +++++++++++++++-------------
> > drivers/vhost/vhost.h | 28 ++++++++++++++++++++++++++++
> > drivers/vhost/vsock.c | 14 +++++++-------
> > 3 files changed, 50 insertions(+), 20 deletions(-)
> >
> > [...]
> > +/**
> > + * vhost_vq_set_backend_opaque - Set backend opaque.
> > + *
> > + * @vq: Virtqueue.
> > + * @private_data: The private data.
> > + *
> > + * Context: Need to call with vq->mutex acquired.
> > + */
> > +static inline void vhost_vq_set_backend_opaque(struct vhost_virtqueue *vq,
> > + void *private_data)
> > +{
> > + vq->private_data = private_data;
> > +}
> > +
> > +/**
> > + * vhost_vq_get_backend_opaque - Get backend opaque.
> > + *
> > + * @vq: Virtqueue.
> > + *
> > + * Context: Need to call with vq->mutex acquired.
> > + * Return: Opaque previously set with vhost_vq_set_backend_opaque.
> > + */
> > +static inline void *vhost_vq_get_backend_opaque(struct vhost_virtqueue *vq)
> > +{
> > + return vq->private_data;
> > +}
> > +
> > static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
> > {
> > return vq->acked_features & (1ULL << bit);
>
>
> I think I prefer vhost_vq_get_backend and vhost_vq_set_backend.
>
> "opaque" just means that it's a void *, which is clear from the
> signature anyway.
>
I agree. Changed in the v3 I just sent.
Thanks!
> > [...]
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread

Thread overview: 13+ messages
2020-03-31 17:59 [PATCH v2 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
2020-03-31 17:59 ` [PATCH v2 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
2020-03-31 18:14 ` Michael S. Tsirkin
2020-03-31 19:29 ` Eugenio Perez Martin
2020-03-31 18:28 ` Michael S. Tsirkin
2020-03-31 19:28 ` Eugenio Perez Martin
2020-03-31 18:00 ` [PATCH v2 2/8] vhost: option to fetch descriptors through an independent struct Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 3/8] vhost: use batched version by default Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 4/8] vhost: batching fetches Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 5/8] tools/virtio: Add --batch option Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 6/8] tools/virtio: Add --batch=random option Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 7/8] tools/virtio: Add --reset=random Eugenio Pérez
2020-03-31 18:00 ` [PATCH v2 8/8] tools/virtio: Make --reset reset ring idx Eugenio Pérez