* [PATCH v3 1/8] vhost: Create accessors for virtqueues private_data
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
@ 2020-03-31 19:27 ` Eugenio Pérez
2020-03-31 19:27 ` [PATCH v3 2/8] vhost: option to fetch descriptors through an independent struct Eugenio Pérez
` (8 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:27 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/net.c | 28 +++++++++++++++-------------
drivers/vhost/scsi.c | 14 +++++++-------
drivers/vhost/test.c | 10 +++++-----
drivers/vhost/vhost.h | 27 +++++++++++++++++++++++++++
drivers/vhost/vsock.c | 14 +++++++-------
5 files changed, 61 insertions(+), 32 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e158159671fa..6ad1612f496e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -424,7 +424,7 @@ static void vhost_net_disable_vq(struct vhost_net *n,
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
return;
vhost_poll_stop(poll);
}
@@ -437,7 +437,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock;
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
if (!sock)
return 0;
@@ -524,7 +524,7 @@ static void vhost_net_busy_poll(struct vhost_net *net,
return;
vhost_disable_notify(&net->dev, vq);
- sock = rvq->private_data;
+ sock = vhost_vq_get_backend(rvq);
busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
tvq->busyloop_timeout;
@@ -570,8 +570,10 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
if (r == tvq->num && tvq->busyloop_timeout) {
/* Flush batched packets first */
- if (!vhost_sock_zcopy(tvq->private_data))
- vhost_tx_batch(net, tnvq, tvq->private_data, msghdr);
+ if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
+ vhost_tx_batch(net, tnvq,
+ vhost_vq_get_backend(tvq),
+ msghdr);
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);
@@ -685,7 +687,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_virtqueue *vq = &nvq->vq;
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
- struct socket *sock = vq->private_data;
+ struct socket *sock = vhost_vq_get_backend(vq);
struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
@@ -952,7 +954,7 @@ static void handle_tx(struct vhost_net *net)
struct socket *sock;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
if (!sock)
goto out;
@@ -1121,7 +1123,7 @@ static void handle_rx(struct vhost_net *net)
int recv_pkts = 0;
mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
if (!sock)
goto out;
@@ -1344,9 +1346,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
container_of(vq, struct vhost_net_virtqueue, vq);
mutex_lock(&vq->mutex);
- sock = vq->private_data;
+ sock = vhost_vq_get_backend(vq);
vhost_net_disable_vq(n, vq);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
vhost_net_buf_unproduce(nvq);
nvq->rx_ring = NULL;
mutex_unlock(&vq->mutex);
@@ -1528,7 +1530,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
/* start polling new socket */
- oldsock = vq->private_data;
+ oldsock = vhost_vq_get_backend(vq);
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
@@ -1538,7 +1540,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
vhost_net_disable_vq(n, vq);
- vq->private_data = sock;
+ vhost_vq_set_backend(vq, sock);
vhost_net_buf_unproduce(nvq);
r = vhost_vq_init_access(vq);
if (r)
@@ -1575,7 +1577,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
return 0;
err_used:
- vq->private_data = oldsock;
+ vhost_vq_set_backend(vq, oldsock);
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0b949a14bce3..4b5deeeabc3d 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -452,7 +452,7 @@ vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
unsigned out, in;
int head, ret;
- if (!vq->private_data) {
+ if (!vhost_vq_get_backend(vq)) {
vs->vs_events_missed = true;
return;
}
@@ -892,7 +892,7 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
} else {
struct vhost_scsi_tpg **vs_tpg, *tpg;
- vs_tpg = vq->private_data; /* validated at handler entry */
+ vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
tpg = READ_ONCE(vs_tpg[*vc->target]);
if (unlikely(!tpg)) {
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
*/
- vs_tpg = vq->private_data;
+ vs_tpg = vhost_vq_get_backend(vq);
if (!vs_tpg)
goto out;
@@ -1184,7 +1184,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
*/
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
memset(&vc, 0, sizeof(vc));
@@ -1322,7 +1322,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
if (vs->vs_events_missed)
@@ -1460,7 +1460,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
- vq->private_data = vs_tpg;
+ vhost_vq_set_backend(vq, vs_tpg);
vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
@@ -1547,7 +1547,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index e37c92d4d7ad..394e2e5c772d 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -49,7 +49,7 @@ static void handle_vq(struct vhost_test *n)
void *private;
mutex_lock(&vq->mutex);
- private = vq->private_data;
+ private = vhost_vq_get_backend(vq);
if (!private) {
mutex_unlock(&vq->mutex);
return;
@@ -133,8 +133,8 @@ static void *vhost_test_stop_vq(struct vhost_test *n,
void *private;
mutex_lock(&vq->mutex);
- private = vq->private_data;
- vq->private_data = NULL;
+ private = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
return private;
}
@@ -198,8 +198,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
priv = test ? n : NULL;
/* start polling new socket */
- oldpriv = vq->private_data;
- vq->private_data = priv;
+ oldpriv = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, priv);
r = vhost_vq_init_access(&n->vqs[index]);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a123fd70847e..b77203e0cae2 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -244,6 +244,33 @@ enum {
(1ULL << VIRTIO_F_VERSION_1)
};
+/**
+ * vhost_vq_set_backend - Set backend.
+ *
+ * @vq: Virtqueue.
+ * @private_data: The private data.
+ *
+ * Context: Needs to be called with vq->mutex acquired.
+ */
+static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
+ void *private_data)
+{
+ vq->private_data = private_data;
+}
+
+/**
+ * vhost_vq_get_backend - Get backend.
+ *
+ * @vq: Virtqueue.
+ *
+ * Context: Needs to be called with vq->mutex acquired.
+ * Return: Private data previously set with vhost_vq_set_backend.
+ */
+static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
+{
+ return vq->private_data;
+}
+
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
return vq->acked_features & (1ULL << bit);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index c2d7d57e98cf..6c1fbfd621a2 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -91,7 +91,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
/* Avoid further vmexits, we're already processing the virtqueue */
@@ -440,7 +440,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
mutex_lock(&vq->mutex);
- if (!vq->private_data)
+ if (!vhost_vq_get_backend(vq))
goto out;
vhost_disable_notify(&vsock->dev, vq);
@@ -533,8 +533,8 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err_vq;
}
- if (!vq->private_data) {
- vq->private_data = vsock;
+ if (!vhost_vq_get_backend(vq)) {
+ vhost_vq_set_backend(vq, vsock);
ret = vhost_vq_init_access(vq);
if (ret)
goto err_vq;
@@ -547,14 +547,14 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
err:
@@ -577,7 +577,7 @@ static int vhost_vsock_stop(struct vhost_vsock *vsock)
struct vhost_virtqueue *vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
- vq->private_data = NULL;
+ vhost_vq_set_backend(vq, NULL);
mutex_unlock(&vq->mutex);
}
--
2.18.1
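For reference, a minimal sketch of the calling convention the new accessors assume (a hypothetical kick handler, not part of this series): the backend pointer is only read or written with vq->mutex held, exactly as the converted call sites above do.

static void example_handle_kick(struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	/* NULL means no backend is currently attached */
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;
	/* ... process the virtqueue using sock ... */
out:
	mutex_unlock(&vq->mutex);
}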
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 2/8] vhost: option to fetch descriptors through an independent struct
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
2020-03-31 19:27 ` [PATCH v3 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
@ 2020-03-31 19:27 ` Eugenio Pérez
2020-03-31 19:27 ` [PATCH v3 3/8] vhost: use batched version by default Eugenio Pérez
` (7 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:27 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
From: "Michael S. Tsirkin" <mst@redhat.com>
The idea is to support multiple ring formats by converting
to a format-independent array of descriptors.
This costs extra cycles, but we gain the ability
to fetch a batch of descriptors in one go, which
is good for code cache locality.
When used, this causes a minor performance degradation;
it's been kept as simple as possible for ease of review.
A follow-up patch gets us back the performance by adding batching.
To simplify benchmarking, I kept the old code around so one can switch
back and forth between old and new code. This will go away in the final
submission.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/vhost.c | 297 +++++++++++++++++++++++++++++++++++++++++-
drivers/vhost/vhost.h | 16 +++
2 files changed, 312 insertions(+), 1 deletion(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f44340b41494..38e65a36465b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -302,6 +302,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
vq->num = 1;
+ vq->ndescs = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -372,6 +373,9 @@ static int vhost_worker(void *data)
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
+ kfree(vq->descs);
+ vq->descs = NULL;
+ vq->max_descs = 0;
kfree(vq->indirect);
vq->indirect = NULL;
kfree(vq->log);
@@ -388,6 +392,10 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
+ vq->max_descs = dev->iov_limit;
+ vq->descs = kmalloc_array(vq->max_descs,
+ sizeof(*vq->descs),
+ GFP_KERNEL);
vq->indirect = kmalloc_array(UIO_MAXIOV,
sizeof(*vq->indirect),
GFP_KERNEL);
@@ -395,7 +403,7 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
GFP_KERNEL);
vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
GFP_KERNEL);
- if (!vq->indirect || !vq->log || !vq->heads)
+ if (!vq->indirect || !vq->log || !vq->heads || !vq->descs)
goto err_nomem;
}
return 0;
@@ -2352,6 +2360,293 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
+static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq)
+{
+ BUG_ON(!vq->ndescs);
+ return &vq->descs[vq->ndescs - 1];
+}
+
+static void pop_split_desc(struct vhost_virtqueue *vq)
+{
+ BUG_ON(!vq->ndescs);
+ --vq->ndescs;
+}
+
+#define VHOST_DESC_FLAGS (VRING_DESC_F_INDIRECT | VRING_DESC_F_WRITE | \
+ VRING_DESC_F_NEXT)
+static int push_split_desc(struct vhost_virtqueue *vq, struct vring_desc *desc, u16 id)
+{
+ struct vhost_desc *h;
+
+ if (unlikely(vq->ndescs >= vq->max_descs))
+ return -EINVAL;
+ h = &vq->descs[vq->ndescs++];
+ h->addr = vhost64_to_cpu(vq, desc->addr);
+ h->len = vhost32_to_cpu(vq, desc->len);
+ h->flags = vhost16_to_cpu(vq, desc->flags) & VHOST_DESC_FLAGS;
+ h->id = id;
+
+ return 0;
+}
+
+static int fetch_indirect_descs(struct vhost_virtqueue *vq,
+ struct vhost_desc *indirect,
+ u16 head)
+{
+ struct vring_desc desc;
+ unsigned int i = 0, count, found = 0;
+ u32 len = indirect->len;
+ struct iov_iter from;
+ int ret;
+
+ /* Sanity check */
+ if (unlikely(len % sizeof desc)) {
+ vq_err(vq, "Invalid length in indirect descriptor: "
+ "len 0x%llx not multiple of 0x%zx\n",
+ (unsigned long long)len,
+ sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(vq, indirect->addr, len, vq->indirect,
+ UIO_MAXIOV, VHOST_ACCESS_RO);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ return ret;
+ }
+ iov_iter_init(&from, READ, vq->indirect, ret, len);
+
+ /* We will use the result as an address to read from, so most
+ * architectures only need a compiler barrier here. */
+ read_barrier_depends();
+
+ count = len / sizeof desc;
+ /* Buffers are chained via a 16 bit next field, so
+ * we can have at most 2^16 of these. */
+ if (unlikely(count > USHRT_MAX + 1)) {
+ vq_err(vq, "Indirect buffer length too big: %d\n",
+ indirect->len);
+ return -E2BIG;
+ }
+ if (unlikely(vq->ndescs + count > vq->max_descs)) {
+ vq_err(vq, "Too many indirect + direct descs: %d + %d\n",
+ vq->ndescs, indirect->len);
+ return -E2BIG;
+ }
+
+ do {
+ if (unlikely(++found > count)) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "indirect size %u\n",
+ i, count);
+ return -EINVAL;
+ }
+ if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
+ vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+ if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
+ vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+
+ push_split_desc(vq, &desc, head);
+ } while ((i = next_desc(vq, &desc)) != -1);
+ return 0;
+}
+
+static int fetch_descs(struct vhost_virtqueue *vq)
+{
+ unsigned int i, head, found = 0;
+ struct vhost_desc *last;
+ struct vring_desc desc;
+ __virtio16 avail_idx;
+ __virtio16 ring_head;
+ u16 last_avail_idx;
+ int ret;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ last_avail_idx = vq->last_avail_idx;
+
+ if (vq->avail_idx == vq->last_avail_idx) {
+ if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
+ vq_err(vq, "Failed to access avail idx at %p\n",
+ &vq->avail->idx);
+ return -EFAULT;
+ }
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+
+ if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
+ vq_err(vq, "Guest moved used index from %u to %u",
+ last_avail_idx, vq->avail_idx);
+ return -EFAULT;
+ }
+
+ /* If there's nothing new since last we looked, return
+ * invalid.
+ */
+ if (vq->avail_idx == last_avail_idx)
+ return vq->num;
+
+ /* Only get avail ring entries after they have been
+ * exposed by guest.
+ */
+ smp_rmb();
+ }
+
+ /* Grab the next descriptor number they're advertising */
+ if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return -EFAULT;
+ }
+
+ head = vhost16_to_cpu(vq, ring_head);
+
+ /* If their number is silly, that's an error. */
+ if (unlikely(head >= vq->num)) {
+ vq_err(vq, "Guest says index %u > %u is available",
+ head, vq->num);
+ return -EINVAL;
+ }
+
+ i = head;
+ do {
+ if (unlikely(i >= vq->num)) {
+ vq_err(vq, "Desc index is %u > %u, head = %u",
+ i, vq->num, head);
+ return -EINVAL;
+ }
+ if (unlikely(++found > vq->num)) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "vq size %u head %u\n",
+ i, vq->num, head);
+ return -EINVAL;
+ }
+ ret = vhost_get_desc(vq, &desc, i);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
+ i, vq->desc + i);
+ return -EFAULT;
+ }
+ ret = push_split_desc(vq, &desc, head);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to save descriptor: idx %d\n", i);
+ return -EINVAL;
+ }
+ } while ((i = next_desc(vq, &desc)) != -1);
+
+ last = peek_split_desc(vq);
+ if (unlikely(last->flags & VRING_DESC_F_INDIRECT)) {
+ pop_split_desc(vq);
+ ret = fetch_indirect_descs(vq, last, head);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", head);
+ return ret;
+ }
+ }
+
+ /* Assume notifications from guest are disabled at this point,
+ * if they aren't we would need to update avail_event index. */
+ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
+
+ /* On success, increment avail index. */
+ vq->last_avail_idx++;
+
+ return 0;
+}
+
+/* This looks in the virtqueue for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error. */
+int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ int ret = fetch_descs(vq);
+ int i;
+
+ if (ret)
+ return ret;
+
+ /* Now convert to IOV */
+ /* When we start there are none of either input nor output. */
+ *out_num = *in_num = 0;
+ if (unlikely(log))
+ *log_num = 0;
+
+ for (i = 0; i < vq->ndescs; ++i) {
+ unsigned iov_count = *in_num + *out_num;
+ struct vhost_desc *desc = &vq->descs[i];
+ int access;
+
+ if (desc->flags & ~VHOST_DESC_FLAGS) {
+ vq_err(vq, "Unexpected flags: 0x%x at descriptor id 0x%x\n",
+ desc->flags, desc->id);
+ ret = -EINVAL;
+ goto err;
+ }
+ if (desc->flags & VRING_DESC_F_WRITE)
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+ ret = translate_desc(vq, desc->addr,
+ desc->len, iov + iov_count,
+ iov_size - iov_count, access);
+ if (unlikely(ret < 0)) {
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
+ goto err;
+ }
+ if (access == VHOST_ACCESS_WO) {
+ /* If this is an input descriptor,
+ * increment that count. */
+ *in_num += ret;
+ if (unlikely(log && ret)) {
+ log[*log_num].addr = desc->addr;
+ log[*log_num].len = desc->len;
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (unlikely(*in_num)) {
+ vq_err(vq, "Descriptor has out after in: "
+ "idx %d\n", i);
+ ret = -EINVAL;
+ goto err;
+ }
+ *out_num += ret;
+ }
+
+ ret = desc->id;
+ }
+
+ vq->ndescs = 0;
+
+ return ret;
+
+err:
+ vhost_discard_vq_desc(vq, 1);
+ vq->ndescs = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc_batch);
+
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index b77203e0cae2..a6fc11797a8a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -80,6 +80,13 @@ enum vhost_uaddr_type {
VHOST_NUM_ADDRS = 3,
};
+struct vhost_desc {
+ u64 addr;
+ u32 len;
+ u16 flags; /* VRING_DESC_F_WRITE, VRING_DESC_F_NEXT */
+ u16 id;
+};
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -90,6 +97,11 @@ struct vhost_virtqueue {
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;
+
+ struct vhost_desc *descs;
+ int ndescs;
+ int max_descs;
+
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
struct eventfd_ctx *call_ctx;
@@ -191,6 +203,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
+int vhost_get_vq_desc_batch(struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_count,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
unsigned int *out_num, unsigned int *in_num,
--
2.18.1
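A standalone userspace sketch of the conversion this patch performs; the struct layouts are simplified and the endianness helpers (vhost64_to_cpu() and friends) are omitted, so this illustrates the idea behind push_split_desc() rather than the kernel code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* guest-visible split-ring descriptor (simplified) */
struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
/* format-independent internal copy, as added to vhost.h above */
struct vhost_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t id; };

int main(void)
{
	struct vring_desc guest = { .addr = 0x1000, .len = 256, .flags = 0, .next = 0 };
	/* push_split_desc() boils down to a field-by-field copy that
	 * tags the entry with the id (head) of its chain */
	struct vhost_desc d = {
		.addr = guest.addr,
		.len = guest.len,
		.flags = guest.flags,
		.id = 0,
	};

	printf("addr=0x%" PRIx64 " len=%u id=%u\n",
	       d.addr, (unsigned)d.len, (unsigned)d.id);
	return 0;
}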
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 3/8] vhost: use batched version by default
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
2020-03-31 19:27 ` [PATCH v3 1/8] vhost: Create accessors for virtqueues private_data Eugenio Pérez
2020-03-31 19:27 ` [PATCH v3 2/8] vhost: option to fetch descriptors through an independent struct Eugenio Pérez
@ 2020-03-31 19:27 ` Eugenio Pérez
2020-03-31 19:28 ` [PATCH v3 4/8] vhost: batching fetches Eugenio Pérez
` (6 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:27 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
From: "Michael S. Tsirkin" <mst@redhat.com>
As testing shows no performance change, switch to the batched version now.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/vhost.c | 251 +-----------------------------------------
drivers/vhost/vhost.h | 4 -
2 files changed, 2 insertions(+), 253 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 38e65a36465b..56c5253056ee 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2113,253 +2113,6 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return next;
}
-static int get_indirect(struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num,
- struct vring_desc *indirect)
-{
- struct vring_desc desc;
- unsigned int i = 0, count, found = 0;
- u32 len = vhost32_to_cpu(vq, indirect->len);
- struct iov_iter from;
- int ret, access;
-
- /* Sanity check */
- if (unlikely(len % sizeof desc)) {
- vq_err(vq, "Invalid length in indirect descriptor: "
- "len 0x%llx not multiple of 0x%zx\n",
- (unsigned long long)len,
- sizeof desc);
- return -EINVAL;
- }
-
- ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
- UIO_MAXIOV, VHOST_ACCESS_RO);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Translation failure %d in indirect.\n", ret);
- return ret;
- }
- iov_iter_init(&from, READ, vq->indirect, ret, len);
-
- /* We will use the result as an address to read from, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
- count = len / sizeof desc;
- /* Buffers are chained via a 16 bit next field, so
- * we can have at most 2^16 of these. */
- if (unlikely(count > USHRT_MAX + 1)) {
- vq_err(vq, "Indirect buffer length too big: %d\n",
- indirect->len);
- return -E2BIG;
- }
-
- do {
- unsigned iov_count = *in_num + *out_num;
- if (unlikely(++found > count)) {
- vq_err(vq, "Loop detected: last one at %u "
- "indirect size %u\n",
- i, count);
- return -EINVAL;
- }
- if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
- vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
- i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
- return -EINVAL;
- }
- if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
- vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
- i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
- return -EINVAL;
- }
-
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
- access = VHOST_ACCESS_WO;
- else
- access = VHOST_ACCESS_RO;
-
- ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
- vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count, access);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Translation failure %d indirect idx %d\n",
- ret, i);
- return ret;
- }
- /* If this is an input descriptor, increment that count. */
- if (access == VHOST_ACCESS_WO) {
- *in_num += ret;
- if (unlikely(log && ret)) {
- log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
- log[*log_num].len = vhost32_to_cpu(vq, desc.len);
- ++*log_num;
- }
- } else {
- /* If it's an output descriptor, they're all supposed
- * to come before any input descriptors. */
- if (unlikely(*in_num)) {
- vq_err(vq, "Indirect descriptor "
- "has out after in: idx %d\n", i);
- return -EINVAL;
- }
- *out_num += ret;
- }
- } while ((i = next_desc(vq, &desc)) != -1);
- return 0;
-}
-
-/* This looks in the virtqueue and for the first available buffer, and converts
- * it to an iovec for convenient access. Since descriptors consist of some
- * number of output then some number of input descriptors, it's actually two
- * iovecs, but we pack them into one and note how many of each there were.
- *
- * This function returns the descriptor number found, or vq->num (which is
- * never a valid descriptor number) if none was found. A negative code is
- * returned on error. */
-int vhost_get_vq_desc(struct vhost_virtqueue *vq,
- struct iovec iov[], unsigned int iov_size,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num)
-{
- struct vring_desc desc;
- unsigned int i, head, found = 0;
- u16 last_avail_idx;
- __virtio16 avail_idx;
- __virtio16 ring_head;
- int ret, access;
-
- /* Check it isn't doing very strange things with descriptor numbers. */
- last_avail_idx = vq->last_avail_idx;
-
- if (vq->avail_idx == vq->last_avail_idx) {
- if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
- vq_err(vq, "Failed to access avail idx at %p\n",
- &vq->avail->idx);
- return -EFAULT;
- }
- vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
-
- if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
- vq_err(vq, "Guest moved used index from %u to %u",
- last_avail_idx, vq->avail_idx);
- return -EFAULT;
- }
-
- /* If there's nothing new since last we looked, return
- * invalid.
- */
- if (vq->avail_idx == last_avail_idx)
- return vq->num;
-
- /* Only get avail ring entries after they have been
- * exposed by guest.
- */
- smp_rmb();
- }
-
- /* Grab the next descriptor number they're advertising, and increment
- * the index we've seen. */
- if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) {
- vq_err(vq, "Failed to read head: idx %d address %p\n",
- last_avail_idx,
- &vq->avail->ring[last_avail_idx % vq->num]);
- return -EFAULT;
- }
-
- head = vhost16_to_cpu(vq, ring_head);
-
- /* If their number is silly, that's an error. */
- if (unlikely(head >= vq->num)) {
- vq_err(vq, "Guest says index %u > %u is available",
- head, vq->num);
- return -EINVAL;
- }
-
- /* When we start there are none of either input nor output. */
- *out_num = *in_num = 0;
- if (unlikely(log))
- *log_num = 0;
-
- i = head;
- do {
- unsigned iov_count = *in_num + *out_num;
- if (unlikely(i >= vq->num)) {
- vq_err(vq, "Desc index is %u > %u, head = %u",
- i, vq->num, head);
- return -EINVAL;
- }
- if (unlikely(++found > vq->num)) {
- vq_err(vq, "Loop detected: last one at %u "
- "vq size %u head %u\n",
- i, vq->num, head);
- return -EINVAL;
- }
- ret = vhost_get_desc(vq, &desc, i);
- if (unlikely(ret)) {
- vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
- i, vq->desc + i);
- return -EFAULT;
- }
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
- ret = get_indirect(vq, iov, iov_size,
- out_num, in_num,
- log, log_num, &desc);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Failure detected "
- "in indirect descriptor at idx %d\n", i);
- return ret;
- }
- continue;
- }
-
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
- access = VHOST_ACCESS_WO;
- else
- access = VHOST_ACCESS_RO;
- ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
- vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count, access);
- if (unlikely(ret < 0)) {
- if (ret != -EAGAIN)
- vq_err(vq, "Translation failure %d descriptor idx %d\n",
- ret, i);
- return ret;
- }
- if (access == VHOST_ACCESS_WO) {
- /* If this is an input descriptor,
- * increment that count. */
- *in_num += ret;
- if (unlikely(log && ret)) {
- log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
- log[*log_num].len = vhost32_to_cpu(vq, desc.len);
- ++*log_num;
- }
- } else {
- /* If it's an output descriptor, they're all supposed
- * to come before any input descriptors. */
- if (unlikely(*in_num)) {
- vq_err(vq, "Descriptor has out after in: "
- "idx %d\n", i);
- return -EINVAL;
- }
- *out_num += ret;
- }
- } while ((i = next_desc(vq, &desc)) != -1);
-
- /* On success, increment avail index. */
- vq->last_avail_idx++;
-
- /* Assume notifications from guest are disabled at this point,
- * if they aren't we would need to update avail_event index. */
- BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
- return head;
-}
-EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
-
static struct vhost_desc *peek_split_desc(struct vhost_virtqueue *vq)
{
BUG_ON(!vq->ndescs);
@@ -2570,7 +2323,7 @@ static int fetch_descs(struct vhost_virtqueue *vq)
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error. */
-int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq,
+int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
@@ -2645,7 +2398,7 @@ int vhost_get_vq_desc_batch(struct vhost_virtqueue *vq,
return ret;
}
-EXPORT_SYMBOL_GPL(vhost_get_vq_desc_batch);
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a6fc11797a8a..1dbb5e44fba4 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -203,10 +203,6 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
-int vhost_get_vq_desc_batch(struct vhost_virtqueue *,
- struct iovec iov[], unsigned int iov_count,
- unsigned int *out_num, unsigned int *in_num,
- struct vhost_log *log, unsigned int *log_num);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
unsigned int *out_num, unsigned int *in_num,
--
2.18.1
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 4/8] vhost: batching fetches
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (2 preceding siblings ...)
2020-03-31 19:27 ` [PATCH v3 3/8] vhost: use batched version by default Eugenio Pérez
@ 2020-03-31 19:28 ` Eugenio Pérez
2020-03-31 19:28 ` [PATCH v3 5/8] tools/virtio: Add --batch option Eugenio Pérez
` (5 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
From: "Michael S. Tsirkin" <mst@redhat.com>
With this patch applied, new and old code perform identically.
Lots of extra optimizations are now possible, e.g.
we can fetch multiple heads with copy_from/to_user in one go,
and we can get rid of maintaining the log array, and so on.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/test.c | 2 +-
drivers/vhost/vhost.c | 47 ++++++++++++++++++++++++++++++++++++++-----
drivers/vhost/vhost.h | 5 ++++-
3 files changed, 47 insertions(+), 7 deletions(-)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 394e2e5c772d..4b00cd4266ad 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -119,7 +119,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
dev = &n->dev;
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV + 64,
VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 56c5253056ee..1646b1ce312a 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -303,6 +303,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
{
vq->num = 1;
vq->ndescs = 0;
+ vq->first_desc = 0;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
@@ -371,6 +372,11 @@ static int vhost_worker(void *data)
return 0;
}
+static int vhost_vq_num_batch_descs(struct vhost_virtqueue *vq)
+{
+ return vq->max_descs - UIO_MAXIOV;
+}
+
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->descs);
@@ -393,6 +399,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->max_descs = dev->iov_limit;
+ if (vhost_vq_num_batch_descs(vq) < 0) {
+ return -EINVAL;
+ }
vq->descs = kmalloc_array(vq->max_descs,
sizeof(*vq->descs),
GFP_KERNEL);
@@ -1643,6 +1652,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
+ vq->ndescs = vq->first_desc = 0;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
@@ -2211,7 +2221,7 @@ static int fetch_indirect_descs(struct vhost_virtqueue *vq,
return 0;
}
-static int fetch_descs(struct vhost_virtqueue *vq)
+static int fetch_buf(struct vhost_virtqueue *vq)
{
unsigned int i, head, found = 0;
struct vhost_desc *last;
@@ -2224,7 +2234,11 @@ static int fetch_descs(struct vhost_virtqueue *vq)
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (vq->avail_idx == vq->last_avail_idx) {
+ if (unlikely(vq->avail_idx == vq->last_avail_idx)) {
+ /* If we already have work to do, don't bother re-checking. */
+ if (likely(vq->ndescs))
+ return vq->num;
+
if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
@@ -2315,6 +2329,24 @@ static int fetch_descs(struct vhost_virtqueue *vq)
return 0;
}
+static int fetch_descs(struct vhost_virtqueue *vq)
+{
+ int ret = 0;
+
+ if (unlikely(vq->first_desc >= vq->ndescs)) {
+ vq->first_desc = 0;
+ vq->ndescs = 0;
+ }
+
+ if (vq->ndescs)
+ return 0;
+
+ while (!ret && vq->ndescs <= vhost_vq_num_batch_descs(vq))
+ ret = fetch_buf(vq);
+
+ return vq->ndescs ? 0 : ret;
+}
+
/* This looks in the virtqueue and for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
@@ -2340,7 +2372,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
if (unlikely(log))
*log_num = 0;
- for (i = 0; i < vq->ndescs; ++i) {
+ for (i = vq->first_desc; i < vq->ndescs; ++i) {
unsigned iov_count = *in_num + *out_num;
struct vhost_desc *desc = &vq->descs[i];
int access;
@@ -2386,14 +2418,19 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
ret = desc->id;
+
+ if (!(desc->flags & VRING_DESC_F_NEXT))
+ break;
}
- vq->ndescs = 0;
+ vq->first_desc = i + 1;
return ret;
err:
- vhost_discard_vq_desc(vq, 1);
+ for (i = vq->first_desc; i < vq->ndescs; ++i)
+ if (!(vq->descs[i].flags & VRING_DESC_F_NEXT))
+ vhost_discard_vq_desc(vq, 1);
vq->ndescs = 0;
return ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1dbb5e44fba4..e1caca605c56 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -100,6 +100,7 @@ struct vhost_virtqueue {
struct vhost_desc *descs;
int ndescs;
+ int first_desc;
int max_descs;
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
@@ -242,7 +243,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
#define vq_err(vq, fmt, ...) do { \
- pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ pr_err(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
eventfd_signal((vq)->error_ctx, 1);\
} while (0)
@@ -268,6 +269,8 @@ static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
void *private_data)
{
vq->private_data = private_data;
+ vq->ndescs = 0;
+ vq->first_desc = 0;
}
/**
--
2.18.1
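The consumption model after this patch, as a standalone userspace sketch with hypothetical simplified types: fetch_descs() fills descs[0..ndescs) with possibly several chains in one go, and each subsequent vhost_get_vq_desc() call hands out a single chain, remembering in first_desc where the next call should resume.

#include <stdio.h>

#define F_NEXT 0x1 /* stands in for VRING_DESC_F_NEXT */

struct desc { unsigned short id; unsigned short flags; };

int main(void)
{
	/* two prefetched chains: id 0 (two descriptors) and id 2 (one) */
	struct desc descs[] = {
		{ .id = 0, .flags = F_NEXT },
		{ .id = 0, .flags = 0 },
		{ .id = 2, .flags = 0 },
	};
	int ndescs = 3, first_desc = 0;

	while (first_desc < ndescs) {
		int i, id = -1;

		for (i = first_desc; i < ndescs; i++) {
			id = descs[i].id;
			if (!(descs[i].flags & F_NEXT))
				break; /* end of this chain */
		}
		first_desc = i + 1; /* the next call resumes here */
		printf("returning buffer id %d\n", id);
	}
	return 0;
}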
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 5/8] tools/virtio: Add --batch option
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (3 preceding siblings ...)
2020-03-31 19:28 ` [PATCH v3 4/8] vhost: batching fetches Eugenio Pérez
@ 2020-03-31 19:28 ` Eugenio Pérez
2020-03-31 19:28 ` [PATCH v3 6/8] tools/virtio: Add --batch=random option Eugenio Pérez
` (4 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
This allows testing vhost with more than one buffer in flight.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tools/virtio/virtio_test.c | 47 ++++++++++++++++++++++++++++++--------
1 file changed, 37 insertions(+), 10 deletions(-)
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index b427def67e7e..c30de9088f3c 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <getopt.h>
+#include <limits.h>
#include <string.h>
#include <poll.h>
#include <sys/eventfd.h>
@@ -152,11 +153,11 @@ static void wait_for_interrupt(struct vdev_info *dev)
}
static void run_test(struct vdev_info *dev, struct vq_info *vq,
- bool delayed, int bufs)
+ bool delayed, int batch, int bufs)
{
struct scatterlist sl;
long started = 0, completed = 0;
- long completed_before;
+ long completed_before, started_before;
int r, test = 1;
unsigned len;
long long spurious = 0;
@@ -165,28 +166,42 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
for (;;) {
virtqueue_disable_cb(vq->vq);
completed_before = completed;
+ started_before = started;
do {
- if (started < bufs) {
+ while (started < bufs &&
+ (started - completed) < batch) {
sg_init_one(&sl, dev->buf, dev->buf_size);
r = virtqueue_add_outbuf(vq->vq, &sl, 1,
dev->buf + started,
GFP_ATOMIC);
- if (likely(r == 0)) {
- ++started;
- if (unlikely(!virtqueue_kick(vq->vq)))
+ if (unlikely(r != 0)) {
+ if (r == -ENOSPC &&
+ started > started_before)
+ r = 0;
+ else
r = -1;
+ break;
}
- } else
+
+ ++started;
+
+ if (unlikely(!virtqueue_kick(vq->vq))) {
+ r = -1;
+ break;
+ }
+ }
+
+ if (started >= bufs)
r = -1;
/* Flush out completed bufs if any */
- if (virtqueue_get_buf(vq->vq, &len)) {
+ while (virtqueue_get_buf(vq->vq, &len)) {
++completed;
r = 0;
}
} while (r == 0);
- if (completed == completed_before)
+ if (completed == completed_before && started == started_before)
++spurious;
assert(completed <= bufs);
assert(started <= bufs);
@@ -244,6 +259,11 @@ const struct option longopts[] = {
.name = "no-delayed-interrupt",
.val = 'd',
},
+ {
+ .name = "batch",
+ .val = 'b',
+ .has_arg = required_argument,
+ },
{
}
};
@@ -255,6 +275,7 @@ static void help(void)
" [--no-event-idx]"
" [--no-virtio-1]"
" [--delayed-interrupt]"
+ " [--batch=N]"
"\n");
}
@@ -263,6 +284,7 @@ int main(int argc, char **argv)
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+ long batch = 1;
int o;
bool delayed = false;
@@ -289,6 +311,11 @@ int main(int argc, char **argv)
case 'D':
delayed = true;
break;
+ case 'b':
+ batch = strtol(optarg, NULL, 10);
+ assert(batch > 0);
+ assert(batch < (long)INT_MAX + 1);
+ break;
default:
assert(0);
break;
@@ -298,6 +325,6 @@ int main(int argc, char **argv)
done:
vdev_info_init(&dev, features);
vq_info_add(&dev, 256);
- run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+ run_test(&dev, &dev.vqs[0], delayed, batch, 0x100000);
return 0;
}
--
2.18.1
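Assuming the vhost_test module is loaded and the tool is built from tools/virtio, a couple of hypothetical invocations:

./virtio_test --batch=16    # keep up to 16 buffers in flight
./virtio_test               # default batch of 1, one buffer at a time as before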
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 6/8] tools/virtio: Add --batch=random option
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (4 preceding siblings ...)
2020-03-31 19:28 ` [PATCH v3 5/8] tools/virtio: Add --batch option Eugenio Pérez
@ 2020-03-31 19:28 ` Eugenio Pérez
2020-03-31 19:28 ` [PATCH v3 7/8] tools/virtio: Add --reset=random Eugenio Pérez
` (3 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
So we can test with non-deterministic batches in flight.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
tools/virtio/virtio_test.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index c30de9088f3c..4a2b9d11f287 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -19,6 +19,8 @@
#include <linux/virtio_ring.h>
#include "../../drivers/vhost/test.h"
+#define RANDOM_BATCH -1
+
/* Unused */
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
@@ -161,6 +163,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
int r, test = 1;
unsigned len;
long long spurious = 0;
+ const bool random_batch = batch == RANDOM_BATCH;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
for (;;) {
@@ -168,6 +171,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
completed_before = completed;
started_before = started;
do {
+ if (random_batch)
+ batch = (random() % vq->vring.num) + 1;
+
while (started < bufs &&
(started - completed) < batch) {
sg_init_one(&sl, dev->buf, dev->buf_size);
@@ -275,7 +281,7 @@ static void help(void)
" [--no-event-idx]"
" [--no-virtio-1]"
" [--delayed-interrupt]"
- " [--batch=N]"
+ " [--batch=random/N]"
"\n");
}
@@ -312,9 +318,13 @@ int main(int argc, char **argv)
delayed = true;
break;
case 'b':
- batch = strtol(optarg, NULL, 10);
- assert(batch > 0);
- assert(batch < (long)INT_MAX + 1);
+ if (0 == strcmp(optarg, "random")) {
+ batch = RANDOM_BATCH;
+ } else {
+ batch = strtol(optarg, NULL, 10);
+ assert(batch > 0);
+ assert(batch < (long)INT_MAX + 1);
+ }
break;
default:
assert(0);
--
2.18.1
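A hypothetical invocation; the in-flight limit is re-drawn as (random() % vring.num) + 1 on every refill pass, so it always stays within [1, vring.num]:

./virtio_test --batch=random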
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 7/8] tools/virtio: Add --reset=random
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (5 preceding siblings ...)
2020-03-31 19:28 ` [PATCH v3 6/8] tools/virtio: Add --batch=random option Eugenio Pérez
@ 2020-03-31 19:28 ` Eugenio Pérez
2020-03-31 19:28 ` [PATCH v3 8/8] tools/virtio: Make --reset reset ring idx Eugenio Pérez
` (2 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
Currently, it only removes and re-adds the backend, but it will also
reset the vq position in future commits.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/vhost/test.c | 57 ++++++++++++++++++++++++++++++++++++++
drivers/vhost/test.h | 1 +
tools/virtio/virtio_test.c | 44 ++++++++++++++++++++++++++---
3 files changed, 98 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 4b00cd4266ad..102066515b48 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features)
return 0;
}
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+ static void *private_data;
+
+ const bool enable = fd != -1;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_TEST_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ if (!enable) {
+ vhost_poll_stop(&vq->poll);
+ private_data = vq->private_data;
+ vq->private_data = NULL;
+ } else {
+ r = vhost_vq_init_access(vq);
+ vq->private_data = private_data;
+ if (r == 0)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (enable) {
+ vhost_test_flush_vq(n, index);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
+ struct vhost_vring_file backend;
struct vhost_test *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
@@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&test, argp, sizeof test))
return -EFAULT;
return vhost_test_run(n, test);
+ case VHOST_TEST_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_test_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
index 7dd265bfdf81..822bc4bee03a 100644
--- a/drivers/vhost/test.h
+++ b/drivers/vhost/test.h
@@ -4,5 +4,6 @@
/* Start a given test on the virtio null device. 0 stops all tests. */
#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
#endif
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 4a2b9d11f287..93d81cd64ba0 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -20,6 +20,7 @@
#include "../../drivers/vhost/test.h"
#define RANDOM_BATCH -1
+#define RANDOM_RESET -1
/* Unused */
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
@@ -46,6 +47,9 @@ struct vdev_info {
struct vhost_memory *mem;
};
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+ backend = { .fd = 1 };
+
bool vq_notify(struct virtqueue *vq)
{
struct vq_info *info = vq->priv;
@@ -155,10 +159,10 @@ static void wait_for_interrupt(struct vdev_info *dev)
}
static void run_test(struct vdev_info *dev, struct vq_info *vq,
- bool delayed, int batch, int bufs)
+ bool delayed, int batch, int reset_n, int bufs)
{
struct scatterlist sl;
- long started = 0, completed = 0;
+ long started = 0, completed = 0, next_reset = reset_n;
long completed_before, started_before;
int r, test = 1;
unsigned len;
@@ -171,6 +175,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
completed_before = completed;
started_before = started;
do {
+ const bool reset = reset_n && completed > next_reset;
if (random_batch)
batch = (random() % vq->vring.num) + 1;
@@ -200,12 +205,26 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
if (started >= bufs)
r = -1;
+ if (reset) {
+ r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+ &no_backend);
+ assert(!r);
+ }
+
/* Flush out completed bufs if any */
while (virtqueue_get_buf(vq->vq, &len)) {
++completed;
r = 0;
}
+ if (reset) {
+ r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+ &backend);
+ assert(!r);
+
+ while (completed > next_reset)
+ next_reset += completed;
+ }
} while (r == 0);
if (completed == completed_before && started == started_before)
++spurious;
@@ -270,6 +289,11 @@ const struct option longopts[] = {
.val = 'b',
.has_arg = required_argument,
},
+ {
+ .name = "reset",
+ .val = 'r',
+ .has_arg = optional_argument,
+ },
{
}
};
@@ -282,6 +306,7 @@ static void help(void)
" [--no-virtio-1]"
" [--delayed-interrupt]"
" [--batch=random/N]"
+ " [--reset=random/N]"
"\n");
}
@@ -290,7 +315,7 @@ int main(int argc, char **argv)
struct vdev_info dev;
unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
- long batch = 1;
+ long batch = 1, reset = 0;
int o;
bool delayed = false;
@@ -326,6 +351,17 @@ int main(int argc, char **argv)
assert(batch < (long)INT_MAX + 1);
}
break;
+ case 'r':
+ if (!optarg) {
+ reset = 1;
+ } else if (0 == strcmp(optarg, "random")) {
+ reset = RANDOM_RESET;
+ } else {
+ reset = strtol(optarg, NULL, 10);
+ assert(reset >= 0);
+ assert(reset < (long)INT_MAX + 1);
+ }
+ break;
default:
assert(0);
break;
@@ -335,6 +371,6 @@ int main(int argc, char **argv)
done:
vdev_info_init(&dev, features);
vq_info_add(&dev, 256);
- run_test(&dev, &dev.vqs[0], delayed, batch, 0x100000);
+ run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
return 0;
}
--
2.18.1
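A sketch of the detach/reattach cycle the new ioctl enables from userspace, assuming an open vhost-test control fd; this mirrors what run_test() above does around the buffer flush:

#include <sys/ioctl.h>
#include <linux/vhost.h>
#include "../../drivers/vhost/test.h"

/* any fd != -1 enables the backend, per vhost_test_set_backend() */
static const struct vhost_vring_file no_backend = { .fd = -1 };
static const struct vhost_vring_file backend = { .fd = 1 };

static int cycle_backend(int control)
{
	/* detach: test.c stashes vq->private_data and stops polling */
	if (ioctl(control, VHOST_TEST_SET_BACKEND, &no_backend))
		return -1;
	/* ... drain completed buffers while the backend is gone ... */
	/* reattach: private_data is restored and polling restarts */
	return ioctl(control, VHOST_TEST_SET_BACKEND, &backend);
}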
^ permalink raw reply related [flat|nested] 15+ messages in thread

* [PATCH v3 8/8] tools/virtio: Make --reset reset ring idx
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (6 preceding siblings ...)
2020-03-31 19:28 ` [PATCH v3 7/8] tools/virtio: Add --reset=random Eugenio Pérez
@ 2020-03-31 19:28 ` Eugenio Pérez
2020-03-31 19:51 ` [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Michael S. Tsirkin
2020-04-01 7:19 ` Christian Borntraeger
9 siblings, 0 replies; 15+ messages in thread
From: Eugenio Pérez @ 2020-03-31 19:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Eugenio Pérez,
Christian Borntraeger, Halil Pasic, Cornelia Huck
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
drivers/virtio/virtio_ring.c | 29 +++++++++++++++++++++++++++++
tools/virtio/linux/virtio.h | 2 ++
tools/virtio/virtio_test.c | 28 +++++++++++++++++++++++++++-
3 files changed, 58 insertions(+), 1 deletion(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 867c7ebd3f10..01b322ee2e04 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1810,6 +1810,35 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
+#ifndef __KERNEL__
+
+/**
+ * virtqueue_reset_free_head - Reset the members of the split ring to 0.
+ * @_vq: Virtqueue to reset.
+ *
+ * At the moment this is only meant for debugging the ring index change; do
+ * not use it in production.
+ */
+void virtqueue_reset_free_head(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ // vq->last_used_idx = 0;
+ vq->num_added = 0;
+
+ vq->split.queue_size_in_bytes = 0;
+ vq->split.avail_flags_shadow = 0;
+ vq->split.avail_idx_shadow = 0;
+
+ memset(vq->split.desc_state, 0, vq->split.vring.num *
+ sizeof(struct vring_desc_state_split));
+
+ vq->free_head = 0;
+}
+EXPORT_SYMBOL_GPL(virtqueue_reset_free_head);
+
+#endif
+
/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @_vq: the struct virtqueue
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index b751350d4ce8..5d33eab6b814 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -65,4 +65,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
+void virtqueue_reset_free_head(struct virtqueue *vq);
+
#endif
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 93d81cd64ba0..bf21ece30594 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -49,6 +49,7 @@ struct vdev_info {
static const struct vhost_vring_file no_backend = { .fd = -1 },
backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
bool vq_notify(struct virtqueue *vq)
{
@@ -218,10 +219,33 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
}
if (reset) {
+ struct vhost_vring_state s = { .index = 0 };
+ int i;
+ vq->vring.avail->idx = 0;
+ vq->vq->num_free = vq->vring.num;
+
+ // Put everything in free lists.
+ for (i = 0; i < vq->vring.num-1; i++)
+ vq->vring.desc[i].next =
+ cpu_to_virtio16(&dev->vdev,
+ i + 1);
+ vq->vring.desc[vq->vring.num-1].next = 0;
+ virtqueue_reset_free_head(vq->vq);
+
+ r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+ &s);
+ assert(!r);
+
+ s.num = 0;
+ r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+ &null_state);
+ assert(!r);
+
r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
&backend);
assert(!r);
+ started = completed;
while (completed > next_reset)
next_reset += completed;
}
@@ -243,7 +267,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
test = 0;
r = ioctl(dev->control, VHOST_TEST_RUN, &test);
assert(r >= 0);
- fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+ fprintf(stderr,
+ "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+ spurious, started, completed);
}
const char optstring[] = "h";
--
2.18.1
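On the driver side the reset boils down to this sequence (a sketch assuming an open control fd; the guest-side ring state is rewound separately, as the hunk above shows):

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* rewind virtqueue 0 of the device back to index 0 */
static int reset_vring_base(int control)
{
	struct vhost_vring_state s = { .index = 0 };

	/* read back the device's current avail index */
	if (ioctl(control, VHOST_GET_VRING_BASE, &s))
		return -1;
	/* restart from 0; this must also drop any batched descriptors,
	 * which is exactly what this series fixes */
	s.num = 0;
	return ioctl(control, VHOST_SET_VRING_BASE, &s);
}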
^ permalink raw reply related [flat|nested] 15+ messages in thread

* Re: [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (7 preceding siblings ...)
2020-03-31 19:28 ` [PATCH v3 8/8] tools/virtio: Make --reset reset ring idx Eugenio Pérez
@ 2020-03-31 19:51 ` Michael S. Tsirkin
2020-04-01 7:19 ` Christian Borntraeger
9 siblings, 0 replies; 15+ messages in thread
From: Michael S. Tsirkin @ 2020-03-31 19:51 UTC (permalink / raw)
To: Eugenio Pérez
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Christian Borntraeger,
Halil Pasic, Cornelia Huck
On Tue, Mar 31, 2020 at 09:27:56PM +0200, Eugenio Pérez wrote:
> Vhost did not properly reset the batched descriptors on the
> SET_VRING_BASE event. Because of that, it is possible to return an
> invalid descriptor to the guest.
>
> This series amends this, resetting them every time the backend changes,
> and creates a test to assert correct behavior. To do that, it needs to
> expose a new function in virtio_ring, virtqueue_reset_free_head, only
> in test code.
>
> Another useful thing would be to check whether the mutex is properly
> held in the vq private_data accessors. Not sure if the mutex debug code
> allows that, similar to C++ unique_lock::owns_lock. The accessors do not
> acquire it themselves because caller code holds the mutex in order to
> perform more actions.
I pushed the vhost branch with patch 1, please send patches on top of that!
> v3:
> * Rename accessor functions.
> * Make scsi and test use the accessors too.
>
> v2:
> * Squashed commits.
> * Create vq private_data accessors (mst).
>
> This is meant to be applied on top of
> c4f1c41a6094582903c75c0dcfacb453c959d457 in
> git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git.
>
> Eugenio Pérez (5):
> vhost: Create accessors for virtqueues private_data
> tools/virtio: Add --batch option
> tools/virtio: Add --batch=random option
> tools/virtio: Add --reset=random
> tools/virtio: Make --reset reset ring idx
>
> Michael S. Tsirkin (3):
> vhost: option to fetch descriptors through an independent struct
> vhost: use batched version by default
> vhost: batching fetches
>
> drivers/vhost/net.c | 28 ++--
> drivers/vhost/scsi.c | 14 +-
> drivers/vhost/test.c | 69 ++++++++-
> drivers/vhost/test.h | 1 +
> drivers/vhost/vhost.c | 271 +++++++++++++++++++++++------------
> drivers/vhost/vhost.h | 44 +++++-
> drivers/vhost/vsock.c | 14 +-
> drivers/virtio/virtio_ring.c | 29 ++++
> tools/virtio/linux/virtio.h | 2 +
> tools/virtio/virtio_test.c | 123 ++++++++++++++--
> 10 files changed, 456 insertions(+), 139 deletions(-)
>
> --
> 2.18.1
* Re: [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
2020-03-31 19:27 [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Eugenio Pérez
` (8 preceding siblings ...)
2020-03-31 19:51 ` [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call Michael S. Tsirkin
@ 2020-04-01 7:19 ` Christian Borntraeger
2020-04-01 18:40 ` Eugenio Perez Martin
9 siblings, 1 reply; 15+ messages in thread
From: Christian Borntraeger @ 2020-04-01 7:19 UTC (permalink / raw)
To: Eugenio Pérez, Michael S. Tsirkin
Cc: linux-kernel@vger.kernel.org, Stephen Rothwell, kvm list,
Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Halil Pasic,
Cornelia Huck
On 31.03.20 21:27, Eugenio Pérez wrote:
> Vhost did not properly reset the batched descriptors on the
> SET_VRING_BASE event. Because of that, it is possible to return an
> invalid descriptor to the guest.
>
> This series amends this, resetting them every time the backend
> changes, and creates a test to assert the correct behavior. To do
> that, it needs to expose a new function in virtio_ring,
> virtqueue_reset_free_head, for test code only.
>
> Another useful thing would be to check whether the mutex is properly
> held in the vq private_data accessors. Not sure if the mutex debug
> code allows that, similar to C++ unique_lock::owns_lock. The
> accessors do not acquire the mutex themselves because the caller
> already holds it in order to perform more actions.
>
> v3:
> * Rename accessor functions.
> * Make scsi and test use the accessors too.
>
> v2:
> * Squashed commits.
> * Create vq private_data accessors (mst).
>
> This is meant to be applied on top of
> c4f1c41a6094582903c75c0dcfacb453c959d457 in
> git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git.
A quick test on s390 looks good.
* Re: [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
2020-04-01 7:19 ` Christian Borntraeger
@ 2020-04-01 18:40 ` Eugenio Perez Martin
2020-04-01 18:44 ` Christian Borntraeger
0 siblings, 1 reply; 15+ messages in thread
From: Eugenio Perez Martin @ 2020-04-01 18:40 UTC (permalink / raw)
To: Christian Borntraeger
Cc: Michael S. Tsirkin, linux-kernel@vger.kernel.org,
Stephen Rothwell, kvm list, Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Halil Pasic,
Cornelia Huck
On Wed, Apr 1, 2020 at 9:19 AM Christian Borntraeger
<borntraeger@de.ibm.com> wrote:
>
> On 31.03.20 21:27, Eugenio Pérez wrote:
> > Vhost did not properly reset the batched descriptors on the
> > SET_VRING_BASE event. Because of that, it is possible to return an
> > invalid descriptor to the guest.
> >
> > This series amends this, resetting them every time the backend
> > changes, and creates a test to assert the correct behavior. To do
> > that, it needs to expose a new function in virtio_ring,
> > virtqueue_reset_free_head, for test code only.
> >
> > Another useful thing would be to check whether the mutex is properly
> > held in the vq private_data accessors. Not sure if the mutex debug
> > code allows that, similar to C++ unique_lock::owns_lock. The
> > accessors do not acquire the mutex themselves because the caller
> > already holds it in order to perform more actions.
>
>
>
> >
> > v3:
> > * Rename accessor functions.
> > * Make scsi and test use the accessors too.
> >
> > v2:
> > * Squashed commits.
> > * Create vq private_data accessors (mst).
> >
> > This is meant to be applied on top of
> > c4f1c41a6094582903c75c0dcfacb453c959d457 in
> > git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git.
>
>
> A quick test on s390 looks good.
>
Really good to know :).
Would it be possible to investigate when qemu launches the offending ioctls?
Thanks!
* Re: [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
2020-04-01 18:40 ` Eugenio Perez Martin
@ 2020-04-01 18:44 ` Christian Borntraeger
2020-04-01 19:13 ` Christian Borntraeger
0 siblings, 1 reply; 15+ messages in thread
From: Christian Borntraeger @ 2020-04-01 18:44 UTC (permalink / raw)
To: Eugenio Perez Martin
Cc: Michael S. Tsirkin, linux-kernel@vger.kernel.org,
Stephen Rothwell, kvm list, Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Halil Pasic,
Cornelia Huck
On 01.04.20 20:40, Eugenio Perez Martin wrote:
> On Wed, Apr 1, 2020 at 9:19 AM Christian Borntraeger
> <borntraeger@de.ibm.com> wrote:
>>
>> On 31.03.20 21:27, Eugenio Pérez wrote:
>>> Vhost did not properly reset the batched descriptors on the
>>> SET_VRING_BASE event. Because of that, it is possible to return an
>>> invalid descriptor to the guest.
>>>
>>> This series amends this, resetting them every time the backend
>>> changes, and creates a test to assert the correct behavior. To do
>>> that, it needs to expose a new function in virtio_ring,
>>> virtqueue_reset_free_head, for test code only.
>>>
>>> Another useful thing would be to check whether the mutex is properly
>>> held in the vq private_data accessors. Not sure if the mutex debug
>>> code allows that, similar to C++ unique_lock::owns_lock. The
>>> accessors do not acquire the mutex themselves because the caller
>>> already holds it in order to perform more actions.
>>
>>
>>
>>>
>>> v3:
>>> * Rename accessor functions.
>>> * Make scsi and test use the accessors too.
>>>
>>> v2:
>>> * Squashed commits.
>>> * Create vq private_data accessors (mst).
>>>
>>> This is meant to be applied on top of
>>> c4f1c41a6094582903c75c0dcfacb453c959d457 in
>>> git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git.
>>
>>
>> A quick test on s390 looks good.
>>
>
> Really good to know :).
>
> Would it be possible to investigate when qemu launches the offending ioctls?
During guest reboot. This is obvious, no?
* Re: [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
2020-04-01 18:44 ` Christian Borntraeger
@ 2020-04-01 19:13 ` Christian Borntraeger
2020-04-02 8:17 ` Eugenio Perez Martin
0 siblings, 1 reply; 15+ messages in thread
From: Christian Borntraeger @ 2020-04-01 19:13 UTC (permalink / raw)
To: Eugenio Perez Martin
Cc: Michael S. Tsirkin, linux-kernel@vger.kernel.org,
Stephen Rothwell, kvm list, Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Halil Pasic,
Cornelia Huck
>> Would it be possible to investigate when qemu launches the offending ioctls?
>
> During guest reboot. This is obvious, no?
>
For example, during reboot we re-set up the virtqueues:
#1 0x00000000010f3e7a in vhost_kernel_set_vring_base (dev=0x21f5f30, ring=0x3ff84d74e88) at /home/cborntra/REPOS/qemu/hw/virtio/vhost-backend.c:126
#2 0x00000000010f2f92 in vhost_virtqueue_start (idx=0, vq=0x21f6180, vdev=0x241d570, dev=0x21f5f30) at /home/cborntra/REPOS/qemu/hw/virtio/vhost.c:1016
#3 vhost_dev_start (hdev=hdev@entry=0x21f5f30, vdev=vdev@entry=0x241d570) at /home/cborntra/REPOS/qemu/hw/virtio/vhost.c:1646
#4 0x00000000011c265a in vhost_net_start_one (dev=0x241d570, net=0x21f5f30) at /home/cborntra/REPOS/qemu/hw/net/vhost_net.c:236
#5 vhost_net_start (dev=dev@entry=0x241d570, ncs=0x2450f40, total_queues=total_queues@entry=1) at /home/cborntra/REPOS/qemu/hw/net/vhost_net.c:338
#6 0x00000000010cfdfe in virtio_net_vhost_status (status=15 '\017', n=0x241d570) at /home/cborntra/REPOS/qemu/hw/net/virtio-net.c:250
#7 virtio_net_set_status (vdev=0x241d570, status=<optimized out>) at /home/cborntra/REPOS/qemu/hw/net/virtio-net.c:331
#8 0x00000000010eaef4 in virtio_set_status (vdev=vdev@entry=0x241d570, val=<optimized out>) at /home/cborntra/REPOS/qemu/hw/virtio/virtio.c:1956
#9 0x000000000110ba78 in virtio_ccw_cb (sch=0x2422c30, ccw=...) at /home/cborntra/REPOS/qemu/hw/s390x/virtio-ccw.c:509
#10 0x00000000011053fc in css_interpret_ccw (sch=sch@entry=0x2422c30, ccw_addr=<optimized out>, suspend_allowed=suspend_allowed@entry=false) at /home/cborntra/REPOS/qemu/hw/s390x/css.c:1108
#11 0x000000000110557c in sch_handle_start_func_virtual (sch=0x2422c30) at /home/cborntra/REPOS/qemu/hw/s390x/css.c:1162
#12 do_subchannel_work_virtual (sch=0x2422c30) at /home/cborntra/REPOS/qemu/hw/s390x/css.c:1256
#13 0x0000000001168592 in ioinst_handle_ssch (cpu=cpu@entry=0x234b920, reg1=<optimized out>, ipb=<optimized out>, ra=ra@entry=0) at /home/cborntra/REPOS/qemu/target/s390x/ioinst.c:218
#14 0x0000000001170012 in handle_b2 (ipa1=<optimized out>, run=0x3ff97880000, cpu=0x234b920) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1279
#15 handle_instruction (run=0x3ff97880000, cpu=0x234b920) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1664
#16 handle_intercept (cpu=0x234b920) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1747
#17 kvm_arch_handle_exit (cs=cs@entry=0x234b920, run=run@entry=0x3ff97880000) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1937
#18 0x00000000010972dc in kvm_cpu_exec (cpu=cpu@entry=0x234b920) at /home/cborntra/REPOS/qemu/accel/kvm/kvm-all.c:2445
#19 0x00000000010784f6 in qemu_kvm_cpu_thread_fn (arg=0x234b920) at /home/cborntra/REPOS/qemu/cpus.c:1246
#20 qemu_kvm_cpu_thread_fn (arg=arg@entry=0x234b920) at /home/cborntra/REPOS/qemu/cpus.c:1218
#21 0x00000000013891fa in qemu_thread_start (args=0x2372f30) at /home/cborntra/REPOS/qemu/util/qemu-thread-posix.c:519
#22 0x000003ff93809ed6 in start_thread () from target:/lib64/libpthread.so.0
#23 0x000003ff93705e46 in thread_start () from target:/lib64/libc.so.6
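Frame #1 ends up as a plain ioctl on the vhost fd. Roughly this, as a
userspace sketch; the fd handling, indices and error handling here are
illustrative only, not QEMU's actual code:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static void restart_vring(int vhost_fd, int backend_fd, unsigned int idx)
{
	struct vhost_vring_state state = { .index = idx, .num = 0 };
	struct vhost_vring_file file = { .index = idx, .fd = backend_fd };

	/* Rewind the ring base, as vhost_virtqueue_start() does... */
	if (ioctl(vhost_fd, VHOST_SET_VRING_BASE, &state))
		perror("VHOST_SET_VRING_BASE");

	/* ...then re-attach the backend; with this series the kernel also
	 * drops any batched-but-unreturned descriptors at that point. */
	if (ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &file))
		perror("VHOST_NET_SET_BACKEND");
}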
* Re: [PATCH v3 0/8] vhost: Reset batched descriptors on SET_VRING_BASE call
2020-04-01 19:13 ` Christian Borntraeger
@ 2020-04-02 8:17 ` Eugenio Perez Martin
0 siblings, 0 replies; 15+ messages in thread
From: Eugenio Perez Martin @ 2020-04-02 8:17 UTC (permalink / raw)
To: Christian Borntraeger
Cc: Michael S. Tsirkin, linux-kernel@vger.kernel.org,
Stephen Rothwell, kvm list, Linux Next Mailing List,
virtualization@lists.linux-foundation.org, Halil Pasic,
Cornelia Huck
On Wed, Apr 1, 2020 at 9:13 PM Christian Borntraeger
<borntraeger@de.ibm.com> wrote:
>
> >> Would it be possible to investigate when qemu launches the offending ioctls?
> >
> > During guest reboot. This is obvious, no?
> >
>
Got it. I thought you received it after the reboot AND once you had
sent the first packets, since you said that some pings worked. But now
that this point is clear, I think we can move forward :). Thank you
very much!
>
> For example during reboot we do re-setup the virt queues:
>
> #1 0x00000000010f3e7a in vhost_kernel_set_vring_base (dev=0x21f5f30, ring=0x3ff84d74e88) at /home/cborntra/REPOS/qemu/hw/virtio/vhost-backend.c:126
> #2 0x00000000010f2f92 in vhost_virtqueue_start (idx=0, vq=0x21f6180, vdev=0x241d570, dev=0x21f5f30) at /home/cborntra/REPOS/qemu/hw/virtio/vhost.c:1016
> #3 vhost_dev_start (hdev=hdev@entry=0x21f5f30, vdev=vdev@entry=0x241d570) at /home/cborntra/REPOS/qemu/hw/virtio/vhost.c:1646
> #4 0x00000000011c265a in vhost_net_start_one (dev=0x241d570, net=0x21f5f30) at /home/cborntra/REPOS/qemu/hw/net/vhost_net.c:236
> #5 vhost_net_start (dev=dev@entry=0x241d570, ncs=0x2450f40, total_queues=total_queues@entry=1) at /home/cborntra/REPOS/qemu/hw/net/vhost_net.c:338
> #6 0x00000000010cfdfe in virtio_net_vhost_status (status=15 '\017', n=0x241d570) at /home/cborntra/REPOS/qemu/hw/net/virtio-net.c:250
> #7 virtio_net_set_status (vdev=0x241d570, status=<optimized out>) at /home/cborntra/REPOS/qemu/hw/net/virtio-net.c:331
> #8 0x00000000010eaef4 in virtio_set_status (vdev=vdev@entry=0x241d570, val=<optimized out>) at /home/cborntra/REPOS/qemu/hw/virtio/virtio.c:1956
> #9 0x000000000110ba78 in virtio_ccw_cb (sch=0x2422c30, ccw=...) at /home/cborntra/REPOS/qemu/hw/s390x/virtio-ccw.c:509
> #10 0x00000000011053fc in css_interpret_ccw (sch=sch@entry=0x2422c30, ccw_addr=<optimized out>, suspend_allowed=suspend_allowed@entry=false) at /home/cborntra/REPOS/qemu/hw/s390x/css.c:1108
> #11 0x000000000110557c in sch_handle_start_func_virtual (sch=0x2422c30) at /home/cborntra/REPOS/qemu/hw/s390x/css.c:1162
> #12 do_subchannel_work_virtual (sch=0x2422c30) at /home/cborntra/REPOS/qemu/hw/s390x/css.c:1256
> #13 0x0000000001168592 in ioinst_handle_ssch (cpu=cpu@entry=0x234b920, reg1=<optimized out>, ipb=<optimized out>, ra=ra@entry=0) at /home/cborntra/REPOS/qemu/target/s390x/ioinst.c:218
> #14 0x0000000001170012 in handle_b2 (ipa1=<optimized out>, run=0x3ff97880000, cpu=0x234b920) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1279
> #15 handle_instruction (run=0x3ff97880000, cpu=0x234b920) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1664
> #16 handle_intercept (cpu=0x234b920) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1747
> #17 kvm_arch_handle_exit (cs=cs@entry=0x234b920, run=run@entry=0x3ff97880000) at /home/cborntra/REPOS/qemu/target/s390x/kvm.c:1937
> #18 0x00000000010972dc in kvm_cpu_exec (cpu=cpu@entry=0x234b920) at /home/cborntra/REPOS/qemu/accel/kvm/kvm-all.c:2445
> #19 0x00000000010784f6 in qemu_kvm_cpu_thread_fn (arg=0x234b920) at /home/cborntra/REPOS/qemu/cpus.c:1246
> #20 qemu_kvm_cpu_thread_fn (arg=arg@entry=0x234b920) at /home/cborntra/REPOS/qemu/cpus.c:1218
> #21 0x00000000013891fa in qemu_thread_start (args=0x2372f30) at /home/cborntra/REPOS/qemu/util/qemu-thread-posix.c:519
> #22 0x000003ff93809ed6 in start_thread () from target:/lib64/libpthread.so.0
> #23 0x000003ff93705e46 in thread_start () from target:/lib64/libc.so.6
>