From: Andi Kleen <ak@linux.intel.com>
To: mst@redhat.com
Cc: sathyanarayanan.kuppuswamy@linux.intel.com,
	Andi Kleen <ak@linux.intel.com>,
	x86@kernel.org, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org,
	iommu@lists.linux-foundation.org, jpoimboe@redhat.com,
	robin.murphy@arm.com, hch@lst.de, m.szyprowski@samsung.com
Subject: [PATCH v1 3/8] virtio: Harden split buffer detachment
Date: Wed,  2 Jun 2021 17:41:28 -0700
Message-ID: <20210603004133.4079390-4-ak@linux.intel.com>
In-Reply-To: <20210603004133.4079390-1-ak@linux.intel.com>

Harden the split buffer detachment path by adding boundary checks. Note
that when such a check fails we may fail to unmap some swiotlb mappings,
which could result in a leak and a DoS. That is acceptable, because a
malicious host can DoS us anyway.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
 drivers/virtio/virtio_ring.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
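
A note for reviewers (after the cut line, so not part of the commit
message): the bounds checks below rely on the inside_split_ring()
helper introduced in patch 2/8 of this series. A minimal sketch of what
that helper is assumed to look like, doing no more than validating a
descriptor index against the ring size:

static inline bool inside_split_ring(struct vring_virtqueue *vq,
				     unsigned int index)
{
	/* Reject descriptor indexes at or beyond the end of the ring. */
	return !WARN(index >= vq->split.vring.num,
		     "desc index %u out of bounds (%u)\n",
		     index, vq->split.vring.num);
}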

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d37ff5a0ff58..1e9aa1e95e1b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -651,12 +651,19 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
 	return needs_kick;
 }
 
-static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
-			     void **ctx)
+static int detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
+			    void **ctx)
 {
 	unsigned int i, j;
 	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
 
+	/* We'll leak DMA mappings when this happens, but nothing
+	 * can be done about that. In the worst case the host
+	 * could DoS us, but it can of course do that anyway.
+	 */
+	if (!inside_split_ring(vq, head))
+		return -EIO;
+
 	/* Clear data ptr. */
 	vq->split.desc_state[head].data = NULL;
 
@@ -666,6 +673,8 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 	while (vq->split.vring.desc[i].flags & nextflag) {
 		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
 		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
+		if (!inside_split_ring(vq, i))
+			return -EIO;
 		vq->vq.num_free++;
 	}
 
@@ -684,7 +693,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 
 		/* Free the indirect table, if any, now that it's unmapped. */
 		if (!indir_desc)
-			return;
+			return 0;
 
 		len = virtio32_to_cpu(vq->vq.vdev,
 				vq->split.vring.desc[head].len);
@@ -701,6 +710,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
 	} else if (ctx) {
 		*ctx = vq->split.desc_state[head].indir_desc;
 	}
+	return 0;
 }
 
 static inline bool more_used_split(const struct vring_virtqueue *vq)
@@ -717,6 +727,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
 	void *ret;
 	unsigned int i;
 	u16 last_used;
+	int err;
 
 	START_USE(vq);
 
@@ -751,7 +762,12 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
 
 	/* detach_buf_split clears data, so grab it now. */
 	ret = vq->split.desc_state[i].data;
-	detach_buf_split(vq, i, ctx);
+	err = detach_buf_split(vq, i, ctx);
+	if (err) {
+		END_USE(vq);
+		return NULL;
+	}
+
 	vq->last_used_idx++;
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
@@ -863,6 +879,7 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
 		/* detach_buf_split clears data, so grab it now. */
 		buf = vq->split.desc_state[i].data;
 		detach_buf_split(vq, i, NULL);
+		/* No need to check for an error, since nothing is returned. */
 		vq->split.avail_idx_shadow--;
 		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
 				vq->split.avail_idx_shadow);
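
A note for reviewers: with this change virtqueue_get_buf_ctx_split()
returns NULL when the ring is found to be corrupted, the same value
callers already see for an empty used ring, so existing drivers degrade
gracefully. A hypothetical driver-side drain loop illustrating this
(drain_used_buffers() and process_buffer() are illustrative, not part
of this patch):

#include <linux/virtio.h>

/* Drain all completed buffers from a virtqueue. */
static void drain_used_buffers(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	/*
	 * A NULL return now covers both an empty used ring and a
	 * corrupted ring; either way the loop terminates safely.
	 */
	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
		process_buffer(buf, len);	/* illustrative callback */
}
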
-- 
2.25.4
