From: Kevin Wolf <kwolf@redhat.com>
To: qemu-block@nongnu.org
Cc: kwolf@redhat.com, peter.maydell@linaro.org, qemu-devel@nongnu.org
Subject: [PULL 08/14] vhost-user-blk-test: test discard/write zeroes invalid inputs
Date: Fri, 14 May 2021 18:31:18 +0200 [thread overview]
Message-ID: <20210514163124.251741-9-kwolf@redhat.com> (raw)
In-Reply-To: <20210514163124.251741-1-kwolf@redhat.com>
From: Stefan Hajnoczi <stefanha@redhat.com>
Exercise input validation code paths in
block/export/vhost-user-blk-server.c.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20210309094106.196911-5-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20210322092327.150720-4-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
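A note for reviewers (below the ---, so not destined for the commit message):
the raw offsets used by the test, i.e. the 16-byte header at req_addr and the
single status byte after the payload, follow the guest-visible virtio-blk
request layout. The sketch below is a minimal standalone restatement of that
layout under local stand-in struct names (blk_outhdr, blk_dwz); the real
definitions come from the virtio headers the test already uses.

#include <assert.h>
#include <stdint.h>

/* Mirrors struct virtio_blk_outhdr: the 16 bytes at the start of every
 * request descriptor chain. */
struct blk_outhdr {
    uint32_t type;      /* VIRTIO_BLK_T_DISCARD or VIRTIO_BLK_T_WRITE_ZEROES here */
    uint32_t ioprio;    /* reserved */
    uint64_t sector;
};

/* Mirrors struct virtio_blk_discard_write_zeroes: the payload that follows
 * the header; the device writes one status byte after it. */
struct blk_dwz {
    uint64_t sector;
    uint32_t num_sectors;
    uint32_t flags;     /* only VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP is defined */
};

int main(void)
{
    /* The "req_addr + 16" arithmetic in the test assumes a 16-byte header;
     * the payload length is what the test passes as sizeof(dwz_hdr). */
    assert(sizeof(struct blk_outhdr) == 16);
    assert(sizeof(struct blk_dwz) == 16);
    return 0;
}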
tests/qtest/vhost-user-blk-test.c | 124 ++++++++++++++++++++++++++++++
1 file changed, 124 insertions(+)
diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c
index d37e1c30bd..8796c74ca4 100644
--- a/tests/qtest/vhost-user-blk-test.c
+++ b/tests/qtest/vhost-user-blk-test.c
@@ -94,6 +94,124 @@ static uint64_t virtio_blk_request(QGuestAllocator *alloc, QVirtioDevice *d,
return addr;
}
+static void test_invalid_discard_write_zeroes(QVirtioDevice *dev,
+ QGuestAllocator *alloc,
+ QTestState *qts,
+ QVirtQueue *vq,
+ uint32_t type)
+{
+ QVirtioBlkReq req;
+ struct virtio_blk_discard_write_zeroes dwz_hdr;
+ struct virtio_blk_discard_write_zeroes dwz_hdr2[2];
+ uint64_t req_addr;
+ uint32_t free_head;
+ uint8_t status;
+
+ /* More than one dwz is not supported */
+ req.type = type;
+ req.data = (char *) dwz_hdr2;
+ dwz_hdr2[0].sector = 0;
+ dwz_hdr2[0].num_sectors = 1;
+ dwz_hdr2[0].flags = 0;
+ dwz_hdr2[1].sector = 1;
+ dwz_hdr2[1].num_sectors = 1;
+ dwz_hdr2[1].flags = 0;
+
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr2[0]);
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr2[1]);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr2));
+
+ free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr2), false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr2), 1, true,
+ false);
+
+ qvirtqueue_kick(qts, dev, vq, free_head);
+
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 16 + sizeof(dwz_hdr2));
+ g_assert_cmpint(status, ==, VIRTIO_BLK_S_UNSUPP);
+
+ guest_free(alloc, req_addr);
+
+ /* num_sectors must be less than config->max_write_zeroes_sectors */
+ req.type = type;
+ req.data = (char *) &dwz_hdr;
+ dwz_hdr.sector = 0;
+ dwz_hdr.num_sectors = 0xffffffff;
+ dwz_hdr.flags = 0;
+
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
+
+ free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
+ false);
+
+ qvirtqueue_kick(qts, dev, vq, free_head);
+
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
+ g_assert_cmpint(status, ==, VIRTIO_BLK_S_IOERR);
+
+ guest_free(alloc, req_addr);
+
+ /* sector must be less than the device capacity */
+ req.type = type;
+ req.data = (char *) &dwz_hdr;
+ dwz_hdr.sector = TEST_IMAGE_SIZE / 512 + 1;
+ dwz_hdr.num_sectors = 1;
+ dwz_hdr.flags = 0;
+
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
+
+ free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
+ false);
+
+ qvirtqueue_kick(qts, dev, vq, free_head);
+
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
+ g_assert_cmpint(status, ==, VIRTIO_BLK_S_IOERR);
+
+ guest_free(alloc, req_addr);
+
+ /* reserved flag bits must be zero */
+ req.type = type;
+ req.data = (char *) &dwz_hdr;
+ dwz_hdr.sector = 0;
+ dwz_hdr.num_sectors = 1;
+ dwz_hdr.flags = ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
+
+ virtio_blk_fix_dwz_hdr(dev, &dwz_hdr);
+
+ req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr));
+
+ free_head = qvirtqueue_add(qts, vq, req_addr, 16, false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16, sizeof(dwz_hdr), false, true);
+ qvirtqueue_add(qts, vq, req_addr + 16 + sizeof(dwz_hdr), 1, true,
+ false);
+
+ qvirtqueue_kick(qts, dev, vq, free_head);
+
+ qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
+ QVIRTIO_BLK_TIMEOUT_US);
+ status = readb(req_addr + 16 + sizeof(dwz_hdr));
+ g_assert_cmpint(status, ==, VIRTIO_BLK_S_UNSUPP);
+
+ guest_free(alloc, req_addr);
+}
+
/* Returns the request virtqueue so the caller can perform further tests */
static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc)
{
@@ -235,6 +353,9 @@ static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc)
g_free(data);
guest_free(alloc, req_addr);
+
+ test_invalid_discard_write_zeroes(dev, alloc, qts, vq,
+ VIRTIO_BLK_T_WRITE_ZEROES);
}
if (features & (1u << VIRTIO_BLK_F_DISCARD)) {
@@ -263,6 +384,9 @@ static QVirtQueue *test_basic(QVirtioDevice *dev, QGuestAllocator *alloc)
g_assert_cmpint(status, ==, 0);
guest_free(alloc, req_addr);
+
+ test_invalid_discard_write_zeroes(dev, alloc, qts, vq,
+ VIRTIO_BLK_T_DISCARD);
}
if (features & (1u << VIRTIO_F_ANY_LAYOUT)) {
--
2.30.2