From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Amit Shah <amit.shah@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH 08/18] virtio: Return error from virtio_add_queue
Date: Fri, 17 Apr 2015 15:59:23 +0800 [thread overview]
Message-ID: <1429257573-7359-9-git-send-email-famz@redhat.com> (raw)
In-Reply-To: <1429257573-7359-1-git-send-email-famz@redhat.com>
All callers pass in error_abort for now. Error handling will be added in
separate patches later.
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/9pfs/virtio-9p-device.c | 2 +-
hw/block/virtio-blk.c | 2 +-
hw/char/virtio-serial-bus.c | 14 ++++++++------
hw/net/virtio-net.c | 24 ++++++++++++++++--------
hw/scsi/virtio-scsi.c | 6 +++---
hw/virtio/virtio-balloon.c | 9 ++++++---
hw/virtio/virtio-rng.c | 2 +-
hw/virtio/virtio.c | 13 ++++++++++---
include/hw/virtio/virtio.h | 3 ++-
9 files changed, 48 insertions(+), 27 deletions(-)
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 30492ec..6281b89 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -61,7 +61,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
}
- s->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
+ s->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output, &error_abort);
v9fs_path_init(&path);
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 0b66ee1..c72a1a3 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -904,7 +904,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
s->rq = NULL;
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
- s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
+ s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output, &error_abort);
s->complete_request = virtio_blk_complete_request;
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 76a934b..1e8062e 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -999,9 +999,9 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
* sizeof(VirtQueue *));
/* Add a queue for host to guest transfers for port 0 (backward compat) */
- vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input);
+ vser->ivqs[0] = virtio_add_queue(vdev, 128, handle_input, &error_abort);
/* Add a queue for guest to host transfers for port 0 (backward compat) */
- vser->ovqs[0] = virtio_add_queue(vdev, 128, handle_output);
+ vser->ovqs[0] = virtio_add_queue(vdev, 128, handle_output, &error_abort);
/* TODO: host to guest notifications can get dropped
* if the queue fills up. Implement queueing in host,
@@ -1010,15 +1010,17 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
* this will save 4Kbyte of guest memory per entry. */
/* control queue: host to guest */
- vser->c_ivq = virtio_add_queue(vdev, 32, control_in);
+ vser->c_ivq = virtio_add_queue(vdev, 32, control_in, &error_abort);
/* control queue: guest to host */
- vser->c_ovq = virtio_add_queue(vdev, 32, control_out);
+ vser->c_ovq = virtio_add_queue(vdev, 32, control_out, &error_abort);
for (i = 1; i < vser->bus.max_nr_ports; i++) {
/* Add a per-port queue for host to guest transfers */
- vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input);
+ vser->ivqs[i] = virtio_add_queue(vdev, 128, handle_input,
+ &error_abort);
/* Add a per-port queue for guest to host transfers */
- vser->ovqs[i] = virtio_add_queue(vdev, 128, handle_output);
+ vser->ovqs[i] = virtio_add_queue(vdev, 128, handle_output,
+ &error_abort);
}
vser->ports_map = g_malloc0(((vser->serial.max_virtserial_ports + 31) / 32)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5529b6f..74205b4 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1314,16 +1314,19 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
}
for (i = 1; i < max; i++) {
- n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+ n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx,
+ &error_abort);
if (n->vqs[i].tx_timer) {
n->vqs[i].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+ virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer,
+ &error_abort);
n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_net_tx_timer,
&n->vqs[i]);
} else {
n->vqs[i].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+ virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh,
+ &error_abort);
n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
}
@@ -1335,7 +1338,8 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
* VIRTIO_NET_F_CTRL_VQ. Create ctrl vq unconditionally to avoid
* breaking them.
*/
- n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+ n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl,
+ &error_abort);
virtio_net_set_queues(n);
}
@@ -1596,7 +1600,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
return;
}
n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
- n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+ n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx,
+ &error_abort);
n->curr_queues = 1;
n->vqs[0].n = n;
n->tx_timeout = n->net_conf.txtimer;
@@ -1611,15 +1616,18 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
- virtio_net_handle_tx_timer);
+ virtio_net_handle_tx_timer,
+ &error_abort);
n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
&n->vqs[0]);
} else {
n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
- virtio_net_handle_tx_bh);
+ virtio_net_handle_tx_bh,
+ &error_abort);
n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
}
- n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+ n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl,
+ &error_abort);
qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
n->status = VIRTIO_NET_S_LINK_UP;
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 40ba03d..ca7af2c 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -838,12 +838,12 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE,
- ctrl);
+ ctrl, &error_abort);
s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE,
- evt);
+ evt, &error_abort);
for (i = 0; i < s->conf.num_queues; i++) {
s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE,
- cmd);
+ cmd, &error_abort);
}
if (s->conf.iothread) {
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index e26c0a7..4479500 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -388,9 +388,12 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
return;
}
- s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
- s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
- s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);
+ s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output,
+ &error_abort);
+ s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output,
+ &error_abort);
+ s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats,
+ &error_abort);
reset_stats(s);
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index c3cbdc3..fc70525 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -194,7 +194,7 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp)
virtio_init(vdev, "virtio-rng", VIRTIO_ID_RNG, 0);
- vrng->vq = virtio_add_queue(vdev, 8, handle_input);
+ vrng->vq = virtio_add_queue(vdev, 8, handle_input, &error_abort);
vrng->quota_remaining = vrng->conf.max_bytes;
vrng->rate_limit_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 7c5ca07..b473f9d 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -862,7 +862,8 @@ void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
- void (*handle_output)(VirtIODevice *, VirtQueue *))
+ void (*handle_output)(VirtIODevice *, VirtQueue *),
+ Error **errp)
{
int i;
@@ -871,8 +872,14 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
break;
}
- if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
- abort();
+ if (i == VIRTIO_PCI_QUEUE_MAX) {
+ error_setg(errp, "Cannot find free vq");
+ return NULL;
+ }
+ if (queue_size > VIRTQUEUE_MAX_SIZE) {
+ error_setg(errp, "Queue size too big: %d", queue_size);
+ return NULL;
+ }
vdev->vq[i].vring.num = queue_size;
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 34fb62c..b31465f 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -129,7 +129,8 @@ void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name);
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
void (*handle_output)(VirtIODevice *,
- VirtQueue *));
+ VirtQueue *),
+ Error **errp);
void virtio_del_queue(VirtIODevice *vdev, int n);
--
1.9.3
next prev parent reply other threads:[~2015-04-17 8:47 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-04-17 7:59 [Qemu-devel] [PATCH 00/18] virtio-blk: Support "VIRTIO_CONFIG_S_NEEDS_RESET" Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 01/18] virtio: Return error from virtqueue_map_sg Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 02/18] virtio: Return error from virtqueue_num_heads Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 03/18] virtio: Return error from virtqueue_get_head Fam Zheng
2015-04-21 6:27 ` Michael S. Tsirkin
2015-04-17 7:59 ` [Qemu-devel] [PATCH 04/18] virtio: Return error from virtqueue_next_desc Fam Zheng
2015-04-21 6:37 ` Michael S. Tsirkin
2015-04-21 7:30 ` Fam Zheng
2015-04-21 9:56 ` Michael S. Tsirkin
2015-04-17 7:59 ` [Qemu-devel] [PATCH 05/18] virtio: Return error from virtqueue_get_avail_bytes Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 06/18] virtio: Return error from virtqueue_pop Fam Zheng
2015-04-21 6:49 ` Michael S. Tsirkin
2015-04-21 7:24 ` Fam Zheng
2015-04-21 9:51 ` Michael S. Tsirkin
2015-04-17 7:59 ` [Qemu-devel] [PATCH 07/18] virtio: Return error from virtqueue_avail_bytes Fam Zheng
2015-04-17 7:59 ` Fam Zheng [this message]
2015-04-17 7:59 ` [Qemu-devel] [PATCH 09/18] virtio: Return error from virtio_del_queue Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 10/18] virtio: Add macro for VIRTIO_CONFIG_S_NEEDS_RESET Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 11/18] virtio: Add "needs_reset" flag to virtio device Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 12/18] virtio: Return -EINVAL if the vdev needs reset in virtqueue_pop Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 13/18] virtio-blk: Graceful error handling of virtqueue_pop Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 14/18] qtest: Add "QTEST_FILTER" to filter test cases Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 15/18] qtest: virtio-blk: Extract "setup" for future reuse Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 16/18] libqos: Add qvirtio_needs_reset Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 17/18] qtest: Add test case for "needs reset" of virtio-blk Fam Zheng
2015-04-17 7:59 ` [Qemu-devel] [PATCH 18/18] qtest: virtio-blk: Suppress virtio error messages in "make check" Fam Zheng
2015-04-20 15:13 ` [Qemu-devel] [PATCH 00/18] virtio-blk: Support "VIRTIO_CONFIG_S_NEEDS_RESET" Cornelia Huck
2015-04-21 7:44 ` Fam Zheng
2015-04-21 8:04 ` Cornelia Huck
2015-04-21 8:38 ` Fam Zheng
2015-04-21 9:08 ` Cornelia Huck
2015-04-21 9:16 ` Fam Zheng
2015-04-21 9:55 ` Cornelia Huck
2015-04-21 9:59 ` Michael S. Tsirkin
2015-04-20 17:36 ` Michael S. Tsirkin
2015-04-20 19:10 ` Paolo Bonzini
2015-04-20 20:34 ` Michael S. Tsirkin
2015-04-21 2:39 ` Fam Zheng
2015-04-21 6:52 ` Paolo Bonzini
2015-04-21 6:58 ` Michael S. Tsirkin
2015-04-21 2:37 ` Fam Zheng
2015-04-21 5:22 ` Michael S. Tsirkin
2015-04-21 5:50 ` Fam Zheng
2015-04-21 6:09 ` Michael S. Tsirkin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1429257573-7359-9-git-send-email-famz@redhat.com \
--to=famz@redhat.com \
--cc=amit.shah@redhat.com \
--cc=aneesh.kumar@linux.vnet.ibm.com \
--cc=kwolf@redhat.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=stefanha@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).