From: Jason Wang <jasowang@redhat.com>
To: qemu-devel@nongnu.org
Cc: cornelia.huck@de.ibm.com, Amit Shah <amit.shah@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	mst@redhat.com
Subject: [Qemu-devel] [PATCH V8 8/9] virtio: rename VIRTIO_PCI_QUEUE_MAX to VIRTIO_QUEUE_MAX
Date: Fri, 29 May 2015 14:15:31 +0800
Message-ID: <1432880132-28477-9-git-send-email-jasowang@redhat.com>
In-Reply-To: <1432880132-28477-1-git-send-email-jasowang@redhat.com>

VIRTIO_PCI_QUEUE_MAX is not only used by virtio-pci, so rename it to the
more generic VIRTIO_QUEUE_MAX.
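
The limit bounds the per-device VirtQueue array in the transport-independent
core (hw/virtio/virtio.c) as well as the PCI and MMIO proxies. A minimal
sketch of the pattern the core relies on (hypothetical helper name; the loop
mirrors virtio_get_num_queues() in the diff below):

    static int count_active_queues(VirtIODevice *vdev)
    {
        int i;

        /* virtio_init() allocates VIRTIO_QUEUE_MAX VirtQueue slots per
         * device; a slot whose ring size is still 0 has not been added
         * with virtio_add_queue(), so stop at the first such slot. */
        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            if (!virtio_queue_get_num(vdev, i)) {
                break;
            }
        }

        return i;
    }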

Cc: Amit Shah <amit.shah@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/char/virtio-serial-bus.c |  2 +-
 hw/net/virtio-net.c         |  4 ++--
 hw/scsi/virtio-scsi.c       |  4 ++--
 hw/virtio/virtio-mmio.c     |  4 ++--
 hw/virtio/virtio-pci.c      | 10 +++++-----
 hw/virtio/virtio.c          | 28 ++++++++++++++--------------
 include/hw/virtio/virtio.h  |  2 +-
 7 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 6e2ad82..333339e 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -973,7 +973,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
     }
 
     /* Each port takes 2 queues, and one pair is for the control queue */
-    max_supported_ports = VIRTIO_PCI_QUEUE_MAX / 2 - 1;
+    max_supported_ports = VIRTIO_QUEUE_MAX / 2 - 1;
 
     if (vser->serial.max_virtserial_ports > max_supported_ports) {
         error_setg(errp, "maximum ports supported: %u", max_supported_ports);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 3edb1c4..6dca726 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1555,10 +1555,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
 
     n->max_queues = MAX(n->nic_conf.peers.queues, 1);
-    if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) {
+    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                    "must be a positive integer less than %d.",
-                   n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2);
+                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
         virtio_cleanup(vdev);
         return;
     }
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index e242fef..5818159 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -830,10 +830,10 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                 sizeof(VirtIOSCSIConfig));
 
     if (s->conf.num_queues == 0 ||
-            s->conf.num_queues > VIRTIO_PCI_QUEUE_MAX - 2) {
+            s->conf.num_queues > VIRTIO_QUEUE_MAX - 2) {
         error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                          "must be a positive integer less than %d.",
-                   s->conf.num_queues, VIRTIO_PCI_QUEUE_MAX - 2);
+                   s->conf.num_queues, VIRTIO_QUEUE_MAX - 2);
         virtio_cleanup(vdev);
         return;
     }
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index dd3f80b..bd2ee26 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -237,7 +237,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                 proxy->guest_page_shift);
         break;
     case VIRTIO_MMIO_QUEUESEL:
-        if (value < VIRTIO_PCI_QUEUE_MAX) {
+        if (value < VIRTIO_QUEUE_MAX) {
             vdev->queue_sel = value;
         }
         break;
@@ -257,7 +257,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
         }
         break;
     case VIRTIO_MMIO_QUEUENOTIFY:
-        if (value < VIRTIO_PCI_QUEUE_MAX) {
+        if (value < VIRTIO_QUEUE_MAX) {
             virtio_queue_notify(vdev, value);
         }
         break;
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index b88c8e0..5eb5f46 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -171,7 +171,7 @@ static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
         return;
     }
 
-    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
         if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
@@ -207,7 +207,7 @@ static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
         return;
     }
 
-    for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
         if (!virtio_queue_get_num(vdev, n)) {
             continue;
         }
@@ -243,11 +243,11 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
             virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
         break;
     case VIRTIO_PCI_QUEUE_SEL:
-        if (val < VIRTIO_PCI_QUEUE_MAX)
+        if (val < VIRTIO_QUEUE_MAX)
             vdev->queue_sel = val;
         break;
     case VIRTIO_PCI_QUEUE_NOTIFY:
-        if (val < VIRTIO_PCI_QUEUE_MAX) {
+        if (val < VIRTIO_QUEUE_MAX) {
             virtio_queue_notify(vdev, val);
         }
         break;
@@ -750,7 +750,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
     bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
         kvm_msi_via_irqfd_enabled();
 
-    nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
+    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);
 
     /* When deassigning, pass a consistent nvqs value
      * to avoid leaking notifiers.
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 81d8905..7092aa5 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -600,7 +600,7 @@ void virtio_reset(void *opaque)
     vdev->config_vector = VIRTIO_NO_VECTOR;
     virtio_notify_vector(vdev, vdev->config_vector);
 
-    for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         vdev->vq[i].vring.desc = 0;
         vdev->vq[i].vring.avail = 0;
         vdev->vq[i].vring.used = 0;
@@ -750,7 +750,7 @@ int virtio_get_num_queues(VirtIODevice *vdev)
 {
     int i;
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         if (!virtio_queue_get_num(vdev, i)) {
             break;
         }
@@ -762,7 +762,7 @@ int virtio_get_num_queues(VirtIODevice *vdev)
 int virtio_queue_get_id(VirtQueue *vq)
 {
     VirtIODevice *vdev = vq->vdev;
-    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
+    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_QUEUE_MAX]);
     return vq - &vdev->vq[0];
 }
 
@@ -798,7 +798,7 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
 
 uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
 {
-    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
+    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
         VIRTIO_NO_VECTOR;
 }
 
@@ -806,7 +806,7 @@ void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
 {
     VirtQueue *vq = &vdev->vq[n];
 
-    if (n < VIRTIO_PCI_QUEUE_MAX) {
+    if (n < VIRTIO_QUEUE_MAX) {
         if (vdev->vector_queues &&
             vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
             QLIST_REMOVE(vq, node);
@@ -824,12 +824,12 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
 {
     int i;
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         if (vdev->vq[i].vring.num == 0)
             break;
     }
 
-    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
+    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
         abort();
 
     vdev->vq[i].vring.num = queue_size;
@@ -841,7 +841,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
 
 void virtio_del_queue(VirtIODevice *vdev, int n)
 {
-    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
+    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
         abort();
     }
 
@@ -951,14 +951,14 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
     qemu_put_be32(f, vdev->config_len);
     qemu_put_buffer(f, vdev->config, vdev->config_len);
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         if (vdev->vq[i].vring.num == 0)
             break;
     }
 
     qemu_put_be32(f, i);
 
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         if (vdev->vq[i].vring.num == 0)
             break;
 
@@ -1023,7 +1023,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
     qemu_get_8s(f, &vdev->status);
     qemu_get_8s(f, &vdev->isr);
     qemu_get_be16s(f, &vdev->queue_sel);
-    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
+    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
         return -1;
     }
     qemu_get_be32s(f, &features);
@@ -1050,7 +1050,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
 
     num = qemu_get_be32(f);
 
-    if (num > VIRTIO_PCI_QUEUE_MAX) {
+    if (num > VIRTIO_QUEUE_MAX) {
         error_report("Invalid number of PCI queues: 0x%x", num);
         return -1;
     }
@@ -1176,9 +1176,9 @@ void virtio_init(VirtIODevice *vdev, const char *name,
     vdev->isr = 0;
     vdev->queue_sel = 0;
     vdev->config_vector = VIRTIO_NO_VECTOR;
-    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
+    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
     vdev->vm_running = runstate_is_running();
-    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         vdev->vq[i].vector = VIRTIO_NO_VECTOR;
         vdev->vq[i].vdev = vdev;
         vdev->vq[i].queue_index = i;
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 175195d..8fdaa72 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -48,7 +48,7 @@ typedef struct VirtQueueElement
     struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
 } VirtQueueElement;
 
-#define VIRTIO_PCI_QUEUE_MAX 64
+#define VIRTIO_QUEUE_MAX 64
 
 #define VIRTIO_NO_VECTOR 0xffff
 
-- 
2.1.4
