* [Qemu-devel] [PATCH V5 01/18] virtio-net: fix the upper bound when trying to delete queues
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
@ 2015-04-01 8:14 ` Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 02/18] pc: add 2.4 machine types Jason Wang
` (17 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:14 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, qemu-stable, mst
Virtqueues are indexed from zero, so don't delete the virtqueue whose
index is n->max_queues * 2 + 1.
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/net/virtio-net.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 59f76bc..b6fac9c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1309,7 +1309,7 @@ static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
n->multiqueue = multiqueue;
- for (i = 2; i <= n->max_queues * 2 + 1; i++) {
+ for (i = 2; i < n->max_queues * 2 + 1; i++) {
virtio_del_queue(vdev, i);
}
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 02/18] pc: add 2.4 machine types
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 01/18] virtio-net: fix the upper bound when trying to delete queues Jason Wang
@ 2015-04-01 8:14 ` Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 03/18] spapr: add machine type specific instance init function Jason Wang
` (16 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:14 UTC (permalink / raw)
To: qemu-devel
Cc: cornelia.huck, Paolo Bonzini, Jason Wang, Richard Henderson, mst
The following patches will limit the following things to legacy
machine type:
- maximum number of virtqueues for virtio-pci were limited to 64
- auto msix bar size for virtio-net-pci were disabled by default
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/i386/pc_piix.c | 29 +++++++++++++++++++++++++----
hw/i386/pc_q35.c | 26 +++++++++++++++++++++++---
2 files changed, 48 insertions(+), 7 deletions(-)
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 1fe7bfb..212e263 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -310,8 +310,13 @@ static void pc_init_pci(MachineState *machine)
pc_init1(machine, 1, 1);
}
+static void pc_compat_2_3(MachineState *machine)
+{
+}
+
static void pc_compat_2_2(MachineState *machine)
{
+ pc_compat_2_3(machine);
rsdp_in_ram = false;
x86_cpu_compat_set_features("kvm64", FEAT_1_EDX, 0, CPUID_VME);
x86_cpu_compat_set_features("kvm32", FEAT_1_EDX, 0, CPUID_VME);
@@ -413,6 +418,12 @@ static void pc_compat_1_2(MachineState *machine)
x86_cpu_compat_kvm_no_autoenable(FEAT_KVM, 1 << KVM_FEATURE_PV_EOI);
}
+static void pc_init_pci_2_3(MachineState *machine)
+{
+ pc_compat_2_3(machine);
+ pc_init_pci(machine);
+}
+
static void pc_init_pci_2_2(MachineState *machine)
{
pc_compat_2_2(machine);
@@ -512,19 +523,28 @@ static void pc_xen_hvm_init(MachineState *machine)
.desc = "Standard PC (i440FX + PIIX, 1996)", \
.hot_add_cpu = pc_hot_add_cpu
-#define PC_I440FX_2_3_MACHINE_OPTIONS \
+#define PC_I440FX_2_4_MACHINE_OPTIONS \
PC_I440FX_MACHINE_OPTIONS, \
.default_machine_opts = "firmware=bios-256k.bin", \
.default_display = "std"
-static QEMUMachine pc_i440fx_machine_v2_3 = {
- PC_I440FX_2_3_MACHINE_OPTIONS,
- .name = "pc-i440fx-2.3",
+
+static QEMUMachine pc_i440fx_machine_v2_4 = {
+ PC_I440FX_2_4_MACHINE_OPTIONS,
+ .name = "pc-i440fx-2.4",
.alias = "pc",
.init = pc_init_pci,
.is_default = 1,
};
+#define PC_I440FX_2_3_MACHINE_OPTIONS PC_I440FX_2_4_MACHINE_OPTIONS
+
+static QEMUMachine pc_i440fx_machine_v2_3 = {
+ PC_I440FX_2_3_MACHINE_OPTIONS,
+ .name = "pc-i440fx-2.3",
+ .init = pc_init_pci_2_3,
+};
+
#define PC_I440FX_2_2_MACHINE_OPTIONS PC_I440FX_2_3_MACHINE_OPTIONS
static QEMUMachine pc_i440fx_machine_v2_2 = {
@@ -970,6 +990,7 @@ static QEMUMachine xenfv_machine = {
static void pc_machine_init(void)
{
+ qemu_register_pc_machine(&pc_i440fx_machine_v2_4);
qemu_register_pc_machine(&pc_i440fx_machine_v2_3);
qemu_register_pc_machine(&pc_i440fx_machine_v2_2);
qemu_register_pc_machine(&pc_i440fx_machine_v2_1);
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index dcc17c0..e67f2de 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -289,8 +289,13 @@ static void pc_q35_init(MachineState *machine)
}
}
+static void pc_compat_2_3(MachineState *machine)
+{
+}
+
static void pc_compat_2_2(MachineState *machine)
{
+ pc_compat_2_3(machine);
rsdp_in_ram = false;
x86_cpu_compat_set_features("kvm64", FEAT_1_EDX, 0, CPUID_VME);
x86_cpu_compat_set_features("kvm32", FEAT_1_EDX, 0, CPUID_VME);
@@ -361,6 +366,12 @@ static void pc_compat_1_4(MachineState *machine)
x86_cpu_compat_set_features("Westmere", FEAT_1_ECX, 0, CPUID_EXT_PCLMULQDQ);
}
+static void pc_q35_init_2_3(MachineState *machine)
+{
+ pc_compat_2_3(machine);
+ pc_q35_init(machine);
+}
+
static void pc_q35_init_2_2(MachineState *machine)
{
pc_compat_2_2(machine);
@@ -410,16 +421,24 @@ static void pc_q35_init_1_4(MachineState *machine)
.hot_add_cpu = pc_hot_add_cpu, \
.units_per_default_bus = 1
-#define PC_Q35_2_3_MACHINE_OPTIONS \
+#define PC_Q35_2_4_MACHINE_OPTIONS \
PC_Q35_MACHINE_OPTIONS, \
.default_machine_opts = "firmware=bios-256k.bin", \
.default_display = "std"
+static QEMUMachine pc_q35_machine_v2_4 = {
+ PC_Q35_2_4_MACHINE_OPTIONS,
+ .name = "pc-q35-2.4",
+ .alias = "q35",
+ .init = pc_q35_init,
+};
+
+#define PC_Q35_2_3_MACHINE_OPTIONS PC_Q35_2_4_MACHINE_OPTIONS
+
static QEMUMachine pc_q35_machine_v2_3 = {
PC_Q35_2_3_MACHINE_OPTIONS,
.name = "pc-q35-2.3",
- .alias = "q35",
- .init = pc_q35_init,
+ .init = pc_q35_init_2_3,
};
#define PC_Q35_2_2_MACHINE_OPTIONS PC_Q35_2_3_MACHINE_OPTIONS
@@ -506,6 +525,7 @@ static QEMUMachine pc_q35_machine_v1_4 = {
static void pc_q35_machine_init(void)
{
+ qemu_register_pc_machine(&pc_q35_machine_v2_4);
qemu_register_pc_machine(&pc_q35_machine_v2_3);
qemu_register_pc_machine(&pc_q35_machine_v2_2);
qemu_register_pc_machine(&pc_q35_machine_v2_1);
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 03/18] spapr: add machine type specific instance init function
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 01/18] virtio-net: fix the upper bound when trying to delete queues Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 02/18] pc: add 2.4 machine types Jason Wang
@ 2015-04-01 8:14 ` Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 04/18] ppc: spapr: add 2.4 machine type Jason Wang
` (15 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:14 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, qemu-ppc, Alexander Graf, mst
This patch adds machine type specific instance initialization
functions. Those functions will be used by the following patches to compat
class properties for legacy machine types.
Cc: Alexander Graf <agraf@suse.de>
Cc: qemu-ppc@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/ppc/spapr.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 61ddc79..9e676ef 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1825,6 +1825,27 @@ static const TypeInfo spapr_machine_info = {
#define SPAPR_COMPAT_2_1 \
SPAPR_COMPAT_2_2
+static void spapr_compat_2_2(Object *obj)
+{
+}
+
+static void spapr_compat_2_1(Object *obj)
+{
+ spapr_compat_2_2(obj);
+}
+
+static void spapr_machine_2_2_instance_init(Object *obj)
+{
+ spapr_compat_2_2(obj);
+ spapr_machine_initfn(obj);
+}
+
+static void spapr_machine_2_1_instance_init(Object *obj)
+{
+ spapr_compat_2_1(obj);
+ spapr_machine_initfn(obj);
+}
+
static void spapr_machine_2_1_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1843,6 +1864,7 @@ static const TypeInfo spapr_machine_2_1_info = {
.name = TYPE_SPAPR_MACHINE "2.1",
.parent = TYPE_SPAPR_MACHINE,
.class_init = spapr_machine_2_1_class_init,
+ .instance_init = spapr_machine_2_1_instance_init,
};
static void spapr_machine_2_2_class_init(ObjectClass *oc, void *data)
@@ -1862,6 +1884,7 @@ static const TypeInfo spapr_machine_2_2_info = {
.name = TYPE_SPAPR_MACHINE "2.2",
.parent = TYPE_SPAPR_MACHINE,
.class_init = spapr_machine_2_2_class_init,
+ .instance_init = spapr_machine_2_2_instance_init,
};
static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 04/18] ppc: spapr: add 2.4 machine type
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (2 preceding siblings ...)
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 03/18] spapr: add machine type specific instance init function Jason Wang
@ 2015-04-01 8:14 ` Jason Wang
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 05/18] monitor: replace the magic number 255 with MAX_QUEUE_NUM Jason Wang
` (14 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:14 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, qemu-ppc, Alexander Graf, mst
The following patches will limit the following things to legacy
machine type:
- maximum number of virtqueues for virtio-pci were limited to 64
- auto msix bar size for virtio-net-pci were disabled by default
Cc: Alexander Graf <agraf@suse.de>
Cc: qemu-ppc@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/ppc/spapr.c | 31 +++++++++++++++++++++++++++++--
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 9e676ef..8e43aa2 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1825,8 +1825,13 @@ static const TypeInfo spapr_machine_info = {
#define SPAPR_COMPAT_2_1 \
SPAPR_COMPAT_2_2
+static void spapr_compat_2_3(Object *obj)
+{
+}
+
static void spapr_compat_2_2(Object *obj)
{
+ spapr_compat_2_3(obj);
}
static void spapr_compat_2_1(Object *obj)
@@ -1834,6 +1839,12 @@ static void spapr_compat_2_1(Object *obj)
spapr_compat_2_2(obj);
}
+static void spapr_machine_2_3_instance_init(Object *obj)
+{
+ spapr_compat_2_3(obj);
+ spapr_machine_initfn(obj);
+}
+
static void spapr_machine_2_2_instance_init(Object *obj)
{
spapr_compat_2_2(obj);
@@ -1893,14 +1904,29 @@ static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
mc->name = "pseries-2.3";
mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
- mc->alias = "pseries";
- mc->is_default = 1;
}
static const TypeInfo spapr_machine_2_3_info = {
.name = TYPE_SPAPR_MACHINE "2.3",
.parent = TYPE_SPAPR_MACHINE,
.class_init = spapr_machine_2_3_class_init,
+ .instance_init = spapr_machine_2_3_instance_init,
+};
+
+static void spapr_machine_2_4_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->name = "pseries-2.4";
+ mc->desc = "pSeries Logical Partition (PAPR compliant) v2.4";
+ mc->alias = "pseries";
+ mc->is_default = 1;
+}
+
+static const TypeInfo spapr_machine_2_4_info = {
+ .name = TYPE_SPAPR_MACHINE "2.4",
+ .parent = TYPE_SPAPR_MACHINE,
+ .class_init = spapr_machine_2_4_class_init,
};
static void spapr_machine_register_types(void)
@@ -1909,6 +1935,7 @@ static void spapr_machine_register_types(void)
type_register_static(&spapr_machine_2_1_info);
type_register_static(&spapr_machine_2_2_info);
type_register_static(&spapr_machine_2_3_info);
+ type_register_static(&spapr_machine_2_4_info);
}
type_init(spapr_machine_register_types)
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 05/18] monitor: replace the magic number 255 with MAX_QUEUE_NUM
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (3 preceding siblings ...)
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 04/18] ppc: spapr: add 2.4 machine type Jason Wang
@ 2015-04-01 8:14 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 06/18] monitor: check return value of qemu_find_net_clients_except() Jason Wang
` (13 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:14 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, Luiz Capitulino, mst
This patch replaces the magic number 255, and increases it to
MAX_QUEUE_NUM, which is the maximum number of queues supported by a nic.
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
monitor.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/monitor.c b/monitor.c
index 68873ec..a039edf 100644
--- a/monitor.c
+++ b/monitor.c
@@ -4472,10 +4472,11 @@ void set_link_completion(ReadLineState *rs, int nb_args, const char *str)
len = strlen(str);
readline_set_completion_index(rs, len);
if (nb_args == 2) {
- NetClientState *ncs[255];
+ NetClientState *ncs[MAX_QUEUE_NUM];
int count, i;
count = qemu_find_net_clients_except(NULL, ncs,
- NET_CLIENT_OPTIONS_KIND_NONE, 255);
+ NET_CLIENT_OPTIONS_KIND_NONE,
+ MAX_QUEUE_NUM);
for (i = 0; i < count; i++) {
const char *name = ncs[i]->name;
if (!strncmp(str, name, len)) {
@@ -4491,7 +4492,7 @@ void set_link_completion(ReadLineState *rs, int nb_args, const char *str)
void netdev_del_completion(ReadLineState *rs, int nb_args, const char *str)
{
int len, count, i;
- NetClientState *ncs[255];
+ NetClientState *ncs[MAX_QUEUE_NUM];
if (nb_args != 2) {
return;
@@ -4500,7 +4501,7 @@ void netdev_del_completion(ReadLineState *rs, int nb_args, const char *str)
len = strlen(str);
readline_set_completion_index(rs, len);
count = qemu_find_net_clients_except(NULL, ncs, NET_CLIENT_OPTIONS_KIND_NIC,
- 255);
+ MAX_QUEUE_NUM);
for (i = 0; i < count; i++) {
QemuOpts *opts;
const char *name = ncs[i]->name;
@@ -4566,14 +4567,15 @@ void host_net_add_completion(ReadLineState *rs, int nb_args, const char *str)
void host_net_remove_completion(ReadLineState *rs, int nb_args, const char *str)
{
- NetClientState *ncs[255];
+ NetClientState *ncs[MAX_QUEUE_NUM];
int count, i, len;
len = strlen(str);
readline_set_completion_index(rs, len);
if (nb_args == 2) {
count = qemu_find_net_clients_except(NULL, ncs,
- NET_CLIENT_OPTIONS_KIND_NONE, 255);
+ NET_CLIENT_OPTIONS_KIND_NONE,
+ MAX_QUEUE_NUM);
for (i = 0; i < count; i++) {
int id;
char name[16];
@@ -4589,7 +4591,8 @@ void host_net_remove_completion(ReadLineState *rs, int nb_args, const char *str)
return;
} else if (nb_args == 3) {
count = qemu_find_net_clients_except(NULL, ncs,
- NET_CLIENT_OPTIONS_KIND_NIC, 255);
+ NET_CLIENT_OPTIONS_KIND_NIC,
+ MAX_QUEUE_NUM);
for (i = 0; i < count; i++) {
int id;
const char *name;
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 06/18] monitor: check return value of qemu_find_net_clients_except()
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (4 preceding siblings ...)
2015-04-01 8:14 ` [Qemu-devel] [PATCH V5 05/18] monitor: replace the magic number 255 with MAX_QUEUE_NUM Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 07/18] virtio-ccw: using VIRTIO_NO_VECTOR instead of 0 for invalid virtqueue Jason Wang
` (12 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, Luiz Capitulino, mst
qemu_find_net_clients_except() may return a value which is greater
than the size of array we provided. So we should check this value
before using it, otherwise this may cause unexpected memory access.
This patch fixes the net related command completion when we have a
virtio-net nic with more than 255 queues.
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
monitor.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/monitor.c b/monitor.c
index a039edf..2b5643d 100644
--- a/monitor.c
+++ b/monitor.c
@@ -4477,7 +4477,7 @@ void set_link_completion(ReadLineState *rs, int nb_args, const char *str)
count = qemu_find_net_clients_except(NULL, ncs,
NET_CLIENT_OPTIONS_KIND_NONE,
MAX_QUEUE_NUM);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < MIN(count, MAX_QUEUE_NUM); i++) {
const char *name = ncs[i]->name;
if (!strncmp(str, name, len)) {
readline_add_completion(rs, name);
@@ -4502,7 +4502,7 @@ void netdev_del_completion(ReadLineState *rs, int nb_args, const char *str)
readline_set_completion_index(rs, len);
count = qemu_find_net_clients_except(NULL, ncs, NET_CLIENT_OPTIONS_KIND_NIC,
MAX_QUEUE_NUM);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < MIN(count, MAX_QUEUE_NUM); i++) {
QemuOpts *opts;
const char *name = ncs[i]->name;
if (strncmp(str, name, len)) {
@@ -4576,7 +4576,7 @@ void host_net_remove_completion(ReadLineState *rs, int nb_args, const char *str)
count = qemu_find_net_clients_except(NULL, ncs,
NET_CLIENT_OPTIONS_KIND_NONE,
MAX_QUEUE_NUM);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < MIN(count, MAX_QUEUE_NUM); i++) {
int id;
char name[16];
@@ -4593,7 +4593,7 @@ void host_net_remove_completion(ReadLineState *rs, int nb_args, const char *str)
count = qemu_find_net_clients_except(NULL, ncs,
NET_CLIENT_OPTIONS_KIND_NIC,
MAX_QUEUE_NUM);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < MIN(count, MAX_QUEUE_NUM); i++) {
int id;
const char *name;
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 07/18] virtio-ccw: using VIRTIO_NO_VECTOR instead of 0 for invalid virtqueue
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (5 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 06/18] monitor: check return value of qemu_find_net_clients_except() Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 08/18] virtio: introduce bus specific queue limit Jason Wang
` (11 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: mst, Jason Wang, Alexander Graf, Christian Borntraeger,
cornelia.huck, Richard Henderson
There's no need to use vector 0 for an invalid virtqueue. So this patch
changes to use VIRTIO_NO_VECTOR instead.
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
CC: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Alexander Graf <agraf@suse.de>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/s390x/virtio-ccw.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 130535c..c8b87aa 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -281,7 +281,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
virtio_queue_set_addr(vdev, index, addr);
if (!addr) {
- virtio_queue_set_vector(vdev, index, 0);
+ virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
} else {
/* Fail if we don't have a big enough queue. */
/* TODO: Add interface to handle vring.num changing */
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 08/18] virtio: introduce bus specific queue limit
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (6 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 07/18] virtio-ccw: using VIRTIO_NO_VECTOR instead of 0 for invalid virtqueue Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 09/18] virtio-ccw: introduce ccw " Jason Wang
` (10 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: mst, Jason Wang, Alexander Graf, Christian Borntraeger,
cornelia.huck, Paolo Bonzini, Richard Henderson
This patch introduces a bus specific queue limitation. It will be
useful for increasing the limit for one of the buses without disturbing
other buses.
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/char/virtio-serial-bus.c | 2 +-
hw/net/virtio-net.c | 4 ++--
hw/s390x/s390-virtio-bus.c | 1 +
hw/s390x/virtio-ccw.c | 1 +
hw/scsi/virtio-scsi.c | 4 ++--
hw/virtio/virtio-mmio.c | 1 +
hw/virtio/virtio-pci.c | 1 +
hw/virtio/virtio.c | 40 +++++++++++++++++++++++++---------------
include/hw/virtio/virtio-bus.h | 1 +
include/hw/virtio/virtio.h | 1 +
10 files changed, 36 insertions(+), 20 deletions(-)
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index e336bdb..0694831 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -973,7 +973,7 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp)
}
/* Each port takes 2 queues, and one pair is for the control queue */
- max_supported_ports = VIRTIO_PCI_QUEUE_MAX / 2 - 1;
+ max_supported_ports = virtio_get_queue_max(vdev) / 2 - 1;
if (vser->serial.max_virtserial_ports > max_supported_ports) {
error_setg(errp, "maximum ports supported: %u", max_supported_ports);
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index b6fac9c..bf286f5 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1588,10 +1588,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
- if (n->max_queues * 2 + 1 > VIRTIO_PCI_QUEUE_MAX) {
+ if (n->max_queues * 2 + 1 > virtio_get_queue_max(vdev)) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
"must be a postive integer less than %d.",
- n->max_queues, (VIRTIO_PCI_QUEUE_MAX - 1) / 2);
+ n->max_queues, (virtio_get_queue_max(vdev) - 1) / 2);
virtio_cleanup(vdev);
return;
}
diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
index 047c963..2b41e32 100644
--- a/hw/s390x/s390-virtio-bus.c
+++ b/hw/s390x/s390-virtio-bus.c
@@ -715,6 +715,7 @@ static void virtio_s390_bus_class_init(ObjectClass *klass, void *data)
bus_class->max_dev = 1;
k->notify = virtio_s390_notify;
k->get_features = virtio_s390_get_features;
+ k->queue_max = VIRTIO_PCI_QUEUE_MAX;
}
static const TypeInfo virtio_s390_bus_info = {
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index c8b87aa..dfe6ded 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -1711,6 +1711,7 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->load_queue = virtio_ccw_load_queue;
k->save_config = virtio_ccw_save_config;
k->load_config = virtio_ccw_load_config;
+ k->queue_max = VIRTIO_PCI_QUEUE_MAX;
}
static const TypeInfo virtio_ccw_bus_info = {
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index c9bea06..fbdde2b 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -826,10 +826,10 @@ void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
sizeof(VirtIOSCSIConfig));
if (s->conf.num_queues == 0 ||
- s->conf.num_queues > VIRTIO_PCI_QUEUE_MAX - 2) {
+ s->conf.num_queues > virtio_get_queue_max(vdev) - 2) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
"must be a positive integer less than %d.",
- s->conf.num_queues, VIRTIO_PCI_QUEUE_MAX - 2);
+ s->conf.num_queues, virtio_get_queue_max(vdev) - 2);
virtio_cleanup(vdev);
return;
}
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 10123f3..2ae6942 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -403,6 +403,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
k->device_plugged = virtio_mmio_device_plugged;
k->has_variable_vring_alignment = true;
bus_class->max_dev = 1;
+ k->queue_max = VIRTIO_PCI_QUEUE_MAX;
}
static const TypeInfo virtio_mmio_bus_info = {
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index c7c3f72..075b13b 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1498,6 +1498,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->vmstate_change = virtio_pci_vmstate_change;
k->device_plugged = virtio_pci_device_plugged;
k->device_unplugged = virtio_pci_device_unplugged;
+ k->queue_max = VIRTIO_PCI_QUEUE_MAX;
}
static const TypeInfo virtio_pci_bus_info = {
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 17c1260..bbb224f 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -541,6 +541,14 @@ void virtio_update_irq(VirtIODevice *vdev)
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
+int virtio_get_queue_max(VirtIODevice *vdev)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ return k->queue_max;
+}
+
void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
@@ -599,7 +607,7 @@ void virtio_reset(void *opaque)
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
- for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ for (i = 0; i < virtio_get_queue_max(vdev); i++) {
vdev->vq[i].vring.desc = 0;
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
@@ -738,7 +746,8 @@ int virtio_queue_get_num(VirtIODevice *vdev, int n)
int virtio_queue_get_id(VirtQueue *vq)
{
VirtIODevice *vdev = vq->vdev;
- assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
+
+ assert(vq >= &vdev->vq[0] && vq < &vdev->vq[virtio_get_queue_max(vdev)]);
return vq - &vdev->vq[0];
}
@@ -774,28 +783,29 @@ void virtio_queue_notify(VirtIODevice *vdev, int n)
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
- return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
+ return n < virtio_get_queue_max(vdev) ? vdev->vq[n].vector :
VIRTIO_NO_VECTOR;
}
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
- if (n < VIRTIO_PCI_QUEUE_MAX)
+ if (n < virtio_get_queue_max(vdev))
vdev->vq[n].vector = vector;
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
void (*handle_output)(VirtIODevice *, VirtQueue *))
{
- int i;
+ int i, queue_max = virtio_get_queue_max(vdev);
- for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ for (i = 0; i < queue_max; i++) {
if (vdev->vq[i].vring.num == 0)
break;
}
- if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
+ if (i == queue_max || queue_size > VIRTQUEUE_MAX_SIZE) {
abort();
+ }
vdev->vq[i].vring.num = queue_size;
vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
@@ -806,7 +816,7 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
void virtio_del_queue(VirtIODevice *vdev, int n)
{
- if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
+ if (n < 0 || n >= virtio_get_queue_max(vdev)) {
abort();
}
@@ -916,14 +926,14 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
qemu_put_be32(f, vdev->config_len);
qemu_put_buffer(f, vdev->config, vdev->config_len);
- for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ for (i = 0; i < k->queue_max; i++) {
if (vdev->vq[i].vring.num == 0)
break;
}
qemu_put_be32(f, i);
- for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ for (i = 0; i < k->queue_max; i++) {
if (vdev->vq[i].vring.num == 0)
break;
@@ -988,7 +998,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
qemu_get_8s(f, &vdev->status);
qemu_get_8s(f, &vdev->isr);
qemu_get_be16s(f, &vdev->queue_sel);
- if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
+ if (vdev->queue_sel >= k->queue_max) {
return -1;
}
qemu_get_be32s(f, &features);
@@ -1015,7 +1025,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
num = qemu_get_be32(f);
- if (num > VIRTIO_PCI_QUEUE_MAX) {
+ if (num > k->queue_max) {
error_report("Invalid number of PCI queues: 0x%x", num);
return -1;
}
@@ -1125,15 +1135,15 @@ void virtio_instance_init_common(Object *proxy_obj, void *data,
void virtio_init(VirtIODevice *vdev, const char *name,
uint16_t device_id, size_t config_size)
{
- int i;
+ int i, queue_max = virtio_get_queue_max(vdev);
vdev->device_id = device_id;
vdev->status = 0;
vdev->isr = 0;
vdev->queue_sel = 0;
vdev->config_vector = VIRTIO_NO_VECTOR;
- vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
+ vdev->vq = g_malloc0(sizeof(VirtQueue) * queue_max);
vdev->vm_running = runstate_is_running();
- for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ for (i = 0; i < queue_max; i++) {
vdev->vq[i].vector = VIRTIO_NO_VECTOR;
vdev->vq[i].vdev = vdev;
vdev->vq[i].queue_index = i;
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 0d2e7b4..4da8022 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -68,6 +68,7 @@ typedef struct VirtioBusClass {
* Note that changing this will break migration for this transport.
*/
bool has_variable_vring_alignment;
+ uint16_t queue_max;
} VirtioBusClass;
struct VirtioBusState {
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index d95f8b6..91fd673 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -179,6 +179,7 @@ void virtio_queue_notify(VirtIODevice *vdev, int n);
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n);
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector);
void virtio_set_status(VirtIODevice *vdev, uint8_t val);
+int virtio_get_queue_max(VirtIODevice *vdev);
void virtio_reset(void *opaque);
void virtio_update_irq(VirtIODevice *vdev);
int virtio_set_features(VirtIODevice *vdev, uint32_t val);
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 09/18] virtio-ccw: introduce ccw specific queue limit
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (7 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 08/18] virtio: introduce bus specific queue limit Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-02 13:47 ` Cornelia Huck
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 10/18] virtio-s390: switch to bus " Jason Wang
` (9 subsequent siblings)
18 siblings, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: mst, Jason Wang, Alexander Graf, Christian Borntraeger,
cornelia.huck, Richard Henderson
Instead of depending on a macro, use a bus specific limit. Also make
it clear that the number of gsis per I/O adapter is not directly
depending on the number of virtio queues, but rather the other way
around.
Cc: Alexander Graf <agraf@suse.de>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/s390x/s390-virtio-ccw.c | 7 +++++--
hw/s390x/virtio-ccw.c | 19 ++++++++++++-------
include/hw/s390x/s390_flic.h | 4 +++-
3 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index afb539a..96b84ab 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -55,6 +55,7 @@ void io_subsystem_reset(void)
static int virtio_ccw_hcall_notify(const uint64_t *args)
{
+ VirtIODevice *vdev;
uint64_t subch_id = args[0];
uint64_t queue = args[1];
SubchDev *sch;
@@ -67,10 +68,12 @@ static int virtio_ccw_hcall_notify(const uint64_t *args)
if (!sch || !css_subch_visible(sch)) {
return -EINVAL;
}
- if (queue >= VIRTIO_PCI_QUEUE_MAX) {
+
+ vdev = virtio_ccw_get_vdev(sch);
+ if (queue >= virtio_get_queue_max(vdev)) {
return -EINVAL;
}
- virtio_queue_notify(virtio_ccw_get_vdev(sch), queue);
+ virtio_queue_notify(vdev, queue);
return 0;
}
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index dfe6ded..935b880 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -32,6 +32,8 @@
static QTAILQ_HEAD(, IndAddr) indicator_addresses =
QTAILQ_HEAD_INITIALIZER(indicator_addresses);
+#define VIRTIO_CCW_QUEUE_MAX ADAPTER_ROUTES_MAX_GSI
+
static IndAddr *get_indicator(hwaddr ind_addr, int len)
{
IndAddr *indicator;
@@ -170,7 +172,7 @@ static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
return;
}
vdev = virtio_bus_get_device(&dev->bus);
- for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+ for (n = 0; n < virtio_get_queue_max(vdev); n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
@@ -205,7 +207,7 @@ static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
return;
}
vdev = virtio_bus_get_device(&dev->bus);
- for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+ for (n = 0; n < virtio_get_queue_max(vdev); n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
@@ -265,8 +267,9 @@ static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
uint16_t index, uint16_t num)
{
VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
+ int queue_max = virtio_get_queue_max(vdev);
- if (index > VIRTIO_PCI_QUEUE_MAX) {
+ if (index > queue_max) {
return -EINVAL;
}
@@ -291,7 +294,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
virtio_queue_set_vector(vdev, index, index);
}
/* tell notify handler in case of config change */
- vdev->config_vector = VIRTIO_PCI_QUEUE_MAX;
+ vdev->config_vector = queue_max;
return 0;
}
@@ -1036,14 +1039,16 @@ static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
{
VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
SubchDev *sch = dev->sch;
uint64_t indicators;
- if (vector >= 128) {
+ /* queue indicators + secondary indicators */
+ if (vector >= virtio_get_queue_max(vdev) + 64) {
return;
}
- if (vector < VIRTIO_PCI_QUEUE_MAX) {
+ if (vector < virtio_get_queue_max(vdev)) {
if (!dev->indicators) {
return;
}
@@ -1711,7 +1716,7 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->load_queue = virtio_ccw_load_queue;
k->save_config = virtio_ccw_save_config;
k->load_config = virtio_ccw_load_config;
- k->queue_max = VIRTIO_PCI_QUEUE_MAX;
+ k->queue_max = VIRTIO_CCW_QUEUE_MAX;
}
static const TypeInfo virtio_ccw_bus_info = {
diff --git a/include/hw/s390x/s390_flic.h b/include/hw/s390x/s390_flic.h
index 489d73b..e1b0389 100644
--- a/include/hw/s390x/s390_flic.h
+++ b/include/hw/s390x/s390_flic.h
@@ -17,10 +17,12 @@
#include "hw/s390x/adapter.h"
#include "hw/virtio/virtio.h"
+#define ADAPTER_ROUTES_MAX_GSI 64
+
typedef struct AdapterRoutes {
AdapterInfo adapter;
int num_routes;
- int gsi[VIRTIO_PCI_QUEUE_MAX];
+ int gsi[ADAPTER_ROUTES_MAX_GSI];
} AdapterRoutes;
#define TYPE_S390_FLIC_COMMON "s390-flic"
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 09/18] virtio-ccw: introduce ccw specific queue limit
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 09/18] virtio-ccw: introduce ccw " Jason Wang
@ 2015-04-02 13:47 ` Cornelia Huck
2015-04-03 8:53 ` Jason Wang
0 siblings, 1 reply; 39+ messages in thread
From: Cornelia Huck @ 2015-04-02 13:47 UTC (permalink / raw)
To: Jason Wang
Cc: Alexander Graf, Richard Henderson, qemu-devel,
Christian Borntraeger, mst
On Wed, 1 Apr 2015 16:15:03 +0800
Jason Wang <jasowang@redhat.com> wrote:
> Instead of depending on marco, using a bus specific limit. Also make
> it clear that the number of gsis per I/O adapter is not directly
> depending on the number of virtio queues, but rather the other way
> around.
>
> Cc: Alexander Graf <agraf@suse.de>
> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
> Cc: Richard Henderson <rth@twiddle.net>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> hw/s390x/s390-virtio-ccw.c | 7 +++++--
> hw/s390x/virtio-ccw.c | 19 ++++++++++++-------
> include/hw/s390x/s390_flic.h | 4 +++-
> 3 files changed, 20 insertions(+), 10 deletions(-)
>
> diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
> index dfe6ded..935b880 100644
> --- a/hw/s390x/virtio-ccw.c
> +++ b/hw/s390x/virtio-ccw.c
> @@ -265,8 +267,9 @@ static int virtio_ccw_set_vqs(SubchDev *sch, uint64_t addr, uint32_t align,
> uint16_t index, uint16_t num)
> {
> VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
> + int queue_max = virtio_get_queue_max(vdev);
>
> - if (index > VIRTIO_PCI_QUEUE_MAX) {
> + if (index > queue_max) {
trivial conflict on master (>= instead of >)
> return -EINVAL;
> }
>
In master, there's also a new instance of VIRTIO_PCI_QUEUE_MAX (in
virtio_ccw_cb()) which needs a similar treatment.
But on the whole, looks good.
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 09/18] virtio-ccw: introduce ccw specific queue limit
2015-04-02 13:47 ` Cornelia Huck
@ 2015-04-03 8:53 ` Jason Wang
0 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-03 8:53 UTC (permalink / raw)
To: Cornelia Huck
Cc: Alexander Graf, Richard Henderson, qemu-devel,
Christian Borntraeger, mst
On Thu, Apr 2, 2015 at 9:47 PM, Cornelia Huck
<cornelia.huck@de.ibm.com> wrote:
> On Wed, 1 Apr 2015 16:15:03 +0800
> Jason Wang <jasowang@redhat.com> wrote:
>
>> Instead of depending on marco, using a bus specific limit. Also make
>> it clear that the number of gsis per I/O adapter is not directly
>> depending on the number of virtio queues, but rather the other way
>> around.
>>
>> Cc: Alexander Graf <agraf@suse.de>
>> Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
>> Cc: Christian Borntraeger <borntraeger@de.ibm.com>
>> Cc: Richard Henderson <rth@twiddle.net>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> hw/s390x/s390-virtio-ccw.c | 7 +++++--
>> hw/s390x/virtio-ccw.c | 19 ++++++++++++-------
>> include/hw/s390x/s390_flic.h | 4 +++-
>> 3 files changed, 20 insertions(+), 10 deletions(-)
>>
>
>> diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
>> index dfe6ded..935b880 100644
>> --- a/hw/s390x/virtio-ccw.c
>> +++ b/hw/s390x/virtio-ccw.c
>
>> @@ -265,8 +267,9 @@ static int virtio_ccw_set_vqs(SubchDev *sch,
>> uint64_t addr, uint32_t align,
>> uint16_t index, uint16_t num)
>> {
>> VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
>> + int queue_max = virtio_get_queue_max(vdev);
>>
>> - if (index > VIRTIO_PCI_QUEUE_MAX) {
>> + if (index > queue_max) {
>
> trivial conflict on master (>= instead of >)
Yes, because of 590fe5722b522e492a9c78adadae4def35b137dd.
>
>> return -EINVAL;
>> }
>>
>
> In master, there's also a new instance of VIRTIO_PCI_QUEUE_MAX (in
> virtio_ccw_cb()) which needs a similar treatment.
>
> But on the whole, looks good.
Thanks for the reminder, will address this in V6.
^ permalink raw reply [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 10/18] virtio-s390: switch to bus specific queue limit
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (8 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 09/18] virtio-ccw: introduce ccw " Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 11/18] virtio-mmio: " Jason Wang
` (8 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: mst, Jason Wang, Alexander Graf, Christian Borntraeger,
cornelia.huck, Richard Henderson
Instead of depending on a macro, switch to using a bus specific queue
limit.
Cc: Alexander Graf <agraf@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/s390x/s390-virtio-bus.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/hw/s390x/s390-virtio-bus.c b/hw/s390x/s390-virtio-bus.c
index 2b41e32..9d6028d 100644
--- a/hw/s390x/s390-virtio-bus.c
+++ b/hw/s390x/s390-virtio-bus.c
@@ -45,6 +45,8 @@
do { } while (0)
#endif
+#define VIRTIO_S390_QUEUE_MAX 64
+
static void virtio_s390_bus_new(VirtioBusState *bus, size_t bus_size,
VirtIOS390Device *dev);
@@ -344,7 +346,7 @@ static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev)
VirtIODevice *vdev = dev->vdev;
int num_vq;
- for (num_vq = 0; num_vq < VIRTIO_PCI_QUEUE_MAX; num_vq++) {
+ for (num_vq = 0; num_vq < virtio_get_queue_max(vdev); num_vq++) {
if (!virtio_queue_get_num(vdev, num_vq)) {
break;
}
@@ -446,7 +448,7 @@ VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus,
QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
VirtIOS390Device *dev = (VirtIOS390Device *)kid->child;
- for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+ for (i = 0; i < virtio_get_queue_max(dev->vdev); i++) {
if (!virtio_queue_get_addr(dev->vdev, i))
break;
if (virtio_queue_get_addr(dev->vdev, i) == mem) {
@@ -715,7 +717,7 @@ static void virtio_s390_bus_class_init(ObjectClass *klass, void *data)
bus_class->max_dev = 1;
k->notify = virtio_s390_notify;
k->get_features = virtio_s390_get_features;
- k->queue_max = VIRTIO_PCI_QUEUE_MAX;
+ k->queue_max = VIRTIO_S390_QUEUE_MAX;
}
static const TypeInfo virtio_s390_bus_info = {
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 11/18] virtio-mmio: switch to bus specific queue limit
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (9 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 10/18] virtio-s390: switch to bus " Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 12/18] virtio-pci: switch to use " Jason Wang
` (7 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, mst
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/virtio/virtio-mmio.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 2ae6942..dbd44b6 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -34,6 +34,8 @@ do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#define DPRINTF(fmt, ...) do {} while (0)
#endif
+#define VIRTIO_MMIO_QUEUE_MAX 64
+
/* QOM macros */
/* virtio-mmio-bus */
#define TYPE_VIRTIO_MMIO_BUS "virtio-mmio-bus"
@@ -237,7 +239,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
proxy->guest_page_shift);
break;
case VIRTIO_MMIO_QUEUESEL:
- if (value < VIRTIO_PCI_QUEUE_MAX) {
+ if (value < virtio_get_queue_max(vdev)) {
vdev->queue_sel = value;
}
break;
@@ -257,7 +259,7 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
}
break;
case VIRTIO_MMIO_QUEUENOTIFY:
- if (value < VIRTIO_PCI_QUEUE_MAX) {
+ if (value < virtio_get_queue_max(vdev)) {
virtio_queue_notify(vdev, value);
}
break;
@@ -403,7 +405,7 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
k->device_plugged = virtio_mmio_device_plugged;
k->has_variable_vring_alignment = true;
bus_class->max_dev = 1;
- k->queue_max = VIRTIO_PCI_QUEUE_MAX;
+ k->queue_max = VIRTIO_MMIO_QUEUE_MAX;
}
static const TypeInfo virtio_mmio_bus_info = {
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 12/18] virtio-pci: switch to use bus specific queue limit
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (10 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 11/18] virtio-mmio: " Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 13/18] virtio: introduce vector to virtqueues mapping Jason Wang
` (6 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, mst
Instead of depending on a macro, switch to use a bus specific queue
limit.
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/virtio/virtio-pci.c | 12 +++++++-----
include/hw/virtio/virtio.h | 2 --
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 075b13b..e556919 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -42,6 +42,8 @@
* configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
+#define VIRTIO_PCI_QUEUE_MAX 64
+
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
VirtIOPCIProxy *dev);
@@ -171,7 +173,7 @@ static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
return;
}
- for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+ for (n = 0; n < virtio_get_queue_max(vdev); n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
@@ -207,7 +209,7 @@ static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
return;
}
- for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
+ for (n = 0; n < virtio_get_queue_max(vdev); n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
@@ -243,11 +245,11 @@ static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
break;
case VIRTIO_PCI_QUEUE_SEL:
- if (val < VIRTIO_PCI_QUEUE_MAX)
+ if (val < virtio_get_queue_max(vdev))
vdev->queue_sel = val;
break;
case VIRTIO_PCI_QUEUE_NOTIFY:
- if (val < VIRTIO_PCI_QUEUE_MAX) {
+ if (val < virtio_get_queue_max(vdev)) {
virtio_queue_notify(vdev, val);
}
break;
@@ -748,7 +750,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
kvm_msi_via_irqfd_enabled();
- nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
+ nvqs = MIN(nvqs, virtio_get_queue_max(vdev));
/* When deassigning, pass a consistent nvqs value
* to avoid leaking notifiers.
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 91fd673..7ff40ac 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -48,8 +48,6 @@ typedef struct VirtQueueElement
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElement;
-#define VIRTIO_PCI_QUEUE_MAX 64
-
#define VIRTIO_NO_VECTOR 0xffff
#define TYPE_VIRTIO_DEVICE "virtio-device"
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 13/18] virtio: introduce vector to virtqueues mapping
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (11 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 12/18] virtio-pci: switch to use " Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 14/18] virtio: introduce virtio_queue_get_index() Jason Wang
` (5 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, mst
Currently we will try to traverse all virtqueues to find a subset that
using a specific vector. This is sub optimal when we will support
hundreds or even thousands of virtqueues. So this patch introduces a
method which could be used by transport to get all virtqueues that
using a same vector. This is done through QLISTs and the number of
QLISTs was queried through a transport specific method. When guest
setting vectors, the virtqueue will be linked and helpers for traverse
the list was also introduced.
The first user will be virtio pci which will use this to speed up
MSI-X masking and unmasking handling.
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/virtio/virtio-pci.c | 8 ++++++++
hw/virtio/virtio.c | 36 ++++++++++++++++++++++++++++++++++--
include/hw/virtio/virtio-bus.h | 1 +
include/hw/virtio/virtio.h | 3 +++
4 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index e556919..c38f33f 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -910,6 +910,13 @@ static const TypeInfo virtio_9p_pci_info = {
* virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
*/
+static int virtio_pci_query_nvectors(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+
+ return proxy->nvectors;
+}
+
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d)
{
@@ -1500,6 +1507,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->vmstate_change = virtio_pci_vmstate_change;
k->device_plugged = virtio_pci_device_plugged;
k->device_unplugged = virtio_pci_device_unplugged;
+ k->query_nvectors = virtio_pci_query_nvectors;
k->queue_max = VIRTIO_PCI_QUEUE_MAX;
}
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index bbb224f..159e5c6 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -89,6 +89,7 @@ struct VirtQueue
VirtIODevice *vdev;
EventNotifier guest_notifier;
EventNotifier host_notifier;
+ QLIST_ENTRY(VirtQueue) node;
};
/* virt queue functions */
@@ -613,7 +614,7 @@ void virtio_reset(void *opaque)
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].pa = 0;
- vdev->vq[i].vector = VIRTIO_NO_VECTOR;
+ virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
@@ -738,6 +739,16 @@ void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
virtqueue_init(&vdev->vq[n]);
}
+VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
+{
+ return QLIST_FIRST(&vdev->vector_queues[vector]);
+}
+
+VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
+{
+ return QLIST_NEXT(vq, node);
+}
+
int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.num;
@@ -789,8 +800,19 @@ uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
- if (n < virtio_get_queue_max(vdev))
+ VirtQueue *vq = &vdev->vq[n];
+
+ if (n < virtio_get_queue_max(vdev)) {
+ if (vdev->vector_queues &&
+ vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
+ QLIST_REMOVE(vq, node);
+ }
vdev->vq[n].vector = vector;
+ if (vdev->vector_queues &&
+ vector != VIRTIO_NO_VECTOR) {
+ QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
+ }
+ }
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
@@ -1098,6 +1120,7 @@ void virtio_cleanup(VirtIODevice *vdev)
qemu_del_vm_change_state_handler(vdev->vmstate);
g_free(vdev->config);
g_free(vdev->vq);
+ g_free(vdev->vector_queues);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
@@ -1135,7 +1158,16 @@ void virtio_instance_init_common(Object *proxy_obj, void *data,
void virtio_init(VirtIODevice *vdev, const char *name,
uint16_t device_id, size_t config_size)
{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int i, queue_max = virtio_get_queue_max(vdev);
+ int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
+
+ if (nvectors) {
+ vdev->vector_queues =
+ g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
+ }
+
vdev->device_id = device_id;
vdev->status = 0;
vdev->isr = 0;
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 4da8022..12386d5 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -62,6 +62,7 @@ typedef struct VirtioBusClass {
* This is called by virtio-bus just before the device is unplugged.
*/
void (*device_unplugged)(DeviceState *d);
+ int (*query_nvectors)(DeviceState *d);
/*
* Does the transport have variable vring alignment?
* (ie can it ever call virtio_queue_set_align()?)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 7ff40ac..e3adb1d 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -82,6 +82,7 @@ struct VirtIODevice
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
+ QLIST_HEAD(, VirtQueue) * vector_queues;
};
typedef struct VirtioDeviceClass {
@@ -217,6 +218,8 @@ void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
bool set_handler);
void virtio_queue_notify_vq(VirtQueue *vq);
void virtio_irq(VirtQueue *vq);
+VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
+VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
static inline void virtio_add_feature(uint32_t *features, unsigned int fbit)
{
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 14/18] virtio: introduce virtio_queue_get_index()
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (12 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 13/18] virtio: introduce vector to virtqueues mapping Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 15/18] virtio-pci: speedup MSI-X masking and unmasking Jason Wang
` (4 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, mst
This patch introduces a helper that can get the queue index of a
VirtQueue. This is useful when traversing the list of VirtQueues.
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/virtio/virtio.c | 5 +++++
include/hw/virtio/virtio.h | 1 +
2 files changed, 6 insertions(+)
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 159e5c6..ee3a66d 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -725,6 +725,11 @@ hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
return vdev->vq[n].pa;
}
+int virtio_queue_get_index(VirtIODevice *vdev, VirtQueue *vq)
+{
+ return vq - vdev->vq;
+}
+
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
/* Don't allow guest to flip queue between existent and
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index e3adb1d..f148590 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -171,6 +171,7 @@ void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data);
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr);
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n);
+int virtio_queue_get_index(VirtIODevice *vdev, VirtQueue *vq);
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num);
int virtio_queue_get_num(VirtIODevice *vdev, int n);
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align);
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 15/18] virtio-pci: speedup MSI-X masking and unmasking
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (13 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 14/18] virtio: introduce virtio_queue_get_index() Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513 Jason Wang
` (3 subsequent siblings)
18 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel; +Cc: cornelia.huck, Jason Wang, mst
This patch tries to speed up the MSI-X masking and unmasking through
the mapping between vector and queues. With this patch there's
no need to go through all possible virtqueues, which may help to
reduce the time spent when doing MSI-X masking/unmasking a single
vector when more than hundreds or even thousands of virtqueues were
supported.
Tested with 80 queue pairs virtio-net-pci by changing the smp affinity
in the background and doing netperf at the same time:
Before the patch:
5711.70 Gbits/sec
After the patch:
6830.98 Gbits/sec
About 19.6% improvements in throughput.
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/virtio/virtio-pci.c | 40 +++++++++++++++++++++-------------------
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index c38f33f..9a5242a 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -632,28 +632,30 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
{
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int ret, queue_no;
+ VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+ int ret, index, unmasked = 0;
- for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
- if (!virtio_queue_get_num(vdev, queue_no)) {
+ while (vq) {
+ index = virtio_queue_get_index(vdev, vq);
+ if (!virtio_queue_get_num(vdev, index)) {
break;
}
- if (virtio_queue_vector(vdev, queue_no) != vector) {
- continue;
- }
- ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
+ ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
if (ret < 0) {
goto undo;
}
+ vq = virtio_vector_next_queue(vq);
+ ++unmasked;
}
+
return 0;
undo:
- while (--queue_no >= 0) {
- if (virtio_queue_vector(vdev, queue_no) != vector) {
- continue;
- }
- virtio_pci_vq_vector_mask(proxy, queue_no, vector);
+ vq = virtio_vector_first_queue(vdev, vector);
+ while (vq && --unmasked >= 0) {
+ index = virtio_queue_get_index(vdev, vq);
+ virtio_pci_vq_vector_mask(proxy, index, vector);
+ vq = virtio_vector_next_queue(vq);
}
return ret;
}
@@ -662,16 +664,16 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
{
VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int queue_no;
+ VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
+ int index;
- for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
- if (!virtio_queue_get_num(vdev, queue_no)) {
+ while (vq) {
+ index = virtio_queue_get_index(vdev, vq);
+ if (!virtio_queue_get_num(vdev, index)) {
break;
}
- if (virtio_queue_vector(vdev, queue_no) != vector) {
- continue;
- }
- virtio_pci_vq_vector_mask(proxy, queue_no, vector);
+ virtio_pci_vq_vector_mask(proxy, index, vector);
+ vq = virtio_vector_next_queue(vq);
}
}
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (14 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 15/18] virtio-pci: speedup MSI-X masking and unmasking Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-07 16:54 ` Alexander Graf
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar() Jason Wang
` (2 subsequent siblings)
18 siblings, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: mst, Jason Wang, Alexander Graf, qemu-ppc, cornelia.huck,
Paolo Bonzini, Richard Henderson
This patch increases the maximum number of virtqueues for pci from 64
to 513. This will allow booting a virtio-net-pci device with 256 queue
pairs.
To keep migration compatibility, 64 was kept for legacy machine
types. This is because qemu in fact allows guest to probe the limit of
virtqueues through trying to set queue_sel. So for legacy machine
type, we should make sure setting queue_sel to more than 63 won't
take effect.
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: qemu-ppc@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/i386/pc_piix.c | 5 +++++
hw/i386/pc_q35.c | 5 +++++
hw/ppc/spapr.c | 5 +++++
hw/virtio/virtio-pci.c | 2 +-
4 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 212e263..6e098ce 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -49,6 +49,7 @@
#include "hw/acpi/acpi.h"
#include "cpu.h"
#include "qemu/error-report.h"
+#include "hw/virtio/virtio-bus.h"
#ifdef CONFIG_XEN
# include <xen/hvm/hvm_info_table.h>
#endif
@@ -312,6 +313,10 @@ static void pc_init_pci(MachineState *machine)
static void pc_compat_2_3(MachineState *machine)
{
+ ObjectClass *klass = object_class_by_name("virtio-pci-bus");
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+
+ k->queue_max = 64;
}
static void pc_compat_2_2(MachineState *machine)
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index e67f2de..ff7c414 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -45,6 +45,7 @@
#include "hw/usb.h"
#include "hw/cpu/icc_bus.h"
#include "qemu/error-report.h"
+#include "include/hw/virtio/virtio-bus.h"
/* ICH9 AHCI has 6 ports */
#define MAX_SATA_PORTS 6
@@ -291,6 +292,10 @@ static void pc_q35_init(MachineState *machine)
static void pc_compat_2_3(MachineState *machine)
{
+ ObjectClass *klass = object_class_by_name("virtio-pci-bus");
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+
+ k->queue_max = 64;
}
static void pc_compat_2_2(MachineState *machine)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 8e43aa2..ee8f6a3 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -50,6 +50,7 @@
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
+#include "hw/virtio/virtio-bus.h"
#include "exec/address-spaces.h"
#include "hw/usb.h"
@@ -1827,6 +1828,10 @@ static const TypeInfo spapr_machine_info = {
static void spapr_compat_2_3(Object *obj)
{
+ ObjectClass *klass = object_class_by_name("virtio-pci-bus");
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+
+ k->queue_max = 64;
}
static void spapr_compat_2_2(Object *obj)
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 9a5242a..02e3ce8 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -42,7 +42,7 @@
* configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
-#define VIRTIO_PCI_QUEUE_MAX 64
+#define VIRTIO_PCI_QUEUE_MAX 513
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
VirtIOPCIProxy *dev);
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513 Jason Wang
@ 2015-04-07 16:54 ` Alexander Graf
2015-04-07 18:06 ` Luigi Rizzo
2015-04-08 8:10 ` Jason Wang
0 siblings, 2 replies; 39+ messages in thread
From: Alexander Graf @ 2015-04-07 16:54 UTC (permalink / raw)
To: Jason Wang, qemu-devel
Cc: cornelia.huck, Paolo Bonzini, Richard Henderson, qemu-ppc, mst
On 04/01/2015 10:15 AM, Jason Wang wrote:
> This patch increases the maximum number of virtqueues for pci from 64
> to 513. This will allow booting a virtio-net-pci device with 256 queue
> pairs.
>
> To keep migration compatibility, 64 was kept for legacy machine
> types. This is because qemu in fact allows guest to probe the limit of
> virtqueues through trying to set queue_sel. So for legacy machine
> type, we should make sure setting queue_sel to more than 63 won't
> make effect.
>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Richard Henderson <rth@twiddle.net>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Alexander Graf <agraf@suse.de>
> Cc: qemu-ppc@nongnu.org
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> hw/i386/pc_piix.c | 5 +++++
> hw/i386/pc_q35.c | 5 +++++
> hw/ppc/spapr.c | 5 +++++
> hw/virtio/virtio-pci.c | 2 +-
> 4 files changed, 16 insertions(+), 1 deletion(-)
>
> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
> index 212e263..6e098ce 100644
> --- a/hw/i386/pc_piix.c
> +++ b/hw/i386/pc_piix.c
> @@ -49,6 +49,7 @@
> #include "hw/acpi/acpi.h"
> #include "cpu.h"
> #include "qemu/error-report.h"
> +#include "hw/virtio/virtio-bus.h"
> #ifdef CONFIG_XEN
> # include <xen/hvm/hvm_info_table.h>
> #endif
> @@ -312,6 +313,10 @@ static void pc_init_pci(MachineState *machine)
>
> static void pc_compat_2_3(MachineState *machine)
> {
> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
> +
> + k->queue_max = 64;
> }
>
> static void pc_compat_2_2(MachineState *machine)
> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
> index e67f2de..ff7c414 100644
> --- a/hw/i386/pc_q35.c
> +++ b/hw/i386/pc_q35.c
> @@ -45,6 +45,7 @@
> #include "hw/usb.h"
> #include "hw/cpu/icc_bus.h"
> #include "qemu/error-report.h"
> +#include "include/hw/virtio/virtio-bus.h"
>
> /* ICH9 AHCI has 6 ports */
> #define MAX_SATA_PORTS 6
> @@ -291,6 +292,10 @@ static void pc_q35_init(MachineState *machine)
>
> static void pc_compat_2_3(MachineState *machine)
> {
> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
> +
> + k->queue_max = 64;
> }
>
> static void pc_compat_2_2(MachineState *machine)
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 8e43aa2..ee8f6a3 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -50,6 +50,7 @@
> #include "hw/pci/pci.h"
> #include "hw/scsi/scsi.h"
> #include "hw/virtio/virtio-scsi.h"
> +#include "hw/virtio/virtio-bus.h"
>
> #include "exec/address-spaces.h"
> #include "hw/usb.h"
> @@ -1827,6 +1828,10 @@ static const TypeInfo spapr_machine_info = {
>
> static void spapr_compat_2_3(Object *obj)
> {
> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
> +
> + k->queue_max = 64;
> }
>
> static void spapr_compat_2_2(Object *obj)
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 9a5242a..02e3ce8 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -42,7 +42,7 @@
> * configuration space */
> #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>
> -#define VIRTIO_PCI_QUEUE_MAX 64
> +#define VIRTIO_PCI_QUEUE_MAX 513
513 is an interesting number. Any particular reason for it? Maybe this
was mentioned before and I just missed it ;)
Alex
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-07 16:54 ` Alexander Graf
@ 2015-04-07 18:06 ` Luigi Rizzo
2015-04-07 18:33 ` Alexander Graf
` (2 more replies)
2015-04-08 8:10 ` Jason Wang
1 sibling, 3 replies; 39+ messages in thread
From: Luigi Rizzo @ 2015-04-07 18:06 UTC (permalink / raw)
To: Alexander Graf
Cc: Michael S. Tsirkin, Jason Wang, qemu-devel, qemu-ppc,
cornelia.huck, Paolo Bonzini, Richard Henderson
[-- Attachment #1: Type: text/plain, Size: 896 bytes --]
On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de> wrote:
> On 04/01/2015 10:15 AM, Jason Wang wrote:
>
>> This patch increases the maximum number of virtqueues for pci from 64
>> to 513. This will allow booting a virtio-net-pci device with 256 queue
>> pairs.
>> ...
>>
>> * configuration space */
>> #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_
>> enabled(dev))
>> -#define VIRTIO_PCI_QUEUE_MAX 64
>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>
>
> 513 is an interesting number. Any particular reason for it? Maybe this was
> mentioned before and I just missed it ;)
>
>
quite large, too. I thought multiple queue pairs were useful
to split the load for multicore machines, but targeting VMs with
up to 256 cores (and presumably an equal number in the host)
seems really forward looking.
On the other hand, the message is dated april first...
cheers
luigi
[-- Attachment #2: Type: text/html, Size: 2829 bytes --]
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-07 18:06 ` Luigi Rizzo
@ 2015-04-07 18:33 ` Alexander Graf
2015-04-08 8:29 ` Jason Wang
2015-04-08 8:27 ` Jason Wang
2015-04-08 13:23 ` Michael S. Tsirkin
2 siblings, 1 reply; 39+ messages in thread
From: Alexander Graf @ 2015-04-07 18:33 UTC (permalink / raw)
To: Luigi Rizzo
Cc: Michael S. Tsirkin, Jason Wang, qemu-devel, qemu-ppc,
cornelia.huck, Paolo Bonzini, Richard Henderson
[-- Attachment #1: Type: text/plain, Size: 1286 bytes --]
On 04/07/2015 08:06 PM, Luigi Rizzo wrote:
>
>
> On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de
> <mailto:agraf@suse.de>> wrote:
>
> On 04/01/2015 10:15 AM, Jason Wang wrote:
>
> This patch increases the maximum number of virtqueues for pci
> from 64
> to 513. This will allow booting a virtio-net-pci device with
> 256 queue
> pairs.
> ...
>
> * configuration space */
> #define VIRTIO_PCI_CONFIG_SIZE(dev)
> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
> -#define VIRTIO_PCI_QUEUE_MAX 64
> +#define VIRTIO_PCI_QUEUE_MAX 513
>
>
> 513 is an interesting number. Any particular reason for it? Maybe
> this was mentioned before and I just missed it ;)
>
>
> quite large, too. I thought multiple queue pairs were useful
> to split the load for multicore machines, but targeting VMs with
> up to 256 cores (and presumably an equal number in the host)
> seems really forward looking.
They can also be useful in case your host tap queue is full, so going
higher than the host core count may make sense for throughput.
However, I am in doubt that there is a one-size-fits-all answer to this.
Could we maybe make the queue size configurable via a qdev property?
Alex
[-- Attachment #2: Type: text/html, Size: 3745 bytes --]
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-07 18:33 ` Alexander Graf
@ 2015-04-08 8:29 ` Jason Wang
2015-04-08 8:41 ` Alexander Graf
0 siblings, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-08 8:29 UTC (permalink / raw)
To: Alexander Graf
Cc: Michael S. Tsirkin, qemu-devel, qemu-ppc, cornelia.huck,
Paolo Bonzini, Luigi Rizzo, Richard Henderson
On Wed, Apr 8, 2015 at 2:33 AM, Alexander Graf <agraf@suse.de> wrote:
> On 04/07/2015 08:06 PM, Luigi Rizzo wrote:
>>
>>
>> On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de> wrote:
>>> On 04/01/2015 10:15 AM, Jason Wang wrote:
>>>> This patch increases the maximum number of virtqueues for pci from
>>>> 64
>>>> to 513. This will allow booting a virtio-net-pci device with 256
>>>> queue
>>>> pairs.
>>>> ...
>>>> * configuration space */
>>>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>>>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>>>> -#define VIRTIO_PCI_QUEUE_MAX 64
>>>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>>
>>> 513 is an interesting number. Any particular reason for it? Maybe
>>> this was mentioned before and I just missed it ;)
>>>
>>
>> quite large, too. I thought multiple queue pairs were useful
>> to split the load for multicore machines, but targeting VMs with
>> up to 256 cores (and presumably an equal number in the host)
>> seems really forward looking.
>
> They can also be useful in case your host tap queue is full, so going
> higher than the host core count may make sense for throughput.
>
> However, I am in doubt that there is a one-size-fits-all answer to
> this. Could we maybe make the queue size configurable via a qdev
> property?
We can do this on top but I'm not sure I understand the question. Do
you mean a per-device limitation?
>
>
> Alex
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-08 8:29 ` Jason Wang
@ 2015-04-08 8:41 ` Alexander Graf
2015-04-08 9:04 ` Jason Wang
0 siblings, 1 reply; 39+ messages in thread
From: Alexander Graf @ 2015-04-08 8:41 UTC (permalink / raw)
To: Jason Wang
Cc: Michael S. Tsirkin, qemu-devel, qemu-ppc, cornelia.huck,
Paolo Bonzini, Luigi Rizzo, Richard Henderson
On 08.04.15 10:29, Jason Wang wrote:
>
>
> On Wed, Apr 8, 2015 at 2:33 AM, Alexander Graf <agraf@suse.de> wrote:
>> On 04/07/2015 08:06 PM, Luigi Rizzo wrote:
>>>
>>>
>>> On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de> wrote:
>>>> On 04/01/2015 10:15 AM, Jason Wang wrote:
>>>>> This patch increases the maximum number of virtqueues for pci from 64
>>>>> to 513. This will allow booting a virtio-net-pci device with 256 queue
>>>>> pairs.
>>>>> ... * configuration space */
>>>>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>>>>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>>>>> -#define VIRTIO_PCI_QUEUE_MAX 64
>>>>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>>>
>>>> 513 is an interesting number. Any particular reason for it? Maybe
>>>> this was mentioned before and I just missed it ;)
>>>>
>>>
>>> quite large, too. I thought multiple queue pairs were useful
>>> to split the load for multicore machines, but targeting VMs with
>>> up to 256 cores (and presumably an equal number in the host)
>>> seems really forward looking.
>>
>> They can also be useful in case your host tap queue is full, so going
>> higher than the host core count may make sense for throughput.
>>
>> However, I am in doubt that there is a one-size-fits-all answer to
>> this. Could we maybe make the queue size configurable via a qdev
>> property?
>
> We can do this on top but I'm not sure I understand the question. Do you
> mean a per-device limitation?
Ok, let me rephrase the question. Why isn't the limit 64k? Would we hit
resource constraints? Imagine that in Linux 5.0 the kernel tap interface
extends to way more queues, we'd be stuck in the same situation as
today, no?
Alex
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-08 8:41 ` Alexander Graf
@ 2015-04-08 9:04 ` Jason Wang
0 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-08 9:04 UTC (permalink / raw)
To: Alexander Graf
Cc: Michael S. Tsirkin, qemu-devel, qemu-ppc, cornelia.huck,
Paolo Bonzini, Luigi Rizzo, Richard Henderson
On Wed, Apr 8, 2015 at 4:41 PM, Alexander Graf <agraf@suse.de> wrote:
>
>
> On 08.04.15 10:29, Jason Wang wrote:
>>
>>
>> On Wed, Apr 8, 2015 at 2:33 AM, Alexander Graf <agraf@suse.de>
>> wrote:
>>> On 04/07/2015 08:06 PM, Luigi Rizzo wrote:
>>>>
>>>>
>>>> On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de>
>>>> wrote:
>>>>> On 04/01/2015 10:15 AM, Jason Wang wrote:
>>>>>> This patch increases the maximum number of virtqueues for pci
>>>>>> from 64
>>>>>> to 513. This will allow booting a virtio-net-pci device with
>>>>>> 256 queue
>>>>>> pairs.
>>>>>> ... * configuration space */
>>>>>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>>>>>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>>>>>> -#define VIRTIO_PCI_QUEUE_MAX 64
>>>>>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>>>>
>>>>> 513 is an interesting number. Any particular reason for it? Maybe
>>>>> this was mentioned before and I just missed it ;)
>>>>>
>>>>
>>>> quite large, too. I thought multiple queue pairs were useful
>>>> to split the load for multicore machines, but targeting VMs with
>>>> up to 256 cores (and presumably an equal number in the host)
>>>> seems really forward looking.
>>>
>>> They can also be useful in case your host tap queue is full, so
>>> going
>>> higher than the host core count may make sense for throughput.
>>>
>>> However, I am in doubt that there is a one-size-fits-all answer to
>>> this. Could we maybe make the queue size configurable via a qdev
>>> property?
>>
>> We can do this on top but I'm not sure I understand the question.
>> Do you
>> mean a per-device limitation?
>
> Ok, let me rephrase the question. Why isn't the limit 64k?
Because there's no real use case for 64k but I agree to make more space
for the future extension.
> Would we hit
> resource constraints?
Probably not since VirtQueue is rather small. 64K will consume about 4
or 5 MB, not sure it was too big but looks ok.
> Imagine that in Linux 5.0 the kernel tap interface
> extends to way more queues, we'd be stuck in the same situation as
> today, no?
Yes, but any limit will be exceeded in the future.
>
>
> Alex
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-07 18:06 ` Luigi Rizzo
2015-04-07 18:33 ` Alexander Graf
@ 2015-04-08 8:27 ` Jason Wang
2015-04-08 13:23 ` Michael S. Tsirkin
2 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-08 8:27 UTC (permalink / raw)
To: Luigi Rizzo
Cc: Michael S. Tsirkin, Alexander Graf, qemu-devel, qemu-ppc,
cornelia.huck, Paolo Bonzini, Richard Henderson
On Wed, Apr 8, 2015 at 2:06 AM, Luigi Rizzo <rizzo@iet.unipi.it> wrote:
>
>
> On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de> wrote:
>> On 04/01/2015 10:15 AM, Jason Wang wrote:
>>> This patch increases the maximum number of virtqueues for pci from
>>> 64
>>> to 513. This will allow booting a virtio-net-pci device with 256
>>> queue
>>> pairs.
>>> ...
>>> * configuration space */
>>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>>> -#define VIRTIO_PCI_QUEUE_MAX 64
>>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>
>> 513 is an interesting number. Any particular reason for it? Maybe
>> this was mentioned before and I just missed it ;)
>>
>
> quite large, too. I thought multiple queue pairs were useful
> to split the load for multicore machines, but targeting VMs with
> up to 256 cores (and presumably an equal number in the host)
> seems really forward looking.
Probably not, since KVM can support up to 255 vcpus now.
>
> On the other hand, the message is dated april first...
> cheers
> luigi
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-07 18:06 ` Luigi Rizzo
2015-04-07 18:33 ` Alexander Graf
2015-04-08 8:27 ` Jason Wang
@ 2015-04-08 13:23 ` Michael S. Tsirkin
2 siblings, 0 replies; 39+ messages in thread
From: Michael S. Tsirkin @ 2015-04-08 13:23 UTC (permalink / raw)
To: Luigi Rizzo
Cc: Jason Wang, Alexander Graf, qemu-devel, qemu-ppc, cornelia.huck,
Paolo Bonzini, Richard Henderson
On Tue, Apr 07, 2015 at 08:06:14PM +0200, Luigi Rizzo wrote:
>
>
> On Tue, Apr 7, 2015 at 6:54 PM, Alexander Graf <agraf@suse.de> wrote:
>
> On 04/01/2015 10:15 AM, Jason Wang wrote:
>
> This patch increases the maximum number of virtqueues for pci from 64
> to 513. This will allow booting a virtio-net-pci device with 256 queue
> pairs.
> ...
>
> * configuration space */
> #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF
> (msix_enabled(dev))
> -#define VIRTIO_PCI_QUEUE_MAX 64
> +#define VIRTIO_PCI_QUEUE_MAX 513
>
>
> 513 is an interesting number. Any particular reason for it? Maybe this was
> mentioned before and I just missed it ;)
>
>
>
> quite large, too. I thought multiple queue pairs were useful
> to split the load for multicore machines, but targeting VMs with
> up to 256 cores (and presumably an equal number in the host)
> seems really forward looking.
Well, that's the # of VCPUs QEMU supports ATM, no?
Seems silly to have the limit on vqs block us from creating
such VMs.
Maybe just define it as max cpus * 2 + 1.
> On the other hand, the message is dated april first...
> cheers
> luigi
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-07 16:54 ` Alexander Graf
2015-04-07 18:06 ` Luigi Rizzo
@ 2015-04-08 8:10 ` Jason Wang
2015-04-08 8:13 ` Alexander Graf
1 sibling, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-08 8:10 UTC (permalink / raw)
To: Alexander Graf, qemu-devel
Cc: cornelia.huck, Paolo Bonzini, mst, qemu-ppc, Richard Henderson
On 04/08/2015 12:54 AM, Alexander Graf wrote:
> On 04/01/2015 10:15 AM, Jason Wang wrote:
>> This patch increases the maximum number of virtqueues for pci from 64
>> to 513. This will allow booting a virtio-net-pci device with 256 queue
>> pairs.
>>
>> To keep migration compatibility, 64 was kept for legacy machine
>> types. This is because qemu in fact allows guest to probe the limit of
>> virtqueues through trying to set queue_sel. So for legacy machine
>> type, we should make sure setting queue_sel to more than 63 won't
>> make effect.
>>
>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>> Cc: Richard Henderson <rth@twiddle.net>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> Cc: Alexander Graf <agraf@suse.de>
>> Cc: qemu-ppc@nongnu.org
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> hw/i386/pc_piix.c | 5 +++++
>> hw/i386/pc_q35.c | 5 +++++
>> hw/ppc/spapr.c | 5 +++++
>> hw/virtio/virtio-pci.c | 2 +-
>> 4 files changed, 16 insertions(+), 1 deletion(-)
>>
>> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
>> index 212e263..6e098ce 100644
>> --- a/hw/i386/pc_piix.c
>> +++ b/hw/i386/pc_piix.c
>> @@ -49,6 +49,7 @@
>> #include "hw/acpi/acpi.h"
>> #include "cpu.h"
>> #include "qemu/error-report.h"
>> +#include "hw/virtio/virtio-bus.h"
>> #ifdef CONFIG_XEN
>> # include <xen/hvm/hvm_info_table.h>
>> #endif
>> @@ -312,6 +313,10 @@ static void pc_init_pci(MachineState *machine)
>> static void pc_compat_2_3(MachineState *machine)
>> {
>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>> +
>> + k->queue_max = 64;
>> }
>> static void pc_compat_2_2(MachineState *machine)
>> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
>> index e67f2de..ff7c414 100644
>> --- a/hw/i386/pc_q35.c
>> +++ b/hw/i386/pc_q35.c
>> @@ -45,6 +45,7 @@
>> #include "hw/usb.h"
>> #include "hw/cpu/icc_bus.h"
>> #include "qemu/error-report.h"
>> +#include "include/hw/virtio/virtio-bus.h"
>> /* ICH9 AHCI has 6 ports */
>> #define MAX_SATA_PORTS 6
>> @@ -291,6 +292,10 @@ static void pc_q35_init(MachineState *machine)
>> static void pc_compat_2_3(MachineState *machine)
>> {
>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>> +
>> + k->queue_max = 64;
>> }
>> static void pc_compat_2_2(MachineState *machine)
>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>> index 8e43aa2..ee8f6a3 100644
>> --- a/hw/ppc/spapr.c
>> +++ b/hw/ppc/spapr.c
>> @@ -50,6 +50,7 @@
>> #include "hw/pci/pci.h"
>> #include "hw/scsi/scsi.h"
>> #include "hw/virtio/virtio-scsi.h"
>> +#include "hw/virtio/virtio-bus.h"
>> #include "exec/address-spaces.h"
>> #include "hw/usb.h"
>> @@ -1827,6 +1828,10 @@ static const TypeInfo spapr_machine_info = {
>> static void spapr_compat_2_3(Object *obj)
>> {
>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>> +
>> + k->queue_max = 64;
>> }
>> static void spapr_compat_2_2(Object *obj)
>> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
>> index 9a5242a..02e3ce8 100644
>> --- a/hw/virtio/virtio-pci.c
>> +++ b/hw/virtio/virtio-pci.c
>> @@ -42,7 +42,7 @@
>> * configuration space */
>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>> -#define VIRTIO_PCI_QUEUE_MAX 64
>> +#define VIRTIO_PCI_QUEUE_MAX 513
>
> 513 is an interesting number. Any particular reason for it? Maybe this
> was mentioned before and I just missed it ;)
>
>
> Alex
>
>
The number was chosen to match the kernel limit for tuntap queues. Recent
kernels allow up to 256 queues for tuntap, so 513 is in fact 256 * 2 plus
1 control vq.
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-08 8:10 ` Jason Wang
@ 2015-04-08 8:13 ` Alexander Graf
2015-04-08 8:30 ` Jason Wang
0 siblings, 1 reply; 39+ messages in thread
From: Alexander Graf @ 2015-04-08 8:13 UTC (permalink / raw)
To: Jason Wang
Cc: mst@redhat.com, qemu-devel@nongnu.org, qemu-ppc@nongnu.org,
cornelia.huck@de.ibm.com, Paolo Bonzini, Richard Henderson
> Am 08.04.2015 um 10:10 schrieb Jason Wang <jasowang@redhat.com>:
>
>
>
>> On 04/08/2015 12:54 AM, Alexander Graf wrote:
>>> On 04/01/2015 10:15 AM, Jason Wang wrote:
>>> This patch increases the maximum number of virtqueues for pci from 64
>>> to 513. This will allow booting a virtio-net-pci device with 256 queue
>>> pairs.
>>>
>>> To keep migration compatibility, 64 was kept for legacy machine
>>> types. This is because qemu in fact allows guest to probe the limit of
>>> virtqueues through trying to set queue_sel. So for legacy machine
>>> type, we should make sure setting queue_sel to more than 63 won't
>>> make effect.
>>>
>>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>>> Cc: Richard Henderson <rth@twiddle.net>
>>> Cc: Michael S. Tsirkin <mst@redhat.com>
>>> Cc: Alexander Graf <agraf@suse.de>
>>> Cc: qemu-ppc@nongnu.org
>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>> ---
>>> hw/i386/pc_piix.c | 5 +++++
>>> hw/i386/pc_q35.c | 5 +++++
>>> hw/ppc/spapr.c | 5 +++++
>>> hw/virtio/virtio-pci.c | 2 +-
>>> 4 files changed, 16 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
>>> index 212e263..6e098ce 100644
>>> --- a/hw/i386/pc_piix.c
>>> +++ b/hw/i386/pc_piix.c
>>> @@ -49,6 +49,7 @@
>>> #include "hw/acpi/acpi.h"
>>> #include "cpu.h"
>>> #include "qemu/error-report.h"
>>> +#include "hw/virtio/virtio-bus.h"
>>> #ifdef CONFIG_XEN
>>> # include <xen/hvm/hvm_info_table.h>
>>> #endif
>>> @@ -312,6 +313,10 @@ static void pc_init_pci(MachineState *machine)
>>> static void pc_compat_2_3(MachineState *machine)
>>> {
>>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>>> +
>>> + k->queue_max = 64;
>>> }
>>> static void pc_compat_2_2(MachineState *machine)
>>> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
>>> index e67f2de..ff7c414 100644
>>> --- a/hw/i386/pc_q35.c
>>> +++ b/hw/i386/pc_q35.c
>>> @@ -45,6 +45,7 @@
>>> #include "hw/usb.h"
>>> #include "hw/cpu/icc_bus.h"
>>> #include "qemu/error-report.h"
>>> +#include "include/hw/virtio/virtio-bus.h"
>>> /* ICH9 AHCI has 6 ports */
>>> #define MAX_SATA_PORTS 6
>>> @@ -291,6 +292,10 @@ static void pc_q35_init(MachineState *machine)
>>> static void pc_compat_2_3(MachineState *machine)
>>> {
>>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>>> +
>>> + k->queue_max = 64;
>>> }
>>> static void pc_compat_2_2(MachineState *machine)
>>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>>> index 8e43aa2..ee8f6a3 100644
>>> --- a/hw/ppc/spapr.c
>>> +++ b/hw/ppc/spapr.c
>>> @@ -50,6 +50,7 @@
>>> #include "hw/pci/pci.h"
>>> #include "hw/scsi/scsi.h"
>>> #include "hw/virtio/virtio-scsi.h"
>>> +#include "hw/virtio/virtio-bus.h"
>>> #include "exec/address-spaces.h"
>>> #include "hw/usb.h"
>>> @@ -1827,6 +1828,10 @@ static const TypeInfo spapr_machine_info = {
>>> static void spapr_compat_2_3(Object *obj)
>>> {
>>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>>> +
>>> + k->queue_max = 64;
>>> }
>>> static void spapr_compat_2_2(Object *obj)
>>> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
>>> index 9a5242a..02e3ce8 100644
>>> --- a/hw/virtio/virtio-pci.c
>>> +++ b/hw/virtio/virtio-pci.c
>>> @@ -42,7 +42,7 @@
>>> * configuration space */
>>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>>> -#define VIRTIO_PCI_QUEUE_MAX 64
>>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>
>> 513 is an interesting number. Any particular reason for it? Maybe this
>> was mentioned before and I just missed it ;)
>>
>>
>> Alex
>
> The number were chose to match kernel limit for tuntap queues. Recent
> kernel allow up to 256 queues for tuntap, so 513 is in fact 256 * 2 plus
> 1 control vq.
I see. I'm not fully convinced that it's a good idea to bake that limit into qemu, but we have to have a limit somewhere.
However, could you please make it more obvious in the code where this number stems from? Either by using defines or with a comment.
Alex
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513
2015-04-08 8:13 ` Alexander Graf
@ 2015-04-08 8:30 ` Jason Wang
0 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-08 8:30 UTC (permalink / raw)
To: Alexander Graf
Cc: mst@redhat.com, qemu-devel@nongnu.org, qemu-ppc@nongnu.org,
cornelia.huck@de.ibm.com, Paolo Bonzini, Richard Henderson
On Wed, Apr 8, 2015 at 4:13 PM, Alexander Graf <agraf@suse.de> wrote:
>
>
>
>> Am 08.04.2015 um 10:10 schrieb Jason Wang <jasowang@redhat.com>:
>>
>>
>>
>>> On 04/08/2015 12:54 AM, Alexander Graf wrote:
>>>> On 04/01/2015 10:15 AM, Jason Wang wrote:
>>>> This patch increases the maximum number of virtqueues for pci
>>>> from 64
>>>> to 513. This will allow booting a virtio-net-pci device with 256
>>>> queue
>>>> pairs.
>>>>
>>>> To keep migration compatibility, 64 was kept for legacy machine
>>>> types. This is because qemu in fact allows guest to probe the
>>>> limit of
>>>> virtqueues through trying to set queue_sel. So for legacy machine
>>>> type, we should make sure setting queue_sel to more than 63 won't
>>>> make effect.
>>>>
>>>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>>>> Cc: Richard Henderson <rth@twiddle.net>
>>>> Cc: Michael S. Tsirkin <mst@redhat.com>
>>>> Cc: Alexander Graf <agraf@suse.de>
>>>> Cc: qemu-ppc@nongnu.org
>>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>>> ---
>>>> hw/i386/pc_piix.c | 5 +++++
>>>> hw/i386/pc_q35.c | 5 +++++
>>>> hw/ppc/spapr.c | 5 +++++
>>>> hw/virtio/virtio-pci.c | 2 +-
>>>> 4 files changed, 16 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
>>>> index 212e263..6e098ce 100644
>>>> --- a/hw/i386/pc_piix.c
>>>> +++ b/hw/i386/pc_piix.c
>>>> @@ -49,6 +49,7 @@
>>>> #include "hw/acpi/acpi.h"
>>>> #include "cpu.h"
>>>> #include "qemu/error-report.h"
>>>> +#include "hw/virtio/virtio-bus.h"
>>>> #ifdef CONFIG_XEN
>>>> # include <xen/hvm/hvm_info_table.h>
>>>> #endif
>>>> @@ -312,6 +313,10 @@ static void pc_init_pci(MachineState
>>>> *machine)
>>>> static void pc_compat_2_3(MachineState *machine)
>>>> {
>>>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>>>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>>>> +
>>>> + k->queue_max = 64;
>>>> }
>>>> static void pc_compat_2_2(MachineState *machine)
>>>> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
>>>> index e67f2de..ff7c414 100644
>>>> --- a/hw/i386/pc_q35.c
>>>> +++ b/hw/i386/pc_q35.c
>>>> @@ -45,6 +45,7 @@
>>>> #include "hw/usb.h"
>>>> #include "hw/cpu/icc_bus.h"
>>>> #include "qemu/error-report.h"
>>>> +#include "include/hw/virtio/virtio-bus.h"
>>>> /* ICH9 AHCI has 6 ports */
>>>> #define MAX_SATA_PORTS 6
>>>> @@ -291,6 +292,10 @@ static void pc_q35_init(MachineState
>>>> *machine)
>>>> static void pc_compat_2_3(MachineState *machine)
>>>> {
>>>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>>>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>>>> +
>>>> + k->queue_max = 64;
>>>> }
>>>> static void pc_compat_2_2(MachineState *machine)
>>>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>>>> index 8e43aa2..ee8f6a3 100644
>>>> --- a/hw/ppc/spapr.c
>>>> +++ b/hw/ppc/spapr.c
>>>> @@ -50,6 +50,7 @@
>>>> #include "hw/pci/pci.h"
>>>> #include "hw/scsi/scsi.h"
>>>> #include "hw/virtio/virtio-scsi.h"
>>>> +#include "hw/virtio/virtio-bus.h"
>>>> #include "exec/address-spaces.h"
>>>> #include "hw/usb.h"
>>>> @@ -1827,6 +1828,10 @@ static const TypeInfo spapr_machine_info =
>>>> {
>>>> static void spapr_compat_2_3(Object *obj)
>>>> {
>>>> + ObjectClass *klass = object_class_by_name("virtio-pci-bus");
>>>> + VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
>>>> +
>>>> + k->queue_max = 64;
>>>> }
>>>> static void spapr_compat_2_2(Object *obj)
>>>> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
>>>> index 9a5242a..02e3ce8 100644
>>>> --- a/hw/virtio/virtio-pci.c
>>>> +++ b/hw/virtio/virtio-pci.c
>>>> @@ -42,7 +42,7 @@
>>>> * configuration space */
>>>> #define VIRTIO_PCI_CONFIG_SIZE(dev)
>>>> VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
>>>> -#define VIRTIO_PCI_QUEUE_MAX 64
>>>> +#define VIRTIO_PCI_QUEUE_MAX 513
>>>
>>> 513 is an interesting number. Any particular reason for it? Maybe
>>> this
>>> was mentioned before and I just missed it ;)
>>>
>>>
>>> Alex
>>
>> The number were chose to match kernel limit for tuntap queues.
>> Recent
>> kernel allow up to 256 queues for tuntap, so 513 is in fact 256 * 2
>> plus
>> 1 control vq.
>
> I see. I'm not fully convinced that it's a good idea to bake that
> limit into qemu, but we have to have a limit somewhere.
>
> However, could you please make it more obvious in the code where thid
> number stems from? Either by using defines or with a comment.
>
>
> Alex
Will add a comment for this.
Thanks.
^ permalink raw reply [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar()
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (15 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 16/18] virtio-pci: increase the maximum number of virtqueues to 513 Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 9:18 ` Michael S. Tsirkin
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 18/18] virtio-pci: introduce auto_msix_bar_size property Jason Wang
2015-04-02 13:43 ` [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Cornelia Huck
18 siblings, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: Kevin Wolf, mst, Jason Wang, Keith Busch, Stefan Hajnoczi,
cornelia.huck
This patch lets msix_init_exclusive_bar() can calculate the bar and
pba size according to the number of MSI-X vectors other than using a
hard-coded limit 4096. This is needed to allow device to have more
than 128 MSI_X vectors. An extra legacy_layout parameter was
introduced to use legacy static 4096 bar size to keep the migration
compatibility.
Virtio device will be the first user for this.
Cc: Keith Busch <keith.busch@intel.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/block/nvme.c | 2 +-
hw/misc/ivshmem.c | 2 +-
hw/pci/msix.c | 47 +++++++++++++++++++++++++++++------------------
hw/virtio/virtio-pci.c | 2 +-
include/hw/pci/msix.h | 2 +-
5 files changed, 33 insertions(+), 22 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 1e07166..9af6e1e 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -787,7 +787,7 @@ static int nvme_init(PCIDevice *pci_dev)
pci_register_bar(&n->parent_obj, 0,
PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
&n->iomem);
- msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4);
+ msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4, true);
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index 5d272c8..6ae48ae 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -631,7 +631,7 @@ static uint64_t ivshmem_get_size(IVShmemState * s) {
static void ivshmem_setup_msi(IVShmemState * s)
{
- if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
+ if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, true)) {
IVSHMEM_DPRINTF("msix initialization failed\n");
exit(1);
}
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 24de260..8c6d8f3 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -291,33 +291,44 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
}
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
- uint8_t bar_nr)
+ uint8_t bar_nr, bool legacy_layout)
{
int ret;
char *name;
-
- /*
- * Migration compatibility dictates that this remains a 4k
- * BAR with the vector table in the lower half and PBA in
- * the upper half. Do not use these elsewhere!
- */
-#define MSIX_EXCLUSIVE_BAR_SIZE 4096
-#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
-#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
-#define MSIX_EXCLUSIVE_CAP_OFFSET 0
-
- if (nentries * PCI_MSIX_ENTRY_SIZE > MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
- return -EINVAL;
+ uint32_t bar_size;
+ uint32_t bar_pba_offset;
+
+ if (legacy_layout) {
+ /*
+ * Migration compatibility dictates that this remains a 4k
+ * BAR with the vector table in the lower half and PBA in
+ * the upper half. Do not use these elsewhere!
+ */
+ bar_size = 4096;
+ bar_pba_offset = bar_size / 2;
+
+ if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
+ return -EINVAL;
+ }
+ } else {
+ bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
+ bar_size = bar_pba_offset + (nentries / 8 + 1) * 8;
+ if (bar_size & (bar_size - 1)) {
+ bar_size = 1 << qemu_fls(bar_size);
+ }
+ if (bar_size < 4096) {
+ bar_size = 4096;
+ }
}
name = g_strdup_printf("%s-msix", dev->name);
- memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, MSIX_EXCLUSIVE_BAR_SIZE);
+ memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
g_free(name);
ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
- MSIX_EXCLUSIVE_BAR_TABLE_OFFSET, &dev->msix_exclusive_bar,
- bar_nr, MSIX_EXCLUSIVE_BAR_PBA_OFFSET,
- MSIX_EXCLUSIVE_CAP_OFFSET);
+ 0, &dev->msix_exclusive_bar,
+ bar_nr, bar_pba_offset,
+ 0);
if (ret) {
return ret;
}
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 02e3ce8..816a706 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -937,7 +937,7 @@ static void virtio_pci_device_plugged(DeviceState *d)
config[PCI_INTERRUPT_PIN] = 1;
if (proxy->nvectors &&
- msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
+ msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1, true)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h
index 954d82b..6c19535 100644
--- a/include/hw/pci/msix.h
+++ b/include/hw/pci/msix.h
@@ -11,7 +11,7 @@ int msix_init(PCIDevice *dev, unsigned short nentries,
unsigned table_offset, MemoryRegion *pba_bar,
uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos);
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
- uint8_t bar_nr);
+ uint8_t bar_nr, bool legacy_layout);
void msix_write_config(PCIDevice *dev, uint32_t address, uint32_t val, int len);
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar()
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar() Jason Wang
@ 2015-04-01 9:18 ` Michael S. Tsirkin
2015-04-01 10:12 ` Jason Wang
0 siblings, 1 reply; 39+ messages in thread
From: Michael S. Tsirkin @ 2015-04-01 9:18 UTC (permalink / raw)
To: Jason Wang
Cc: cornelia.huck, Keith Busch, Stefan Hajnoczi, qemu-devel,
Kevin Wolf
On Wed, Apr 01, 2015 at 04:15:11PM +0800, Jason Wang wrote:
> This patch lets msix_init_exclusive_bar() can calculate the bar and
> pba size according to the number of MSI-X vectors other than using a
> hard-coded limit 4096. This is needed to allow device to have more
> than 128 MSI_X vectors. An extra legacy_layout parameter was
> introduced to use legacy static 4096 bar size to keep the migration
> compatibility.
>
> Virtio device will be the first user for this.
>
> Cc: Keith Busch <keith.busch@intel.com>
> Cc: Kevin Wolf <kwolf@redhat.com>
> Cc: Stefan Hajnoczi <stefanha@redhat.com>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> hw/block/nvme.c | 2 +-
> hw/misc/ivshmem.c | 2 +-
> hw/pci/msix.c | 47 +++++++++++++++++++++++++++++------------------
> hw/virtio/virtio-pci.c | 2 +-
> include/hw/pci/msix.h | 2 +-
> 5 files changed, 33 insertions(+), 22 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index 1e07166..9af6e1e 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -787,7 +787,7 @@ static int nvme_init(PCIDevice *pci_dev)
> pci_register_bar(&n->parent_obj, 0,
> PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
> &n->iomem);
> - msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4);
> + msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4, true);
>
> id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
> id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
> diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
> index 5d272c8..6ae48ae 100644
> --- a/hw/misc/ivshmem.c
> +++ b/hw/misc/ivshmem.c
> @@ -631,7 +631,7 @@ static uint64_t ivshmem_get_size(IVShmemState * s) {
>
> static void ivshmem_setup_msi(IVShmemState * s)
> {
> - if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
> + if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1, true)) {
> IVSHMEM_DPRINTF("msix initialization failed\n");
> exit(1);
> }
> diff --git a/hw/pci/msix.c b/hw/pci/msix.c
> index 24de260..8c6d8f3 100644
> --- a/hw/pci/msix.c
> +++ b/hw/pci/msix.c
> @@ -291,33 +291,44 @@ int msix_init(struct PCIDevice *dev, unsigned short nentries,
> }
>
> int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
> - uint8_t bar_nr)
> + uint8_t bar_nr, bool legacy_layout)
> {
> int ret;
> char *name;
> -
> - /*
> - * Migration compatibility dictates that this remains a 4k
> - * BAR with the vector table in the lower half and PBA in
> - * the upper half. Do not use these elsewhere!
> - */
> -#define MSIX_EXCLUSIVE_BAR_SIZE 4096
> -#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
> -#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
> -#define MSIX_EXCLUSIVE_CAP_OFFSET 0
> -
> - if (nentries * PCI_MSIX_ENTRY_SIZE > MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
> - return -EINVAL;
> + uint32_t bar_size;
> + uint32_t bar_pba_offset;
> +
> + if (legacy_layout) {
> + /*
> + * Migration compatibility dictates that this remains a 4k
> + * BAR with the vector table in the lower half and PBA in
> + * the upper half. Do not use these elsewhere!
> + */
> + bar_size = 4096;
> + bar_pba_offset = bar_size / 2;
> +
> + if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
> + return -EINVAL;
> + }
So this takes pains to behave in a compatible
way when more than 128 vectors are requested.
But since we only had up to 64 VQs, why does
such a configuration make sense?
I suggest we just ignore this configuration
for migration compatibility.
> + } else {
> + bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
> + bar_size = bar_pba_offset + (nentries / 8 + 1) * 8;
> + if (bar_size & (bar_size - 1)) {
> + bar_size = 1 << qemu_fls(bar_size);
> + }
> + if (bar_size < 4096) {
> + bar_size = 4096;
> + }
> }
Don't duplicate code like this please.
Just do this:
/*
* We always did it like this, so we have to keep doing this to
* avoid breaking migration.
*/
if (bar_pba_offset < 4096 / 2)
bar_pba_offset = 4096 / 2;
Will save a ton of churn.
with 2 above suggestions you no longer
need to worry about machine compatibility.
>
> name = g_strdup_printf("%s-msix", dev->name);
> - memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, MSIX_EXCLUSIVE_BAR_SIZE);
> + memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
> g_free(name);
>
> ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
> - MSIX_EXCLUSIVE_BAR_TABLE_OFFSET, &dev->msix_exclusive_bar,
> - bar_nr, MSIX_EXCLUSIVE_BAR_PBA_OFFSET,
> - MSIX_EXCLUSIVE_CAP_OFFSET);
> + 0, &dev->msix_exclusive_bar,
> + bar_nr, bar_pba_offset,
> + 0);
> if (ret) {
> return ret;
> }
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 02e3ce8..816a706 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -937,7 +937,7 @@ static void virtio_pci_device_plugged(DeviceState *d)
> config[PCI_INTERRUPT_PIN] = 1;
>
> if (proxy->nvectors &&
> - msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
> + msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1, true)) {
> error_report("unable to init msix vectors to %" PRIu32,
> proxy->nvectors);
> proxy->nvectors = 0;
> diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h
> index 954d82b..6c19535 100644
> --- a/include/hw/pci/msix.h
> +++ b/include/hw/pci/msix.h
> @@ -11,7 +11,7 @@ int msix_init(PCIDevice *dev, unsigned short nentries,
> unsigned table_offset, MemoryRegion *pba_bar,
> uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos);
> int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
> - uint8_t bar_nr);
> + uint8_t bar_nr, bool legacy_layout);
>
> void msix_write_config(PCIDevice *dev, uint32_t address, uint32_t val, int len);
>
> --
> 2.1.0
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar()
2015-04-01 9:18 ` Michael S. Tsirkin
@ 2015-04-01 10:12 ` Jason Wang
2015-04-01 10:20 ` Michael S. Tsirkin
0 siblings, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-01 10:12 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: cornelia.huck, Keith Busch, Kevin Wolf, qemu-devel,
Stefan Hajnoczi
On Wed, Apr 1, 2015 at 5:18 PM, Michael S. Tsirkin <mst@redhat.com>
wrote:
> On Wed, Apr 01, 2015 at 04:15:11PM +0800, Jason Wang wrote:
>> This patch lets msix_init_exclusive_bar() can calculate the bar and
>> pba size according to the number of MSI-X vectors other than using a
>> hard-coded limit 4096. This is needed to allow device to have more
>> than 128 MSI_X vectors. An extra legacy_layout parameter was
>> introduced to use legacy static 4096 bar size to keep the migration
>> compatibility.
>>
>> Virtio device will be the first user for this.
>>
>> Cc: Keith Busch <keith.busch@intel.com>
>> Cc: Kevin Wolf <kwolf@redhat.com>
>> Cc: Stefan Hajnoczi <stefanha@redhat.com>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> hw/block/nvme.c | 2 +-
>> hw/misc/ivshmem.c | 2 +-
>> hw/pci/msix.c | 47
>> +++++++++++++++++++++++++++++------------------
>> hw/virtio/virtio-pci.c | 2 +-
>> include/hw/pci/msix.h | 2 +-
>> 5 files changed, 33 insertions(+), 22 deletions(-)
>>
>> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
>> index 1e07166..9af6e1e 100644
>> --- a/hw/block/nvme.c
>> +++ b/hw/block/nvme.c
>> @@ -787,7 +787,7 @@ static int nvme_init(PCIDevice *pci_dev)
>> pci_register_bar(&n->parent_obj, 0,
>> PCI_BASE_ADDRESS_SPACE_MEMORY |
>> PCI_BASE_ADDRESS_MEM_TYPE_64,
>> &n->iomem);
>> - msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4);
>> + msix_init_exclusive_bar(&n->parent_obj, n->num_queues, 4,
>> true);
>>
>> id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
>> id->ssvid = cpu_to_le16(pci_get_word(pci_conf +
>> PCI_SUBSYSTEM_VENDOR_ID));
>> diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
>> index 5d272c8..6ae48ae 100644
>> --- a/hw/misc/ivshmem.c
>> +++ b/hw/misc/ivshmem.c
>> @@ -631,7 +631,7 @@ static uint64_t ivshmem_get_size(IVShmemState *
>> s) {
>>
>> static void ivshmem_setup_msi(IVShmemState * s)
>> {
>> - if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
>> + if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1,
>> true)) {
>> IVSHMEM_DPRINTF("msix initialization failed\n");
>> exit(1);
>> }
>> diff --git a/hw/pci/msix.c b/hw/pci/msix.c
>> index 24de260..8c6d8f3 100644
>> --- a/hw/pci/msix.c
>> +++ b/hw/pci/msix.c
>> @@ -291,33 +291,44 @@ int msix_init(struct PCIDevice *dev, unsigned
>> short nentries,
>> }
>>
>> int msix_init_exclusive_bar(PCIDevice *dev, unsigned short
>> nentries,
>> - uint8_t bar_nr)
>> + uint8_t bar_nr, bool legacy_layout)
>> {
>> int ret;
>> char *name;
>> -
>> - /*
>> - * Migration compatibility dictates that this remains a 4k
>> - * BAR with the vector table in the lower half and PBA in
>> - * the upper half. Do not use these elsewhere!
>> - */
>> -#define MSIX_EXCLUSIVE_BAR_SIZE 4096
>> -#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
>> -#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
>> -#define MSIX_EXCLUSIVE_CAP_OFFSET 0
>> -
>> - if (nentries * PCI_MSIX_ENTRY_SIZE >
>> MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
>> - return -EINVAL;
>> + uint32_t bar_size;
>> + uint32_t bar_pba_offset;
>> +
>> + if (legacy_layout) {
>> + /*
>> + * Migration compatibility dictates that this remains a 4k
>> + * BAR with the vector table in the lower half and PBA in
>> + * the upper half. Do not use these elsewhere!
>> + */
>> + bar_size = 4096;
>> + bar_pba_offset = bar_size / 2;
>> +
>> + if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
>> + return -EINVAL;
>> + }
>
> So this takes pains to behave in a compatible
> way when more than 128 vectors are requested.
> But since we only had up to 64 VQs, why does
> some a configuration make sense?
This question could also be asked for the code even without this patch.
Spec does not clarify this and if we think vectors>=128 is not a valid
configuration with only 64 VQs, qemu should fail and quit instead of a
warning. Unfortunately we don't do this and leave a chance for user to
use it.
>
>
> I suggest we just ignore this configuration
> for migration compatibility.
>
Consider this only affects that calling qemu with vectors more than 128
for 64 VQs limitation. I'm fine with this.
>
>
>> + } else {
>> + bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
>> + bar_size = bar_pba_offset + (nentries / 8 + 1) * 8;
>> + if (bar_size & (bar_size - 1)) {
>> + bar_size = 1 << qemu_fls(bar_size);
>> + }
>> + if (bar_size < 4096) {
>> + bar_size = 4096;
>> + }
>> }
>
> Don't duplicate code like this please.
> Just do this:
> /*
> * We always did it like this, so we have to keep doing this to
> * avoid breaking migration.
> */
> if (bar_pba_offset < 4096 / 2)
> bar_pba_offset = 4096 / 2;
>
>
> Will save a ton of churn.
Ok, so you're suggesting to use the same layout as 2.3 for vectors <=
128? Should work.
>
>
> with 2 above suggestions you no longer
> need to worry about machine compatibility.
>
>>
>> name = g_strdup_printf("%s-msix", dev->name);
>> - memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev),
>> name, MSIX_EXCLUSIVE_BAR_SIZE);
>> + memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev),
>> name, bar_size);
>> g_free(name);
>>
>> ret = msix_init(dev, nentries, &dev->msix_exclusive_bar,
>> bar_nr,
>> - MSIX_EXCLUSIVE_BAR_TABLE_OFFSET,
>> &dev->msix_exclusive_bar,
>> - bar_nr, MSIX_EXCLUSIVE_BAR_PBA_OFFSET,
>> - MSIX_EXCLUSIVE_CAP_OFFSET);
>> + 0, &dev->msix_exclusive_bar,
>> + bar_nr, bar_pba_offset,
>> + 0);
>> if (ret) {
>> return ret;
>> }
>> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
>> index 02e3ce8..816a706 100644
>> --- a/hw/virtio/virtio-pci.c
>> +++ b/hw/virtio/virtio-pci.c
>> @@ -937,7 +937,7 @@ static void
>> virtio_pci_device_plugged(DeviceState *d)
>> config[PCI_INTERRUPT_PIN] = 1;
>>
>> if (proxy->nvectors &&
>> - msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
>> 1)) {
>> + msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
>> 1, true)) {
>> error_report("unable to init msix vectors to %" PRIu32,
>> proxy->nvectors);
>> proxy->nvectors = 0;
>> diff --git a/include/hw/pci/msix.h b/include/hw/pci/msix.h
>> index 954d82b..6c19535 100644
>> --- a/include/hw/pci/msix.h
>> +++ b/include/hw/pci/msix.h
>> @@ -11,7 +11,7 @@ int msix_init(PCIDevice *dev, unsigned short
>> nentries,
>> unsigned table_offset, MemoryRegion *pba_bar,
>> uint8_t pba_bar_nr, unsigned pba_offset, uint8_t
>> cap_pos);
>> int msix_init_exclusive_bar(PCIDevice *dev, unsigned short
>> nentries,
>> - uint8_t bar_nr);
>> + uint8_t bar_nr, bool legacy_layout);
>>
>> void msix_write_config(PCIDevice *dev, uint32_t address, uint32_t
>> val, int len);
>>
>> --
>> 2.1.0
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar()
2015-04-01 10:12 ` Jason Wang
@ 2015-04-01 10:20 ` Michael S. Tsirkin
0 siblings, 0 replies; 39+ messages in thread
From: Michael S. Tsirkin @ 2015-04-01 10:20 UTC (permalink / raw)
To: Jason Wang
Cc: cornelia.huck, Keith Busch, Kevin Wolf, qemu-devel,
Stefan Hajnoczi
On Wed, Apr 01, 2015 at 06:12:18PM +0800, Jason Wang wrote:
> >> diff --git a/hw/pci/msix.c b/hw/pci/msix.c
> >> index 24de260..8c6d8f3 100644
> >> --- a/hw/pci/msix.c
> >> +++ b/hw/pci/msix.c
> >> @@ -291,33 +291,44 @@ int msix_init(struct PCIDevice *dev, unsigned
> >>short nentries,
> >> }
> >> int msix_init_exclusive_bar(PCIDevice *dev, unsigned short
> >>nentries,
> >> - uint8_t bar_nr)
> >> + uint8_t bar_nr, bool legacy_layout)
> >> {
> >> int ret;
> >> char *name;
> >> -
> >> - /*
> >> - * Migration compatibility dictates that this remains a 4k
> >> - * BAR with the vector table in the lower half and PBA in
> >> - * the upper half. Do not use these elsewhere!
> >> - */
> >> -#define MSIX_EXCLUSIVE_BAR_SIZE 4096
> >> -#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
> >> -#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
> >> -#define MSIX_EXCLUSIVE_CAP_OFFSET 0
> >> -
> >> - if (nentries * PCI_MSIX_ENTRY_SIZE >
> >>MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
> >> - return -EINVAL;
> >> + uint32_t bar_size;
> >> + uint32_t bar_pba_offset;
> >> +
> >> + if (legacy_layout) {
> >> + /*
> >> + * Migration compatibility dictates that this remains a 4k
> >> + * BAR with the vector table in the lower half and PBA in
> >> + * the upper half. Do not use these elsewhere!
> >> + */
> >> + bar_size = 4096;
> >> + bar_pba_offset = bar_size / 2;
> >> +
> >> + if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
> >> + return -EINVAL;
> >> + }
> >
> >So this takes pains to behave in a compatible
> >way when more than 128 vectors are requested.
> >But since we only had up to 64 VQs, why does
> >some a configuration make sense?
>
> This question could also be asked for the code even without this patch. Spec
> does not clarify this and if we think vectors>=128 is not a valid
> configuration with only 64 VQs, qemu should fail and quit instead of a
> warning. Unfortunately we don't do this and leave a chance for user to use
> it.
I agree with this as a general design principle.
And it's typically even better to just support what
the user requested, even if it doesn't make sense.
--
MST
^ permalink raw reply [flat|nested] 39+ messages in thread
* [Qemu-devel] [PATCH V5 18/18] virtio-pci: introduce auto_msix_bar_size property
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (16 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 17/18] pci: remove hard-coded bar size in msix_init_exclusive_bar() Jason Wang
@ 2015-04-01 8:15 ` Jason Wang
2015-04-01 9:20 ` Michael S. Tsirkin
2015-04-02 13:43 ` [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Cornelia Huck
18 siblings, 1 reply; 39+ messages in thread
From: Jason Wang @ 2015-04-01 8:15 UTC (permalink / raw)
To: qemu-devel
Cc: mst, Jason Wang, Alexander Graf, qemu-ppc, cornelia.huck,
Paolo Bonzini, Richard Henderson
Currently we don't support more than 128 MSI-X vectors for a pci
device; trying to use vectors=129 for a virtio-net-pci device may get:
qemu-system-x86_64: -device virtio-net-pci,netdev=hn0,vectors=129:
unable to init msix vectors to 129
This is because the MSI-X bar size was hard-coded as 4096. So this
patch introduces a boolean auto_msix_bar_size property for
virtio-pci devices. Enabling this lets a virtio pci device have the msix
core calculate the MSI-X bar size dynamically based on the number
of vectors.
This is a must to let virtio-net scale up to 256 queues, with each queue
associated with a specific MSI-X entry.
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: qemu-ppc@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
hw/i386/pc_piix.c | 8 ++++++++
hw/i386/pc_q35.c | 8 ++++++++
hw/ppc/spapr.c | 11 ++++++++++-
hw/virtio/virtio-pci.c | 6 +++++-
hw/virtio/virtio-pci.h | 3 +++
include/hw/compat.h | 11 +++++++++++
6 files changed, 45 insertions(+), 2 deletions(-)
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 6e098ce..5da6ef5 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -548,6 +548,10 @@ static QEMUMachine pc_i440fx_machine_v2_3 = {
PC_I440FX_2_3_MACHINE_OPTIONS,
.name = "pc-i440fx-2.3",
.init = pc_init_pci_2_3,
+ .compat_props = (GlobalProperty[]) {
+ HW_COMPAT_2_3,
+ { /* end of list */ }
+ },
};
#define PC_I440FX_2_2_MACHINE_OPTIONS PC_I440FX_2_3_MACHINE_OPTIONS
@@ -556,6 +560,10 @@ static QEMUMachine pc_i440fx_machine_v2_2 = {
PC_I440FX_2_2_MACHINE_OPTIONS,
.name = "pc-i440fx-2.2",
.init = pc_init_pci_2_2,
+ .compat_props = (GlobalProperty[]) {
+ HW_COMPAT_2_2,
+ { /* end of list */ }
+ },
};
#define PC_I440FX_2_1_MACHINE_OPTIONS \
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index ff7c414..e38fda0 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -444,6 +444,10 @@ static QEMUMachine pc_q35_machine_v2_3 = {
PC_Q35_2_3_MACHINE_OPTIONS,
.name = "pc-q35-2.3",
.init = pc_q35_init_2_3,
+ .compat_props = (GlobalProperty[]) {
+ HW_COMPAT_2_3,
+ { /* end of list */ }
+ },
};
#define PC_Q35_2_2_MACHINE_OPTIONS PC_Q35_2_3_MACHINE_OPTIONS
@@ -452,6 +456,10 @@ static QEMUMachine pc_q35_machine_v2_2 = {
PC_Q35_2_2_MACHINE_OPTIONS,
.name = "pc-q35-2.2",
.init = pc_q35_init_2_2,
+ .compat_props = (GlobalProperty[]) {
+ HW_COMPAT_2_2,
+ { /* end of list */ }
+ },
};
#define PC_Q35_2_1_MACHINE_OPTIONS \
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index ee8f6a3..109c4ff 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1816,12 +1816,16 @@ static const TypeInfo spapr_machine_info = {
},
};
+#define SPAPR_COMPAT_2_3 \
+ HW_COMPAT_2_3
+
#define SPAPR_COMPAT_2_2 \
+ SPAPR_COMPAT_2_3, \
{\
.driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
.property = "mem_win_size",\
.value = "0x20000000",\
- }
+ } \
#define SPAPR_COMPAT_2_1 \
SPAPR_COMPAT_2_2
@@ -1905,10 +1909,15 @@ static const TypeInfo spapr_machine_2_2_info = {
static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
{
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_3,
+ { /* end of list */ }
+ };
MachineClass *mc = MACHINE_CLASS(oc);
mc->name = "pseries-2.3";
mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
+ mc->compat_props = compat_props;
}
static const TypeInfo spapr_machine_2_3_info = {
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 816a706..9c6082a 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -924,6 +924,7 @@ static void virtio_pci_device_plugged(DeviceState *d)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
VirtioBusState *bus = &proxy->bus;
+ bool legacy_layout = !(proxy->flags & VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE);
uint8_t *config;
uint32_t size;
@@ -937,7 +938,8 @@ static void virtio_pci_device_plugged(DeviceState *d)
config[PCI_INTERRUPT_PIN] = 1;
if (proxy->nvectors &&
- msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1, true)) {
+ msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1,
+ legacy_layout)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
@@ -1370,6 +1372,8 @@ static const TypeInfo virtio_serial_pci_info = {
static Property virtio_net_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
+ DEFINE_PROP_BIT("auto_msix_bar_size", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
DEFINE_PROP_END_OF_LIST(),
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 3bac016..82a6782 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -62,6 +62,9 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
* vcpu thread using ioeventfd for some devices. */
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
+#define VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT 2
+#define VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE \
+ (1 << VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT)
typedef struct {
MSIMessage msg;
diff --git a/include/hw/compat.h b/include/hw/compat.h
index 313682a..3186275 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -1,7 +1,18 @@
#ifndef HW_COMPAT_H
#define HW_COMPAT_H
+#define HW_COMPAT_2_3 \
+ {\
+ .driver = "virtio-net-pci",\
+ .property = "auto_msix_bar_size",\
+ .value = "off",\
+ }
+
+#define HW_COMPAT_2_2 \
+ HW_COMPAT_2_3
+
#define HW_COMPAT_2_1 \
+ HW_COMPAT_2_2, \
{\
.driver = "intel-hda",\
.property = "old_msi_addr",\
--
2.1.0
^ permalink raw reply related [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 18/18] virtio-pci: introduce auto_msix_bar_size property
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 18/18] virtio-pci: introduce auto_msix_bar_size property Jason Wang
@ 2015-04-01 9:20 ` Michael S. Tsirkin
2015-04-01 10:14 ` Jason Wang
0 siblings, 1 reply; 39+ messages in thread
From: Michael S. Tsirkin @ 2015-04-01 9:20 UTC (permalink / raw)
To: Jason Wang
Cc: Alexander Graf, qemu-devel, qemu-ppc, cornelia.huck,
Paolo Bonzini, Richard Henderson
On Wed, Apr 01, 2015 at 04:15:12PM +0800, Jason Wang wrote:
> Currently we don't support more than 128 MSI-X vectors for a pci
> devices, trying to use vector=129 for a virtio-net-pci device may get:
>
> qemu-system-x86_64: -device virtio-net-pci,netdev=hn0,vectors=129:
> unable to init msix vectors to 129
So you want to keep producing this error for
compat machine types? Given that we only support
up to 64 queues, why is vector=129 worth supporting
for these machine types?
> This this because the MSI-X bar size were hard-coded as 4096. So this
> patch introduces boolean auto_msix_bar_size property for
> virito-pci devices. Enable this will let virtio pci device to let msix
> core to calculate the MSI-X bar size dynamically based on the number
> of vectors.
>
> This is a must to let virtio-net can up to 256 queues and each queue
> were associated with a specific MSI-X entry.
>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Richard Henderson <rth@twiddle.net>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Alexander Graf <agraf@suse.de>
> Cc: qemu-ppc@nongnu.org
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> hw/i386/pc_piix.c | 8 ++++++++
> hw/i386/pc_q35.c | 8 ++++++++
> hw/ppc/spapr.c | 11 ++++++++++-
> hw/virtio/virtio-pci.c | 6 +++++-
> hw/virtio/virtio-pci.h | 3 +++
> include/hw/compat.h | 11 +++++++++++
> 6 files changed, 45 insertions(+), 2 deletions(-)
>
> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
> index 6e098ce..5da6ef5 100644
> --- a/hw/i386/pc_piix.c
> +++ b/hw/i386/pc_piix.c
> @@ -548,6 +548,10 @@ static QEMUMachine pc_i440fx_machine_v2_3 = {
> PC_I440FX_2_3_MACHINE_OPTIONS,
> .name = "pc-i440fx-2.3",
> .init = pc_init_pci_2_3,
> + .compat_props = (GlobalProperty[]) {
> + HW_COMPAT_2_3,
> + { /* end of list */ }
> + },
> };
>
> #define PC_I440FX_2_2_MACHINE_OPTIONS PC_I440FX_2_3_MACHINE_OPTIONS
> @@ -556,6 +560,10 @@ static QEMUMachine pc_i440fx_machine_v2_2 = {
> PC_I440FX_2_2_MACHINE_OPTIONS,
> .name = "pc-i440fx-2.2",
> .init = pc_init_pci_2_2,
> + .compat_props = (GlobalProperty[]) {
> + HW_COMPAT_2_2,
> + { /* end of list */ }
> + },
> };
>
> #define PC_I440FX_2_1_MACHINE_OPTIONS \
> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
> index ff7c414..e38fda0 100644
> --- a/hw/i386/pc_q35.c
> +++ b/hw/i386/pc_q35.c
> @@ -444,6 +444,10 @@ static QEMUMachine pc_q35_machine_v2_3 = {
> PC_Q35_2_3_MACHINE_OPTIONS,
> .name = "pc-q35-2.3",
> .init = pc_q35_init_2_3,
> + .compat_props = (GlobalProperty[]) {
> + HW_COMPAT_2_3,
> + { /* end of list */ }
> + },
> };
>
> #define PC_Q35_2_2_MACHINE_OPTIONS PC_Q35_2_3_MACHINE_OPTIONS
> @@ -452,6 +456,10 @@ static QEMUMachine pc_q35_machine_v2_2 = {
> PC_Q35_2_2_MACHINE_OPTIONS,
> .name = "pc-q35-2.2",
> .init = pc_q35_init_2_2,
> + .compat_props = (GlobalProperty[]) {
> + HW_COMPAT_2_2,
> + { /* end of list */ }
> + },
> };
>
> #define PC_Q35_2_1_MACHINE_OPTIONS \
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index ee8f6a3..109c4ff 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -1816,12 +1816,16 @@ static const TypeInfo spapr_machine_info = {
> },
> };
>
> +#define SPAPR_COMPAT_2_3 \
> + HW_COMPAT_2_3
> +
> #define SPAPR_COMPAT_2_2 \
> + SPAPR_COMPAT_2_3, \
> {\
> .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
> .property = "mem_win_size",\
> .value = "0x20000000",\
> - }
> + } \
>
> #define SPAPR_COMPAT_2_1 \
> SPAPR_COMPAT_2_2
> @@ -1905,10 +1909,15 @@ static const TypeInfo spapr_machine_2_2_info = {
>
> static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
> {
> + static GlobalProperty compat_props[] = {
> + SPAPR_COMPAT_2_3,
> + { /* end of list */ }
> + };
> MachineClass *mc = MACHINE_CLASS(oc);
>
> mc->name = "pseries-2.3";
> mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
> + mc->compat_props = compat_props;
> }
>
> static const TypeInfo spapr_machine_2_3_info = {
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 816a706..9c6082a 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -924,6 +924,7 @@ static void virtio_pci_device_plugged(DeviceState *d)
> {
> VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
> VirtioBusState *bus = &proxy->bus;
> + bool legacy_layout = !(proxy->flags & VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE);
> uint8_t *config;
> uint32_t size;
>
> @@ -937,7 +938,8 @@ static void virtio_pci_device_plugged(DeviceState *d)
> config[PCI_INTERRUPT_PIN] = 1;
>
> if (proxy->nvectors &&
> - msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1, true)) {
> + msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1,
> + legacy_layout)) {
> error_report("unable to init msix vectors to %" PRIu32,
> proxy->nvectors);
> proxy->nvectors = 0;
> @@ -1370,6 +1372,8 @@ static const TypeInfo virtio_serial_pci_info = {
> static Property virtio_net_properties[] = {
> DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
> VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
> + DEFINE_PROP_BIT("auto_msix_bar_size", VirtIOPCIProxy, flags,
> + VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT, true),
> DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
> DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
> DEFINE_PROP_END_OF_LIST(),
> diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
> index 3bac016..82a6782 100644
> --- a/hw/virtio/virtio-pci.h
> +++ b/hw/virtio/virtio-pci.h
> @@ -62,6 +62,9 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
> * vcpu thread using ioeventfd for some devices. */
> #define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
> #define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
> +#define VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT 2
> +#define VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE \
> + (1 << VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT)
>
> typedef struct {
> MSIMessage msg;
> diff --git a/include/hw/compat.h b/include/hw/compat.h
> index 313682a..3186275 100644
> --- a/include/hw/compat.h
> +++ b/include/hw/compat.h
> @@ -1,7 +1,18 @@
> #ifndef HW_COMPAT_H
> #define HW_COMPAT_H
>
> +#define HW_COMPAT_2_3 \
> + {\
> + .driver = "virtio-net-pci",\
> + .property = "auto_msix_bar_size",\
> + .value = "off",\
> + }
> +
> +#define HW_COMPAT_2_2 \
> + HW_COMPAT_2_3
> +
> #define HW_COMPAT_2_1 \
> + HW_COMPAT_2_2, \
> {\
> .driver = "intel-hda",\
> .property = "old_msi_addr",\
> --
> 2.1.0
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 18/18] virtio-pci: introduce auto_msix_bar_size property
2015-04-01 9:20 ` Michael S. Tsirkin
@ 2015-04-01 10:14 ` Jason Wang
0 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-01 10:14 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: Alexander Graf, qemu-devel, qemu-ppc, cornelia.huck,
Paolo Bonzini, Richard Henderson
On Wed, Apr 1, 2015 at 5:20 PM, Michael S. Tsirkin <mst@redhat.com>
wrote:
> On Wed, Apr 01, 2015 at 04:15:12PM +0800, Jason Wang wrote:
>> Currently we don't support more than 128 MSI-X vectors for a pci
>> devices, trying to use vector=129 for a virtio-net-pci device may
>> get:
>>
>> qemu-system-x86_64: -device virtio-net-pci,netdev=hn0,vectors=129:
>> unable to init msix vectors to 129
>
> So you want to keep producing this error for
> compat machine types? Given that we only support
> up to 64 queues, why is vector=129 worth supporting
> for these machine types?
I'm ok to ignore this issue. As I replied in the previous mail, we'd
better fail and quit early in this case. But it looks too late to do
this now.
>
>
>
>> This is because the MSI-X bar size was hard-coded as 4096. So this
>> patch introduces a boolean auto_msix_bar_size property for
>> virtio-pci devices. Enabling this will let a virtio pci device ask
>> the msix core to calculate the MSI-X bar size dynamically based on
>> the number of vectors.
>>
>> This is a must to let virtio-net scale up to 256 queues, with each
>> queue associated with a specific MSI-X entry.
>>
>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>> Cc: Richard Henderson <rth@twiddle.net>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> Cc: Alexander Graf <agraf@suse.de>
>> Cc: qemu-ppc@nongnu.org
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> hw/i386/pc_piix.c | 8 ++++++++
>> hw/i386/pc_q35.c | 8 ++++++++
>> hw/ppc/spapr.c | 11 ++++++++++-
>> hw/virtio/virtio-pci.c | 6 +++++-
>> hw/virtio/virtio-pci.h | 3 +++
>> include/hw/compat.h | 11 +++++++++++
>> 6 files changed, 45 insertions(+), 2 deletions(-)
>>
>> diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
>> index 6e098ce..5da6ef5 100644
>> --- a/hw/i386/pc_piix.c
>> +++ b/hw/i386/pc_piix.c
>> @@ -548,6 +548,10 @@ static QEMUMachine pc_i440fx_machine_v2_3 = {
>> PC_I440FX_2_3_MACHINE_OPTIONS,
>> .name = "pc-i440fx-2.3",
>> .init = pc_init_pci_2_3,
>> + .compat_props = (GlobalProperty[]) {
>> + HW_COMPAT_2_3,
>> + { /* end of list */ }
>> + },
>> };
>>
>> #define PC_I440FX_2_2_MACHINE_OPTIONS PC_I440FX_2_3_MACHINE_OPTIONS
>> @@ -556,6 +560,10 @@ static QEMUMachine pc_i440fx_machine_v2_2 = {
>> PC_I440FX_2_2_MACHINE_OPTIONS,
>> .name = "pc-i440fx-2.2",
>> .init = pc_init_pci_2_2,
>> + .compat_props = (GlobalProperty[]) {
>> + HW_COMPAT_2_2,
>> + { /* end of list */ }
>> + },
>> };
>>
>> #define PC_I440FX_2_1_MACHINE_OPTIONS \
>> diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
>> index ff7c414..e38fda0 100644
>> --- a/hw/i386/pc_q35.c
>> +++ b/hw/i386/pc_q35.c
>> @@ -444,6 +444,10 @@ static QEMUMachine pc_q35_machine_v2_3 = {
>> PC_Q35_2_3_MACHINE_OPTIONS,
>> .name = "pc-q35-2.3",
>> .init = pc_q35_init_2_3,
>> + .compat_props = (GlobalProperty[]) {
>> + HW_COMPAT_2_3,
>> + { /* end of list */ }
>> + },
>> };
>>
>> #define PC_Q35_2_2_MACHINE_OPTIONS PC_Q35_2_3_MACHINE_OPTIONS
>> @@ -452,6 +456,10 @@ static QEMUMachine pc_q35_machine_v2_2 = {
>> PC_Q35_2_2_MACHINE_OPTIONS,
>> .name = "pc-q35-2.2",
>> .init = pc_q35_init_2_2,
>> + .compat_props = (GlobalProperty[]) {
>> + HW_COMPAT_2_2,
>> + { /* end of list */ }
>> + },
>> };
>>
>> #define PC_Q35_2_1_MACHINE_OPTIONS \
>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>> index ee8f6a3..109c4ff 100644
>> --- a/hw/ppc/spapr.c
>> +++ b/hw/ppc/spapr.c
>> @@ -1816,12 +1816,16 @@ static const TypeInfo spapr_machine_info = {
>> },
>> };
>>
>> +#define SPAPR_COMPAT_2_3 \
>> + HW_COMPAT_2_3
>> +
>> #define SPAPR_COMPAT_2_2 \
>> + SPAPR_COMPAT_2_3, \
>> {\
>> .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
>> .property = "mem_win_size",\
>> .value = "0x20000000",\
>> - }
>> + } \
>>
>> #define SPAPR_COMPAT_2_1 \
>> SPAPR_COMPAT_2_2
>> @@ -1905,10 +1909,15 @@ static const TypeInfo
>> spapr_machine_2_2_info = {
>>
>> static void spapr_machine_2_3_class_init(ObjectClass *oc, void
>> *data)
>> {
>> + static GlobalProperty compat_props[] = {
>> + SPAPR_COMPAT_2_3,
>> + { /* end of list */ }
>> + };
>> MachineClass *mc = MACHINE_CLASS(oc);
>>
>> mc->name = "pseries-2.3";
>> mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
>> + mc->compat_props = compat_props;
>> }
>>
>> static const TypeInfo spapr_machine_2_3_info = {
>> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
>> index 816a706..9c6082a 100644
>> --- a/hw/virtio/virtio-pci.c
>> +++ b/hw/virtio/virtio-pci.c
>> @@ -924,6 +924,7 @@ static void
>> virtio_pci_device_plugged(DeviceState *d)
>> {
>> VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
>> VirtioBusState *bus = &proxy->bus;
>> + bool legacy_layout = !(proxy->flags &
>> VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE);
>> uint8_t *config;
>> uint32_t size;
>>
>> @@ -937,7 +938,8 @@ static void
>> virtio_pci_device_plugged(DeviceState *d)
>> config[PCI_INTERRUPT_PIN] = 1;
>>
>> if (proxy->nvectors &&
>> - msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
>> 1, true)) {
>> + msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
>> 1,
>> + legacy_layout)) {
>> error_report("unable to init msix vectors to %" PRIu32,
>> proxy->nvectors);
>> proxy->nvectors = 0;
>> @@ -1370,6 +1372,8 @@ static const TypeInfo virtio_serial_pci_info
>> = {
>> static Property virtio_net_properties[] = {
>> DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
>> VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
>> + DEFINE_PROP_BIT("auto_msix_bar_size", VirtIOPCIProxy, flags,
>> + VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT, true),
>> DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
>> DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
>> DEFINE_PROP_END_OF_LIST(),
>> diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
>> index 3bac016..82a6782 100644
>> --- a/hw/virtio/virtio-pci.h
>> +++ b/hw/virtio/virtio-pci.h
>> @@ -62,6 +62,9 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
>> * vcpu thread using ioeventfd for some devices. */
>> #define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
>> #define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 <<
>> VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
>> +#define VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT 2
>> +#define VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE \
>> + (1 << VIRTIO_PCI_FLAG_AUTO_MSIX_SIZE_BIT)
>>
>> typedef struct {
>> MSIMessage msg;
>> diff --git a/include/hw/compat.h b/include/hw/compat.h
>> index 313682a..3186275 100644
>> --- a/include/hw/compat.h
>> +++ b/include/hw/compat.h
>> @@ -1,7 +1,18 @@
>> #ifndef HW_COMPAT_H
>> #define HW_COMPAT_H
>>
>> +#define HW_COMPAT_2_3 \
>> + {\
>> + .driver = "virtio-net-pci",\
>> + .property = "auto_msix_bar_size",\
>> + .value = "off",\
>> + }
>> +
>> +#define HW_COMPAT_2_2 \
>> + HW_COMPAT_2_3
>> +
>> #define HW_COMPAT_2_1 \
>> + HW_COMPAT_2_2, \
>> {\
>> .driver = "intel-hda",\
>> .property = "old_msi_addr",\
>> --
>> 2.1.0
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 00/18] Support more virtio queues
2015-04-01 8:14 [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Jason Wang
` (17 preceding siblings ...)
2015-04-01 8:15 ` [Qemu-devel] [PATCH V5 18/18] virtio-pci: introduce auto_msix_bar_size property Jason Wang
@ 2015-04-02 13:43 ` Cornelia Huck
2015-04-03 8:52 ` Jason Wang
18 siblings, 1 reply; 39+ messages in thread
From: Cornelia Huck @ 2015-04-02 13:43 UTC (permalink / raw)
To: Jason Wang
Cc: Kevin Wolf, mst, Alexander Graf, qemu-devel, Keith Busch,
Christian Borntraeger, qemu-ppc, Stefan Hajnoczi, Amit Shah,
Paolo Bonzini, Richard Henderson
On Wed, 1 Apr 2015 16:14:54 +0800
Jason Wang <jasowang@redhat.com> wrote:
> Stress/migration test on virtio-pci, compile test on other
> targets. And make check on s390x-softmmu and ppc64-softmmu.
Patch 9 needs some slight modifications to fit on top of master, but
with that, the patch series passes basic smoke tests with both the
s390-virtio and virtio-ccw transports.
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [Qemu-devel] [PATCH V5 00/18] Support more virtio queues
2015-04-02 13:43 ` [Qemu-devel] [PATCH V5 00/18] Support more virtio queues Cornelia Huck
@ 2015-04-03 8:52 ` Jason Wang
0 siblings, 0 replies; 39+ messages in thread
From: Jason Wang @ 2015-04-03 8:52 UTC (permalink / raw)
To: Cornelia Huck
Cc: Kevin Wolf, mst, qemu-devel, Alexander Graf, Keith Busch,
Christian Borntraeger, qemu-ppc, Stefan Hajnoczi, Amit Shah,
Paolo Bonzini, Richard Henderson
On Thu, Apr 2, 2015 at 9:43 PM, Cornelia Huck
<cornelia.huck@de.ibm.com> wrote:
> On Wed, 1 Apr 2015 16:14:54 +0800
> Jason Wang <jasowang@redhat.com> wrote:
>
>> Stress/migration test on virtio-pci, compile test on other
>> targets. And make check on s390x-softmmu and ppc64-softmmu.
>
> Patch 9 needs some slight modifications to fit on top of master, but
> with that, the patch series passes basic smoke tests with both the
> s390-virtio and virtio-ccw transports.
>
Thanks a lot for the testing.
Will rebase in V6.
^ permalink raw reply [flat|nested] 39+ messages in thread