From: Marcel Apfelbaum <marcel@redhat.com>
To: qemu-devel@nongnu.org
Cc: ehabkost@redhat.com, mst@redhat.com, kraxel@redhat.com,
pbonzini@redhat.com, marcel@redhat.com, imammedo@redhat.com,
rth@twiddle.net
Subject: [Qemu-devel] [PATCH V2 2/4] hw/acpi: merge pxb adjacent memory/IO ranges
Date: Sun, 15 Nov 2015 17:39:04 +0200
Message-ID: <1447601946-31248-3-git-send-email-marcel@redhat.com>
In-Reply-To: <1447601946-31248-1-git-send-email-marcel@redhat.com>

Since the PXB no longer has a built-in PCI bridge, the ACPI
tables will include IO/MEM ranges per device. Try to merge
adjacent ranges to reduce the ACPI table length.

Signed-off-by: Marcel Apfelbaum <marcel@redhat.com>
---
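For reference, a minimal standalone sketch of the adjacency-merge idea,
assuming simple {base, limit} pairs already sorted by base; the struct and
helper names below are illustrative stand-ins, not the QEMU
CrsRangeEntry/crs_range_insert API used in the patch:

#include <inttypes.h>
#include <stdio.h>

struct range {
    uint64_t base;
    uint64_t limit;                     /* inclusive upper bound */
};

/* Merge adjacent entries of an array sorted by base; returns the new count. */
static size_t merge_adjacent(struct range *r, size_t n)
{
    size_t out = 0, i;

    if (n == 0) {
        return 0;
    }
    for (i = 1; i < n; i++) {
        if (r[i].base - 1 == r[out].limit) {
            r[out].limit = r[i].limit;  /* contiguous: extend current range */
        } else {
            r[++out] = r[i];            /* gap found: start a new range */
        }
    }
    return out + 1;
}

int main(void)
{
    struct range io[] = { {0x1000, 0x1fff}, {0x2000, 0x2fff}, {0x4000, 0x4fff} };
    size_t i, n = merge_adjacent(io, 3);

    /* Prints 0x1000-0x2fff and 0x4000-0x4fff: the first two are adjacent. */
    for (i = 0; i < n; i++) {
        printf("0x%" PRIx64 "-0x%" PRIx64 "\n", io[i].base, io[i].limit);
    }
    return 0;
}

The same invariant is what crs_range_merge relies on below: after sorting,
a gap between consecutive entries marks where one merged descriptor ends
and the next begins.
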
hw/i386/acpi-build.c | 123 +++++++++++++++++++++++++++++++--------------------
1 file changed, 74 insertions(+), 49 deletions(-)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 95e0c65..736b252 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -762,16 +762,59 @@ static void crs_replace_with_free_ranges(GPtrArray *ranges,
g_ptr_array_free(free_ranges, false);
}
+/*
+ * crs_range_merge - merges adjacent ranges in the given array.
+ * Array elements are deleted and replaced with the merged ranges.
+ */
+static void crs_range_merge(GPtrArray *range)
+{
+ GPtrArray *tmp = g_ptr_array_new_with_free_func(crs_range_free);
+ CrsRangeEntry *entry;
+ uint64_t range_base, range_limit;
+ int i;
+
+ if (!range->len) {
+ return;
+ }
+
+ g_ptr_array_sort(range, crs_range_compare);
+
+ entry = g_ptr_array_index(range, 0);
+ range_base = entry->base;
+ range_limit = entry->limit;
+ for (i = 1; i < range->len; i++) {
+ entry = g_ptr_array_index(range, i);
+ if (entry->base - 1 == range_limit) {
+ range_limit = entry->limit;
+ } else {
+ crs_range_insert(tmp, range_base, range_limit);
+ range_base = entry->base;
+ range_limit = entry->limit;
+ }
+ }
+ crs_range_insert(tmp, range_base, range_limit);
+
+ g_ptr_array_set_size(range, 0);
+ for (i = 0; i < tmp->len; i++) {
+ entry = g_ptr_array_index(tmp, i);
+ crs_range_insert(range, entry->base, entry->limit);
+ }
+ g_ptr_array_free(tmp, true);
+}
+
static Aml *build_crs(PCIHostState *host,
GPtrArray *io_ranges, GPtrArray *mem_ranges)
{
Aml *crs = aml_resource_template();
+ GPtrArray *host_io_ranges = g_ptr_array_new_with_free_func(crs_range_free);
+ GPtrArray *host_mem_ranges = g_ptr_array_new_with_free_func(crs_range_free);
+ CrsRangeEntry *entry;
uint8_t max_bus = pci_bus_num(host->bus);
uint8_t type;
int devfn;
+ int i;
for (devfn = 0; devfn < ARRAY_SIZE(host->bus->devices); devfn++) {
- int i;
uint64_t range_base, range_limit;
PCIDevice *dev = host->bus->devices[devfn];
@@ -794,26 +837,9 @@ static Aml *build_crs(PCIHostState *host,
}
if (r->type & PCI_BASE_ADDRESS_SPACE_IO) {
- aml_append(crs,
- aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
- AML_POS_DECODE, AML_ENTIRE_RANGE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(io_ranges, range_base, range_limit);
+ crs_range_insert(host_io_ranges, range_base, range_limit);
} else { /* "memory" */
- aml_append(crs,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_NON_CACHEABLE,
- AML_READ_WRITE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(mem_ranges, range_base, range_limit);
+ crs_range_insert(host_mem_ranges, range_base, range_limit);
}
}
@@ -832,15 +858,7 @@ static Aml *build_crs(PCIHostState *host,
* that do not support multiple root buses
*/
if (range_base && range_base <= range_limit) {
- aml_append(crs,
- aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
- AML_POS_DECODE, AML_ENTIRE_RANGE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(io_ranges, range_base, range_limit);
+ crs_range_insert(host_io_ranges, range_base, range_limit);
}
range_base =
@@ -853,16 +871,7 @@ static Aml *build_crs(PCIHostState *host,
* that do not support multiple root buses
*/
if (range_base && range_base <= range_limit) {
- aml_append(crs,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_NON_CACHEABLE,
- AML_READ_WRITE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(mem_ranges, range_base, range_limit);
+ crs_range_insert(host_mem_ranges, range_base, range_limit);
}
range_base =
@@ -875,20 +884,36 @@ static Aml *build_crs(PCIHostState *host,
* that do not support multiple root buses
*/
if (range_base && range_base <= range_limit) {
- aml_append(crs,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_NON_CACHEABLE,
- AML_READ_WRITE,
- 0,
- range_base,
- range_limit,
- 0,
- range_limit - range_base + 1));
- crs_range_insert(mem_ranges, range_base, range_limit);
+ crs_range_insert(host_mem_ranges, range_base, range_limit);
}
}
}
+ crs_range_merge(host_io_ranges);
+ for (i = 0; i < host_io_ranges->len; i++) {
+ entry = g_ptr_array_index(host_io_ranges, i);
+ aml_append(crs,
+ aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_POS_DECODE, AML_ENTIRE_RANGE,
+ 0, entry->base, entry->limit, 0,
+ entry->limit - entry->base + 1));
+ crs_range_insert(io_ranges, entry->base, entry->limit);
+ }
+ g_ptr_array_free(host_io_ranges, true);
+
+ crs_range_merge(host_mem_ranges);
+ for (i = 0; i < host_mem_ranges->len; i++) {
+ entry = g_ptr_array_index(host_mem_ranges, i);
+ aml_append(crs,
+ aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
+ AML_MAX_FIXED, AML_NON_CACHEABLE,
+ AML_READ_WRITE,
+ 0, entry->base, entry->limit, 0,
+ entry->limit - entry->base + 1));
+ crs_range_insert(mem_ranges, entry->base, entry->limit);
+ }
+ g_ptr_array_free(host_mem_ranges, true);
+
aml_append(crs,
aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
0,
--
2.1.0