* [Qemu-devel] [PATCH for-2.13] i386: Helpers to encode cache information consistently
From: Eduardo Habkost @ 2018-03-16 22:37 UTC
To: qemu-devel
Cc: Brijesh Singh, Paolo Bonzini, Babu Moger, Thomas.Lendacky,
Igor Mammedov, Gary.Hook, Eduardo Habkost,
Radim Krčmář, Kash Pande
Instead of having a collection of macros that need to be used in
complex expressions to build CPUID data, define a CPUCacheInfo
struct that can hold information about a given cache. Helper
functions will take a CPUCacheInfo struct as input to encode
CPUID leaves for a cache.
This will help us ensure consistency between cache information
CPUID leaves, and make the existing inconsistencies in CPUID info
more visible.
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
target/i386/cpu.c | 525 ++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 410 insertions(+), 115 deletions(-)
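As a quick illustration of the intended call pattern (a minimal sketch with made-up values, not code from the patch itself): a cache is described once in a CPUCacheInfo, and the helpers derive the per-leaf register encodings from it.

    /* Illustrative only: a 32 KiB, 8-way L1 data cache with 64-byte lines.
     * Note that size == line_size * associativity * partitions * sets,
     * which encode_cache_cpuid4() asserts. */
    static struct CPUCacheInfo example_l1d = {
        .type = DCACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .self_init = true,
    };

    uint32_t eax, ebx, ecx, edx;
    /* 1 APIC ID sharing the cache; 4 cores per package (example values). */
    encode_cache_cpuid4(&example_l1d, 1, 4, &eax, &ebx, &ecx, &edx);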
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 6bb4ce8719..1c0571cac5 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -57,16 +57,226 @@
#include "disas/capstone.h"
-/* Cache topology CPUID constants: */
+/* Cache information data structures: */
-/* CPUID Leaf 2 Descriptors */
+enum CacheType {
+ DCACHE,
+ ICACHE,
+ UNIFIED_CACHE
+};
-#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
-#define CPUID_2_L1I_32KB_8WAY_64B 0x30
-#define CPUID_2_L2_2MB_8WAY_64B 0x7d
-#define CPUID_2_L3_16MB_16WAY_64B 0x4d
+struct CPUCacheInfo {
+ enum CacheType type;
+ uint8_t level;
+ /* Size in bytes */
+ uint32_t size;
+ /* Line size, in bytes */
+ uint16_t line_size;
+ /*
+ * Associativity.
+ * Note: representation of fully-associative caches is not implemented
+ */
+ uint8_t associativity;
+ /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
+ uint8_t partitions;
+ /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
+ uint32_t sets;
+ /*
+ * Lines per tag.
+ * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
+ * (Is this a synonym for @partitions?)
+ */
+ uint8_t lines_per_tag;
+
+ /* Self-initializing cache */
+ bool self_init;
+ /*
+ * WBINVD/INVD is not guaranteed to act upon lower level caches of
+ * non-originating threads sharing this cache.
+ * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
+ */
+ bool no_invd_sharing;
+ /*
+ * Cache is inclusive of lower cache levels.
+ * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
+ */
+ bool inclusive;
+ /*
+ * A complex function is used to index the cache, potentially using all
+ * address bits. CPUID[4].EDX[bit 2].
+ */
+ bool complex_indexing;
+};
+/* Helpers for building CPUID[2] descriptors: */
+
+struct CPUID2CacheDescriptorInfo {
+ enum CacheType type;
+ int level;
+ int size;
+ int line_size;
+ int associativity;
+};
+
+#define KiB 1024
+#define MiB (1024 * 1024)
+
+/*
+ * Known CPUID 2 cache descriptors.
+ * From Intel SDM Volume 2A, CPUID instruction
+ */
+struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
+ [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
+ .associativity = 2, .line_size = 32, },
+ [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
+ .associativity = 6, .line_size = 64, },
+ [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
+ .associativity = 2, .line_size = 64, },
+ [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
+ .associativity = 8, .line_size = 64, },
+ /* lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x22, 0x23 are not included
+ */
+ [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 16, .line_size = 64, },
+ /* lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x25, 0x20 are not included
+ */
+ [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 4, .line_size = 32, },
+ [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 4, .line_size = 32, },
+ [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 4, .line_size = 32, },
+ [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 4, .line_size = 64, },
+ [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
+ .associativity = 12, .line_size = 64, },
+ /* Descriptor 0x49 depends on CPU family/model, so it is not included */
+ [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
+ .associativity = 24, .line_size = 64, },
+ [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 4, .line_size = 64, },
+ /* lines per sector is not supported by cpuid2_cache_descriptor(),
+ * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
+ */
+ [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 2, .line_size = 64, },
+ [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 8, .line_size = 64, },
+ [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
+ .associativity = 8, .line_size = 32, },
+ [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 8, .line_size = 32, },
+ [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 8, .line_size = 32, },
+ [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 8, .line_size = 32, },
+ [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
+ .associativity = 4, .line_size = 64, },
+ [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 4, .line_size = 64, },
+ [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 4, .line_size = 64, },
+ [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 8, .line_size = 64, },
+ [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
+ .associativity = 12, .line_size = 64, },
+ [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
+ .associativity = 16, .line_size = 64, },
+ [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
+ .associativity = 24, .line_size = 64, },
+ [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
+ .associativity = 24, .line_size = 64, },
+ [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
+ .associativity = 24, .line_size = 64, },
+};
+
+/*
+ * "CPUID leaf 2 does not report cache descriptor information,
+ * use CPUID leaf 4 to query cache parameters"
+ */
+#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
+
+/*
+ * Return a CPUID 2 cache descriptor for a given cache.
+ * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
+ */
+static uint8_t cpuid2_cache_descriptor(struct CPUCacheInfo *cache)
+{
+ int i;
+
+ assert(cache->size > 0);
+ assert(cache->level > 0);
+ assert(cache->line_size > 0);
+ assert(cache->associativity > 0);
+ for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
+ struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
+ if (d->level == cache->level && d->type == cache->type &&
+ d->size == cache->size && d->line_size == cache->line_size &&
+ d->associativity == cache->associativity) {
+ return i;
+ }
+ }
+
+ return CACHE_DESCRIPTOR_UNAVAILABLE;
+}
+
/* CPUID Leaf 4 constants: */
/* EAX: */
@@ -84,6 +294,57 @@
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
+/* Encode CacheType for CPUID[4].EAX */
+#define CPUID_4_TYPE(t) (((t) == DCACHE) ? CPUID_4_TYPE_DCACHE : \
+ ((t) == ICACHE) ? CPUID_4_TYPE_ICACHE : \
+ ((t) == UNIFIED_CACHE) ? CPUID_4_TYPE_UNIFIED : \
+ 0 /* Invalid value */)
+
+
+/* Encode cache info for CPUID[4] */
+static void encode_cache_cpuid4(struct CPUCacheInfo *cache,
+ int num_apic_ids, int num_cores,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ assert(cache->size == cache->line_size * cache->associativity *
+ cache->partitions * cache->sets);
+
+ assert(num_apic_ids > 0);
+ *eax = CPUID_4_TYPE(cache->type) | \
+ (cache->level << 5) | \
+ (cache->self_init ? CPUID_4_SELF_INIT_LEVEL : 0) | \
+ ((num_cores - 1) << 26) | \
+ ((num_apic_ids - 1) << 14);
+
+ assert(cache->line_size > 0);
+ assert(cache->partitions > 0);
+ assert(cache->associativity > 0);
+ /* We don't implement fully-associative caches */
+ assert(cache->associativity < cache->sets);
+ *ebx = (cache->line_size - 1) | \
+ ((cache->partitions - 1) << 12) | \
+ ((cache->associativity - 1) << 22);
+
+ assert(cache->sets > 0);
+ *ecx = cache->sets - 1;
+
+ *edx = (cache->no_invd_sharing ? CPUID_4_NO_INVD_SHARING : 0) | \
+ (cache->inclusive ? CPUID_4_INCLUSIVE : 0) | \
+ (cache->complex_indexing ? CPUID_4_COMPLEX_IDX : 0);
+}
+
+/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
+static uint32_t encode_cache_cpuid80000005(struct CPUCacheInfo *cache)
+{
+ assert(cache->size % 1024 == 0);
+ assert(cache->lines_per_tag > 0);
+ assert(cache->associativity > 0);
+ assert(cache->line_size > 0);
+ return ((cache->size / 1024) << 24) | (cache->associativity << 16) | \
+ (cache->lines_per_tag << 8) | (cache->line_size);
+}
+
#define ASSOC_FULL 0xFF
/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
@@ -100,57 +361,134 @@
a == ASSOC_FULL ? 0xF : \
0 /* invalid value */)
+/*
+ * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
+ * @l3 can be NULL.
+ */
+static void encode_cache_cpuid80000006(struct CPUCacheInfo *l2,
+ struct CPUCacheInfo *l3,
+ uint32_t *ecx, uint32_t *edx)
+{
+ assert(l2->size % 1024 == 0);
+ assert(l2->associativity > 0);
+ assert(l2->lines_per_tag > 0);
+ assert(l2->line_size > 0);
+ *ecx = ((l2->size / 1024) << 16) | \
+ (AMD_ENC_ASSOC(l2->associativity) << 12) | \
+ (l2->lines_per_tag << 8) | (l2->line_size);
+
+ if (l3) {
+ assert(l3->size % (512 * 1024) == 0);
+ assert(l3->associativity > 0);
+ assert(l3->lines_per_tag > 0);
+ assert(l3->line_size > 0);
+ *edx = ((l3->size / (512 * 1024)) << 18) | \
+ (AMD_ENC_ASSOC(l3->associativity) << 12) | \
+ (l3->lines_per_tag << 8) | (l3->line_size);
+ } else {
+ *edx = 0;
+ }
+}
/* Definitions of the hardcoded cache entries we expose: */
/* L1 data cache: */
-#define L1D_LINE_SIZE 64
-#define L1D_ASSOCIATIVITY 8
-#define L1D_SETS 64
-#define L1D_PARTITIONS 1
-/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
-#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
+static struct CPUCacheInfo l1d_cache = {
+ .type = DCACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+};
+
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
-#define L1D_LINES_PER_TAG 1
-#define L1D_SIZE_KB_AMD 64
-#define L1D_ASSOCIATIVITY_AMD 2
+static struct CPUCacheInfo l1d_cache_amd = {
+ .type = DCACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 2,
+ .lines_per_tag = 1,
+ .no_invd_sharing = true,
+};
/* L1 instruction cache: */
-#define L1I_LINE_SIZE 64
-#define L1I_ASSOCIATIVITY 8
-#define L1I_SETS 64
-#define L1I_PARTITIONS 1
-/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
-#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
+static struct CPUCacheInfo l1i_cache = {
+ .type = ICACHE,
+ .level = 1,
+ .size = 32 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 8,
+ .sets = 64,
+ .partitions = 1,
+ .no_invd_sharing = true,
+};
+
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
-#define L1I_LINES_PER_TAG 1
-#define L1I_SIZE_KB_AMD 64
-#define L1I_ASSOCIATIVITY_AMD 2
+static struct CPUCacheInfo l1i_cache_amd = {
+ .type = ICACHE,
+ .level = 1,
+ .size = 64 * KiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 2,
+ .lines_per_tag = 1,
+ .no_invd_sharing = true,
+};
/* Level 2 unified cache: */
-#define L2_LINE_SIZE 64
-#define L2_ASSOCIATIVITY 16
-#define L2_SETS 4096
-#define L2_PARTITIONS 1
-/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
+static struct CPUCacheInfo l2_cache = {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 4 * MiB,
+ .self_init = 1,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 4096,
+ .partitions = 1,
+ .no_invd_sharing = true,
+};
+
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
-#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
+static struct CPUCacheInfo l2_cache_cpuid2 = {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 2 * MiB,
+ .line_size = 64,
+ .associativity = 8,
+};
+
+
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
-#define L2_LINES_PER_TAG 1
-#define L2_SIZE_KB_AMD 512
+static struct CPUCacheInfo l2_cache_amd = {
+ .type = UNIFIED_CACHE,
+ .level = 2,
+ .size = 512 * KiB,
+ .line_size = 64,
+ .lines_per_tag = 1,
+ .associativity = 16,
+};
/* Level 3 unified cache: */
-#define L3_SIZE_KB 0 /* disabled */
-#define L3_ASSOCIATIVITY 0 /* disabled */
-#define L3_LINES_PER_TAG 0 /* disabled */
-#define L3_LINE_SIZE 0 /* disabled */
-#define L3_N_LINE_SIZE 64
-#define L3_N_ASSOCIATIVITY 16
-#define L3_N_SETS 16384
-#define L3_N_PARTITIONS 1
-#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
-#define L3_N_LINES_PER_TAG 1
-#define L3_N_SIZE_KB_AMD 16384
+static struct CPUCacheInfo l3_cache = {
+ .type = UNIFIED_CACHE,
+ .level = 3,
+ .size = 16 * MiB,
+ .line_size = 64,
+ .associativity = 16,
+ .sets = 16384,
+ .partitions = 1,
+ .lines_per_tag = 1,
+ .self_init = true,
+ .inclusive = true,
+ .complex_indexing = true,
+};
/* TLB definitions: */
@@ -3298,85 +3636,53 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
if (!cpu->enable_l3_cache) {
*ecx = 0;
} else {
- *ecx = L3_N_DESCRIPTOR;
+ *ecx = cpuid2_cache_descriptor(&l3_cache);
}
- *edx = (L1D_DESCRIPTOR << 16) | \
- (L1I_DESCRIPTOR << 8) | \
- (L2_DESCRIPTOR);
+ *edx = (cpuid2_cache_descriptor(&l1d_cache) << 16) | \
+ (cpuid2_cache_descriptor(&l1i_cache) << 8) | \
+ (cpuid2_cache_descriptor(&l2_cache_cpuid2));
break;
case 4:
/* cache info: needed for Core compatibility */
if (cpu->cache_info_passthrough) {
host_cpuid(index, count, eax, ebx, ecx, edx);
+ /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
*eax &= ~0xFC000000;
+ if ((*eax & 31) && cs->nr_cores > 1) {
+ *eax |= (cs->nr_cores - 1) << 26;
+ }
} else {
*eax = 0;
switch (count) {
case 0: /* L1 dcache info */
- *eax |= CPUID_4_TYPE_DCACHE | \
- CPUID_4_LEVEL(1) | \
- CPUID_4_SELF_INIT_LEVEL;
- *ebx = (L1D_LINE_SIZE - 1) | \
- ((L1D_PARTITIONS - 1) << 12) | \
- ((L1D_ASSOCIATIVITY - 1) << 22);
- *ecx = L1D_SETS - 1;
- *edx = CPUID_4_NO_INVD_SHARING;
+ encode_cache_cpuid4(&l1d_cache,
+ 1, cs->nr_cores,
+ eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
- *eax |= CPUID_4_TYPE_ICACHE | \
- CPUID_4_LEVEL(1) | \
- CPUID_4_SELF_INIT_LEVEL;
- *ebx = (L1I_LINE_SIZE - 1) | \
- ((L1I_PARTITIONS - 1) << 12) | \
- ((L1I_ASSOCIATIVITY - 1) << 22);
- *ecx = L1I_SETS - 1;
- *edx = CPUID_4_NO_INVD_SHARING;
+ encode_cache_cpuid4(&l1i_cache,
+ 1, cs->nr_cores,
+ eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
- *eax |= CPUID_4_TYPE_UNIFIED | \
- CPUID_4_LEVEL(2) | \
- CPUID_4_SELF_INIT_LEVEL;
- if (cs->nr_threads > 1) {
- *eax |= (cs->nr_threads - 1) << 14;
- }
- *ebx = (L2_LINE_SIZE - 1) | \
- ((L2_PARTITIONS - 1) << 12) | \
- ((L2_ASSOCIATIVITY - 1) << 22);
- *ecx = L2_SETS - 1;
- *edx = CPUID_4_NO_INVD_SHARING;
+ encode_cache_cpuid4(&l2_cache,
+ cs->nr_threads, cs->nr_cores,
+ eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
- if (!cpu->enable_l3_cache) {
- *eax = 0;
- *ebx = 0;
- *ecx = 0;
- *edx = 0;
+ pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ if (cpu->enable_l3_cache) {
+ encode_cache_cpuid4(&l3_cache,
+ (1 << pkg_offset), cs->nr_cores,
+ eax, ebx, ecx, edx);
break;
}
- *eax |= CPUID_4_TYPE_UNIFIED | \
- CPUID_4_LEVEL(3) | \
- CPUID_4_SELF_INIT_LEVEL;
- pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
- *eax |= ((1 << pkg_offset) - 1) << 14;
- *ebx = (L3_N_LINE_SIZE - 1) | \
- ((L3_N_PARTITIONS - 1) << 12) | \
- ((L3_N_ASSOCIATIVITY - 1) << 22);
- *ecx = L3_N_SETS - 1;
- *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
- break;
+ /* fall through */
default: /* end of info */
- *eax = 0;
- *ebx = 0;
- *ecx = 0;
- *edx = 0;
+ *eax = *ebx = *ecx = *edx = 0;
break;
}
}
-
- /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
- if ((*eax & 31) && cs->nr_cores > 1) {
- *eax |= (cs->nr_cores - 1) << 26;
- }
break;
case 5:
/* mwait info: needed for Core compatibility */
@@ -3580,10 +3886,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
(L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
*ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
(L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
- *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
- (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
- *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
- (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
+ *ecx = encode_cache_cpuid80000005(&l1d_cache_amd);
+ *edx = encode_cache_cpuid80000005(&l1i_cache_amd);
break;
case 0x80000006:
/* cache info (L2 cache) */
@@ -3599,18 +3903,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
(L2_DTLB_4K_ENTRIES << 16) | \
(AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
(L2_ITLB_4K_ENTRIES);
- *ecx = (L2_SIZE_KB_AMD << 16) | \
- (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
- (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
- if (!cpu->enable_l3_cache) {
- *edx = ((L3_SIZE_KB / 512) << 18) | \
- (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
- (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
- } else {
- *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
- (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
- (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
- }
+ encode_cache_cpuid80000006(&l2_cache_amd,
+ cpu->enable_l3_cache ? &l3_cache : NULL,
+ ecx, edx);
break;
case 0x80000007:
*eax = 0;
--
2.14.3
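As a sanity check on the new CPUID[0x80000006] encoding, the AMD L2 values above (512 KiB, 16-way, 1 line per tag, 64-byte lines) can be encoded by hand; a small standalone check (illustrative, not part of the patch — 16-way associativity encodes as 0x8 per AMD_ENC_ASSOC):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* ECX layout: size-in-KB[31:16] | associativity[15:12] |
         * lines-per-tag[11:8] | line-size[7:0] */
        uint32_t ecx = (512u << 16) | (0x8u << 12) | (1u << 8) | 64u;
        assert(ecx == 0x02008140);
        return 0;
    }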
* Re: [Qemu-devel] [PATCH for-2.13] i386: Helpers to encode cache information consistently
From: Moger, Babu @ 2018-03-19 23:13 UTC
To: Eduardo Habkost, qemu-devel@nongnu.org
Cc: Singh, Brijesh, Paolo Bonzini, Lendacky, Thomas, Igor Mammedov,
Hook, Gary, Radim Krčmář, Kash Pande
Hi Eduardo,
Thanks for the patches. I am verifying the patch right now and will respond tomorrow.