From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	Xiaoyao Li <xiaoyao.li@intel.com>
Subject: [PULL 25/63] i386/kvm: Move architectural CPUID leaf generation to separate helper
Date: Tue, 23 Apr 2024 17:09:13 +0200
Message-ID: <20240423150951.41600-26-pbonzini@redhat.com>
In-Reply-To: <20240423150951.41600-1-pbonzini@redhat.com>
From: Sean Christopherson <sean.j.christopherson@intel.com>
Move the architectural (for lack of a better term) CPUID leaf generation
to a separate helper so that the generation code can be reused by TDX,
which needs to generate a canonical VM-scoped configuration.
For now this is just a cleanup, so keep the function static.
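
To illustrate the intended reuse, a hypothetical TDX-side caller could
look roughly like the sketch below once the helper is exported; the
tdx_build_vm_cpuid() name and its surroundings are illustrative only
and not part of this series:

    /*
     * Illustrative sketch only: build a VM-scoped CPUID table by reusing
     * kvm_x86_build_cpuid() on an empty entry array, then hand the result
     * to the VM-scoped initialization.
     */
    static void tdx_build_vm_cpuid(CPUX86State *env)
    {
        struct {
            struct kvm_cpuid2 cpuid;
            struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
        } cpuid_data;

        memset(&cpuid_data, 0, sizeof(cpuid_data));

        /* Start from index 0, i.e. without any paravirtual leaves prepended. */
        cpuid_data.cpuid.nent = kvm_x86_build_cpuid(env, cpuid_data.entries, 0);

        /* ... pass &cpuid_data.cpuid to the VM-scoped init here ... */
    }

Because the helper takes the entry array and the starting index as
parameters, such a caller can decide for itself which paravirtual
leaves, if any, to prepend before the architectural ones.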
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Message-ID: <20240229063726.610065-23-xiaoyao.li@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/kvm/kvm.c | 449 +++++++++++++++++++++---------------------
1 file changed, 227 insertions(+), 222 deletions(-)
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index e68cbe92930..fcf9603d3e6 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -1706,6 +1706,231 @@ static void kvm_init_nested_state(CPUX86State *env)
     }
 }
 
+static uint32_t kvm_x86_build_cpuid(CPUX86State *env,
+                                    struct kvm_cpuid_entry2 *entries,
+                                    uint32_t cpuid_i)
+{
+    uint32_t limit, i, j;
+    uint32_t unused;
+    struct kvm_cpuid_entry2 *c;
+
+    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
+
+    for (i = 0; i <= limit; i++) {
+        j = 0;
+        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+            goto full;
+        }
+        c = &entries[cpuid_i++];
+        switch (i) {
+        case 2: {
+            /* Keep reading function 2 till all the input is received */
+            int times;
+
+            c->function = i;
+            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+                       KVM_CPUID_FLAG_STATE_READ_NEXT;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            times = c->eax & 0xff;
+
+            for (j = 1; j < times; ++j) {
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    goto full;
+                }
+                c = &entries[cpuid_i++];
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
+                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            }
+            break;
+        }
+        case 0x1f:
+            if (env->nr_dies < 2) {
+                cpuid_i--;
+                break;
+            }
+            /* fallthrough */
+        case 4:
+        case 0xb:
+        case 0xd:
+            for (j = 0; ; j++) {
+                if (i == 0xd && j == 64) {
+                    break;
+                }
+
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                c->index = j;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+                if (i == 4 && c->eax == 0) {
+                    break;
+                }
+                if (i == 0xb && !(c->ecx & 0xff00)) {
+                    break;
+                }
+                if (i == 0x1f && !(c->ecx & 0xff00)) {
+                    break;
+                }
+                if (i == 0xd && c->eax == 0) {
+                    continue;
+                }
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    goto full;
+                }
+                c = &entries[cpuid_i++];
+            }
+            break;
+        case 0x12:
+            for (j = 0; ; j++) {
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                c->index = j;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+                if (j > 1 && (c->eax & 0xf) != 1) {
+                    break;
+                }
+
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    goto full;
+                }
+                c = &entries[cpuid_i++];
+            }
+            break;
+        case 0x7:
+        case 0x14:
+        case 0x1d:
+        case 0x1e: {
+            uint32_t times;
+
+            c->function = i;
+            c->index = 0;
+            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            times = c->eax;
+
+            for (j = 1; j <= times; ++j) {
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    goto full;
+                }
+                c = &entries[cpuid_i++];
+                c->function = i;
+                c->index = j;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            }
+            break;
+        }
+        default:
+            c->function = i;
+            c->flags = 0;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
+                /*
+                 * KVM already returns all zeroes if a CPUID entry is missing,
+                 * so we can omit it and avoid hitting KVM's 80-entry limit.
+                 */
+                cpuid_i--;
+            }
+            break;
+        }
+    }
+
+    if (limit >= 0x0a) {
+        uint32_t eax, edx;
+
+        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
+
+        has_architectural_pmu_version = eax & 0xff;
+        if (has_architectural_pmu_version > 0) {
+            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
+
+            /* Shouldn't be more than 32, since that's the number of bits
+             * available in EBX to tell us _which_ counters are available.
+             * Play it safe.
+             */
+            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
+                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+            }
+
+            if (has_architectural_pmu_version > 1) {
+                num_architectural_pmu_fixed_counters = edx & 0x1f;
+
+                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+                }
+            }
+        }
+    }
+
+    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
+
+    for (i = 0x80000000; i <= limit; i++) {
+        j = 0;
+        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+            goto full;
+        }
+        c = &entries[cpuid_i++];
+
+        switch (i) {
+        case 0x8000001d:
+            /* Query for all AMD cache information leaves */
+            for (j = 0; ; j++) {
+                c->function = i;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                c->index = j;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+                if (c->eax == 0) {
+                    break;
+                }
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    goto full;
+                }
+                c = &entries[cpuid_i++];
+            }
+            break;
+        default:
+            c->function = i;
+            c->flags = 0;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
+                /*
+                 * KVM already returns all zeroes if a CPUID entry is missing,
+                 * so we can omit it and avoid hitting KVM's 80-entry limit.
+                 */
+                cpuid_i--;
+            }
+            break;
+        }
+    }
+
+    /* Call Centaur's CPUID instructions they are supported. */
+    if (env->cpuid_xlevel2 > 0) {
+        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
+
+        for (i = 0xC0000000; i <= limit; i++) {
+            j = 0;
+            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                goto full;
+            }
+            c = &entries[cpuid_i++];
+
+            c->function = i;
+            c->flags = 0;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+        }
+    }
+
+    return cpuid_i;
+
+full:
+    fprintf(stderr, "cpuid_data is full, no space for "
+            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+    abort();
+}
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     struct {
@@ -1722,8 +1947,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
-    uint32_t limit, i, j, cpuid_i;
-    uint32_t unused;
+    uint32_t cpuid_i;
     struct kvm_cpuid_entry2 *c;
     uint32_t signature[3];
     int kvm_base = KVM_CPUID_SIGNATURE;
@@ -1876,8 +2100,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
         c->edx = env->features[FEAT_KVM_HINTS];
     }
 
-    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
-
     if (cpu->kvm_pv_enforce_cpuid) {
         r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
         if (r < 0) {
@@ -1888,224 +2110,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
         }
     }
 
-    for (i = 0; i <= limit; i++) {
-        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
-            abort();
-        }
-        c = &cpuid_data.entries[cpuid_i++];
-
-        switch (i) {
-        case 2: {
-            /* Keep reading function 2 till all the input is received */
-            int times;
-
-            c->function = i;
-            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
-                       KVM_CPUID_FLAG_STATE_READ_NEXT;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            times = c->eax & 0xff;
-
-            for (j = 1; j < times; ++j) {
-                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-                    fprintf(stderr, "cpuid_data is full, no space for "
-                            "cpuid(eax:2):eax & 0xf = 0x%x\n", times);
-                    abort();
-                }
-                c = &cpuid_data.entries[cpuid_i++];
-                c->function = i;
-                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
-                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            }
-            break;
-        }
-        case 0x1f:
-            if (env->nr_dies < 2) {
-                cpuid_i--;
-                break;
-            }
-            /* fallthrough */
-        case 4:
-        case 0xb:
-        case 0xd:
-            for (j = 0; ; j++) {
-                if (i == 0xd && j == 64) {
-                    break;
-                }
-
-                c->function = i;
-                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                c->index = j;
-                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
-                if (i == 4 && c->eax == 0) {
-                    break;
-                }
-                if (i == 0xb && !(c->ecx & 0xff00)) {
-                    break;
-                }
-                if (i == 0x1f && !(c->ecx & 0xff00)) {
-                    break;
-                }
-                if (i == 0xd && c->eax == 0) {
-                    continue;
-                }
-                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-                    fprintf(stderr, "cpuid_data is full, no space for "
-                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
-                    abort();
-                }
-                c = &cpuid_data.entries[cpuid_i++];
-            }
-            break;
-        case 0x12:
-            for (j = 0; ; j++) {
-                c->function = i;
-                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                c->index = j;
-                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
-                if (j > 1 && (c->eax & 0xf) != 1) {
-                    break;
-                }
-
-                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-                    fprintf(stderr, "cpuid_data is full, no space for "
-                            "cpuid(eax:0x12,ecx:0x%x)\n", j);
-                    abort();
-                }
-                c = &cpuid_data.entries[cpuid_i++];
-            }
-            break;
-        case 0x7:
-        case 0x14:
-        case 0x1d:
-        case 0x1e: {
-            uint32_t times;
-
-            c->function = i;
-            c->index = 0;
-            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            times = c->eax;
-
-            for (j = 1; j <= times; ++j) {
-                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-                    fprintf(stderr, "cpuid_data is full, no space for "
-                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
-                    abort();
-                }
-                c = &cpuid_data.entries[cpuid_i++];
-                c->function = i;
-                c->index = j;
-                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            }
-            break;
-        }
-        default:
-            c->function = i;
-            c->flags = 0;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
-                /*
-                 * KVM already returns all zeroes if a CPUID entry is missing,
-                 * so we can omit it and avoid hitting KVM's 80-entry limit.
-                 */
-                cpuid_i--;
-            }
-            break;
-        }
-    }
-
-    if (limit >= 0x0a) {
-        uint32_t eax, edx;
-
-        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
-
-        has_architectural_pmu_version = eax & 0xff;
-        if (has_architectural_pmu_version > 0) {
-            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
-
-            /* Shouldn't be more than 32, since that's the number of bits
-             * available in EBX to tell us _which_ counters are available.
-             * Play it safe.
-             */
-            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
-                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
-            }
-
-            if (has_architectural_pmu_version > 1) {
-                num_architectural_pmu_fixed_counters = edx & 0x1f;
-
-                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
-                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
-                }
-            }
-        }
-    }
-
-    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
-
-    for (i = 0x80000000; i <= limit; i++) {
-        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
-            abort();
-        }
-        c = &cpuid_data.entries[cpuid_i++];
-
-        switch (i) {
-        case 0x8000001d:
-            /* Query for all AMD cache information leaves */
-            for (j = 0; ; j++) {
-                c->function = i;
-                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
-                c->index = j;
-                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
-
-                if (c->eax == 0) {
-                    break;
-                }
-                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-                    fprintf(stderr, "cpuid_data is full, no space for "
-                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
-                    abort();
-                }
-                c = &cpuid_data.entries[cpuid_i++];
-            }
-            break;
-        default:
-            c->function = i;
-            c->flags = 0;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
-                /*
-                 * KVM already returns all zeroes if a CPUID entry is missing,
-                 * so we can omit it and avoid hitting KVM's 80-entry limit.
-                 */
-                cpuid_i--;
-            }
-            break;
-        }
-    }
-
-    /* Call Centaur's CPUID instructions they are supported. */
-    if (env->cpuid_xlevel2 > 0) {
-        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
-
-        for (i = 0xC0000000; i <= limit; i++) {
-            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
-                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
-                abort();
-            }
-            c = &cpuid_data.entries[cpuid_i++];
-
-            c->function = i;
-            c->flags = 0;
-            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
-        }
-    }
-
+    cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
     cpuid_data.cpuid.nent = cpuid_i;
 
     if (((env->cpuid_version >> 8)&0xF) >= 6
--
2.44.0