* [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun
@ 2015-12-16 19:06 Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 1/5] target-i386: kvm: Allocate kvm_msrs struct once per VCPU Eduardo Habkost
` (5 more replies)
0 siblings, 6 replies; 8+ messages in thread
From: Eduardo Habkost @ 2015-12-16 19:06 UTC
To: Paolo Bonzini, Marcelo Tosatti; +Cc: qemu-devel, kvm
We are dangerously close to the array limits in kvm_put_msrs()
and kvm_get_msrs(): with the default mcg_cap configuration, we
can set up to 148 MSRs in kvm_put_msrs(), and if we allow mcg_cap
to be changed, we can write up to 236 MSRs[1].
This series changes the code to allocate a buffer once per VCPU,
increase the buffer size to 4096 bytes (which can hold up to 255 MSR
entries), and check array limits before appending new entries.
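For illustration only, here is a minimal standalone sketch of the
bounds-checked append pattern this series introduces. The types below
are simplified stand-ins with the same layout as the kernel's
struct kvm_msrs / struct kvm_msr_entry; this is not the actual QEMU
code (see the patches below for that):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Stand-ins mirroring the kernel's kvm_msrs / kvm_msr_entry layout. */
    struct msr_entry {
        uint32_t index;
        uint32_t reserved;
        uint64_t data;
    };

    struct msr_buf {
        uint32_t nmsrs;
        uint32_t pad;
        struct msr_entry entries[];
    };

    #define MSR_BUF_SIZE 4096    /* 8-byte header + up to 255 entries */

    /* Append one entry, asserting it still fits in the fixed-size buffer. */
    static void msr_entry_add(struct msr_buf *buf, uint32_t index,
                              uint64_t value)
    {
        unsigned char *limit = (unsigned char *)buf + MSR_BUF_SIZE;
        struct msr_entry *entry = &buf->entries[buf->nmsrs];

        assert((unsigned char *)(entry + 1) <= limit);

        entry->index = index;
        entry->reserved = 0;
        entry->data = value;
        buf->nmsrs++;
    }

    int main(void)
    {
        struct msr_buf *buf = calloc(1, MSR_BUF_SIZE);
        int ok;

        msr_entry_add(buf, 0x174 /* IA32_SYSENTER_CS */, 0);
        ok = (buf->nmsrs == 1);
        free(buf);
        return ok ? 0 : 1;
    }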
[1] I have checked the limits by copying and pasting the
kvm_put_msrs() code to a new file, replacing the "if" lines,
copying the macro definitions, and adding a helper macro to
keep track of the kvm_msr_entry_set() calls. The code can be
seen at:
https://gist.github.com/ehabkost/08d4177a33b8648a71ef
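For readers who do not want to open the gist, here is a purely
hypothetical sketch of the counting trick described above; the loop
bounds, conditions, and MSR names are toy stand-ins, not the real
values (the complete version is at the URL above):

    #include <stdio.h>

    /* Redefine the entry setter as a counter, force every conditional
     * on, and "run" a copy of the MSR-setting body to count entries. */
    static int msr_count;
    #define kvm_msr_entry_set(entry, index, value) (msr_count++)

    static void count_worst_case(void)
    {
        int i;

        kvm_msr_entry_set(0, 0, 0);     /* e.g. MSR_IA32_SYSENTER_CS */
        kvm_msr_entry_set(0, 0, 0);     /* e.g. MSR_PAT */
        if (1) {                        /* "has_msr_star" forced to true */
            kvm_msr_entry_set(0, 0, 0);
        }
        for (i = 0; i < 4; i++) {       /* toy stand-in for a bank loop */
            kvm_msr_entry_set(0, 0, 0);
        }
    }

    int main(void)
    {
        count_worst_case();
        printf("worst-case entries in this toy body: %d\n", msr_count);
        return 0;
    }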
Eduardo Habkost (5):
target-i386: kvm: Allocate kvm_msrs struct once per VCPU
target-i386: kvm: Increase MSR_BUF_SIZE
target-i386: kvm: Simplify MSR array construction
target-i386: kvm: Simplify MSR setting functions
target-i386: kvm: Eliminate kvm_msr_entry_set()
target-i386/cpu-qom.h | 4 +
target-i386/kvm.c | 322 +++++++++++++++++++++++---------------------------
2 files changed, 149 insertions(+), 177 deletions(-)
--
2.1.0
* [Qemu-devel] [PATCH 1/5] target-i386: kvm: Allocate kvm_msrs struct once per VCPU
2015-12-16 19:06 [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Eduardo Habkost
@ 2015-12-16 19:06 ` Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 2/5] target-i386: kvm: Increase MSR_BUF_SIZE Eduardo Habkost
` (4 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Eduardo Habkost @ 2015-12-16 19:06 UTC
To: Paolo Bonzini, Marcelo Tosatti; +Cc: qemu-devel, kvm
Instead of using 2400 bytes on the stack for 150 MSR entries in
kvm_get_msrs() and kvm_put_msrs(), allocate a buffer once per
VCPU.
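For context (illustration only, not part of the change), the 2400-byte
figure follows from the 16-byte kvm_msr_entry layout on the usual ABI;
a standalone check using a stand-in struct with the same fields:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in with the same layout as the kernel's kvm_msr_entry. */
    struct msr_entry {
        uint32_t index;
        uint32_t reserved;
        uint64_t data;
    };

    int main(void)
    {
        /* 150 entries of 16 bytes each are the 2400 bytes that used to
         * sit on the stack in kvm_get_msrs() and kvm_put_msrs(). */
        assert(sizeof(struct msr_entry) == 16);
        assert(150 * sizeof(struct msr_entry) == 2400);
        return 0;
    }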
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
target-i386/cpu-qom.h | 4 ++++
target-i386/kvm.c | 37 +++++++++++++++++++------------------
2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e3bfe9d..f349b30 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -69,6 +69,8 @@ typedef struct X86CPUClass {
void (*parent_reset)(CPUState *cpu);
} X86CPUClass;
+struct kvm_msrs;
+
/**
* X86CPU:
* @env: #CPUX86State
@@ -119,6 +121,8 @@ typedef struct X86CPU {
struct DeviceState *apic_state;
struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
Notifier machine_done;
+
+ struct kvm_msrs *kvm_msr_buf;
} X86CPU;
static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 6dc9846..660b2d9 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -52,6 +52,9 @@
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
+#define MSR_BUF_SIZE \
+ (sizeof(struct kvm_msrs) + 150 * sizeof(struct kvm_msr_entry))
+
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#endif
@@ -841,6 +844,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (has_xsave) {
env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
}
+ cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
has_msr_mtrr = true;
@@ -1349,6 +1353,11 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
entry->data = value;
}
+static void kvm_msr_buf_reset(X86CPU *cpu)
+{
+ memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
+}
+
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@@ -1397,13 +1406,11 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
static int kvm_put_msrs(X86CPU *cpu, int level)
{
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[150];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
+ struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
int n = 0, i;
+ kvm_msr_buf_reset(cpu);
+
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
@@ -1562,11 +1569,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
}
}
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = n,
- };
+ cpu->kvm_msr_buf->nmsrs = n;
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
@@ -1775,13 +1780,11 @@ static int kvm_get_sregs(X86CPU *cpu)
static int kvm_get_msrs(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[150];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
+ struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
int ret, i, n;
+ kvm_msr_buf_reset(cpu);
+
n = 0;
msrs[n++].index = MSR_IA32_SYSENTER_CS;
msrs[n++].index = MSR_IA32_SYSENTER_ESP;
@@ -1904,11 +1907,9 @@ static int kvm_get_msrs(X86CPU *cpu)
}
}
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = n,
- };
+ cpu->kvm_msr_buf->nmsrs = n;
- ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
}
--
2.1.0
* [Qemu-devel] [PATCH 2/5] target-i386: kvm: Increase MSR_BUF_SIZE
2015-12-16 19:06 [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 1/5] target-i386: kvm: Allocate kvm_msrs struct once per VCPU Eduardo Habkost
@ 2015-12-16 19:06 ` Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 3/5] target-i386: kvm: Simplify MSR array construction Eduardo Habkost
` (3 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Eduardo Habkost @ 2015-12-16 19:06 UTC
To: Paolo Bonzini, Marcelo Tosatti; +Cc: qemu-devel, kvm
We are dangerously close to the array limits in kvm_put_msrs()
and kvm_get_msrs(): with the default mcg_cap configuration, we
can set up to 148 MSRs in kvm_put_msrs(), and if we allow mcg_cap
to be changed, we can write up to 236 MSRs.
Use a 4096-byte buffer, which can hold up to 255 kvm_msr_entry
structs.
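For reference, a quick standalone check of the 255-entry figure, using
stand-in types that mirror the kernel's layout (8-byte kvm_msrs header,
16-byte kvm_msr_entry):

    #include <stdint.h>
    #include <stdio.h>

    struct msrs_header { uint32_t nmsrs; uint32_t pad; };     /*  8 bytes */
    struct msr_entry   { uint32_t index; uint32_t reserved;
                         uint64_t data; };                    /* 16 bytes */

    int main(void)
    {
        size_t capacity = (4096 - sizeof(struct msrs_header))
                          / sizeof(struct msr_entry);

        printf("entries that fit in 4096 bytes: %zu\n", capacity); /* 255 */
        return 0;
    }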
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
target-i386/kvm.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 660b2d9..1e82400 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -52,8 +52,9 @@
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
-#define MSR_BUF_SIZE \
- (sizeof(struct kvm_msrs) + 150 * sizeof(struct kvm_msr_entry))
+/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 255 kvm_msr_entry structs */
+#define MSR_BUF_SIZE 4096
#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
--
2.1.0
* [Qemu-devel] [PATCH 3/5] target-i386: kvm: Simplify MSR array construction
2015-12-16 19:06 [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 1/5] target-i386: kvm: Allocate kvm_msrs struct once per VCPU Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 2/5] target-i386: kvm: Increase MSR_BUF_SIZE Eduardo Habkost
@ 2015-12-16 19:06 ` Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 4/5] target-i386: kvm: Simplify MSR setting functions Eduardo Habkost
` (2 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Eduardo Habkost @ 2015-12-16 19:06 UTC
To: Paolo Bonzini, Marcelo Tosatti; +Cc: qemu-devel, kvm
Add a helper function that appends new entries to the MSR buffer
and checks that the buffer size limit is not exceeded.
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
target-i386/kvm.c | 262 ++++++++++++++++++++++++++----------------------------
1 file changed, 125 insertions(+), 137 deletions(-)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 1e82400..ada484f 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -1359,6 +1359,18 @@ static void kvm_msr_buf_reset(X86CPU *cpu)
memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
}
+static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
+{
+ struct kvm_msrs *msrs = cpu->kvm_msr_buf;
+ void *limit = ((void*)msrs) + MSR_BUF_SIZE;
+ struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
+
+ assert((void*)(entry + 1) <= limit);
+
+ kvm_msr_entry_set(entry, index, value);
+ msrs->nmsrs++;
+}
+
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@@ -1407,46 +1419,45 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
static int kvm_put_msrs(X86CPU *cpu, int level)
{
CPUX86State *env = &cpu->env;
- struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
- int n = 0, i;
+ int i;
kvm_msr_buf_reset(cpu);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
- kvm_msr_entry_set(&msrs[n++], MSR_PAT, env->pat);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
if (has_msr_star) {
- kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+ kvm_msr_entry_add(cpu, MSR_STAR, env->star);
}
if (has_msr_hsave_pa) {
- kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
}
if (has_msr_tsc_aux) {
- kvm_msr_entry_set(&msrs[n++], MSR_TSC_AUX, env->tsc_aux);
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
}
if (has_msr_tsc_adjust) {
- kvm_msr_entry_set(&msrs[n++], MSR_TSC_ADJUST, env->tsc_adjust);
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
}
if (has_msr_misc_enable) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_MISC_ENABLE,
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
env->msr_ia32_misc_enable);
}
if (has_msr_smbase) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_SMBASE, env->smbase);
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
}
if (has_msr_bndcfgs) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_BNDCFGS, env->msr_bndcfgs);
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
}
if (has_msr_xss) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_XSS, env->xss);
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
}
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
- kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
- kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
- kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
- kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
+ kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
+ kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
}
#endif
/*
@@ -1454,106 +1465,89 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
* for normal writeback. Limit them to reset or full state updates.
*/
if (level >= KVM_PUT_RESET_STATE) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME,
- env->system_time_msr);
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
if (has_msr_async_pf_en) {
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN,
- env->async_pf_en_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
if (has_msr_pv_eoi_en) {
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_PV_EOI_EN,
- env->pv_eoi_en_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
if (has_msr_kvm_steal_time) {
- kvm_msr_entry_set(&msrs[n++], MSR_KVM_STEAL_TIME,
- env->steal_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
if (has_msr_architectural_pmu) {
/* Stop the counter. */
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
/* Set the counter values. */
for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR0 + i,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
env->msr_fixed_counters[i]);
}
for (i = 0; i < num_architectural_pmu_counters; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_P6_PERFCTR0 + i,
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
env->msr_gp_counters[i]);
- kvm_msr_entry_set(&msrs[n++], MSR_P6_EVNTSEL0 + i,
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
env->msr_gp_evtsel[i]);
}
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_STATUS,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
env->msr_global_status);
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
env->msr_global_ovf_ctrl);
/* Now start the PMU. */
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_FIXED_CTR_CTRL,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
env->msr_fixed_ctr_ctrl);
- kvm_msr_entry_set(&msrs[n++], MSR_CORE_PERF_GLOBAL_CTRL,
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
env->msr_global_ctrl);
}
if (has_msr_hv_hypercall) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_GUEST_OS_ID,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
env->msr_hv_guest_os_id);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_HYPERCALL,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
env->msr_hv_hypercall);
}
if (has_msr_hv_vapic) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
if (has_msr_hv_tsc) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
- env->msr_hv_tsc);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
}
if (has_msr_hv_crash) {
int j;
for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_P0 + j,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
env->msr_hv_crash_params[j]);
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_CRASH_CTL,
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
HV_X64_MSR_CRASH_CTL_NOTIFY);
}
if (has_msr_hv_runtime) {
- kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
- env->msr_hv_runtime);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
}
if (has_msr_mtrr) {
- kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRphysBase(i), env->mtrr_var[i].base);
- kvm_msr_entry_set(&msrs[n++],
- MSR_MTRRphysMask(i), env->mtrr_var[i].mask);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
+ env->mtrr_var[i].base);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i),
+ env->mtrr_var[i].mask);
}
}
@@ -1563,17 +1557,14 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (env->mcg_cap) {
int i;
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status);
- kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl);
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
- kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]);
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
}
}
- cpu->kvm_msr_buf->nmsrs = n;
-
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
-
}
@@ -1782,134 +1773,131 @@ static int kvm_get_msrs(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
- int ret, i, n;
+ int ret, i;
kvm_msr_buf_reset(cpu);
- n = 0;
- msrs[n++].index = MSR_IA32_SYSENTER_CS;
- msrs[n++].index = MSR_IA32_SYSENTER_ESP;
- msrs[n++].index = MSR_IA32_SYSENTER_EIP;
- msrs[n++].index = MSR_PAT;
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
+ kvm_msr_entry_add(cpu, MSR_PAT, 0);
if (has_msr_star) {
- msrs[n++].index = MSR_STAR;
+ kvm_msr_entry_add(cpu, MSR_STAR, 0);
}
if (has_msr_hsave_pa) {
- msrs[n++].index = MSR_VM_HSAVE_PA;
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
}
if (has_msr_tsc_aux) {
- msrs[n++].index = MSR_TSC_AUX;
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
}
if (has_msr_tsc_adjust) {
- msrs[n++].index = MSR_TSC_ADJUST;
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
}
if (has_msr_tsc_deadline) {
- msrs[n++].index = MSR_IA32_TSCDEADLINE;
+ kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
}
if (has_msr_misc_enable) {
- msrs[n++].index = MSR_IA32_MISC_ENABLE;
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
}
if (has_msr_smbase) {
- msrs[n++].index = MSR_IA32_SMBASE;
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
}
if (has_msr_feature_control) {
- msrs[n++].index = MSR_IA32_FEATURE_CONTROL;
+ kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
}
if (has_msr_bndcfgs) {
- msrs[n++].index = MSR_IA32_BNDCFGS;
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
}
if (has_msr_xss) {
- msrs[n++].index = MSR_IA32_XSS;
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
}
if (!env->tsc_valid) {
- msrs[n++].index = MSR_IA32_TSC;
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
env->tsc_valid = !runstate_is_running();
}
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
- msrs[n++].index = MSR_CSTAR;
- msrs[n++].index = MSR_KERNELGSBASE;
- msrs[n++].index = MSR_FMASK;
- msrs[n++].index = MSR_LSTAR;
+ kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
+ kvm_msr_entry_add(cpu, MSR_FMASK, 0);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
}
#endif
- msrs[n++].index = MSR_KVM_SYSTEM_TIME;
- msrs[n++].index = MSR_KVM_WALL_CLOCK;
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
if (has_msr_async_pf_en) {
- msrs[n++].index = MSR_KVM_ASYNC_PF_EN;
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
}
if (has_msr_pv_eoi_en) {
- msrs[n++].index = MSR_KVM_PV_EOI_EN;
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
}
if (has_msr_kvm_steal_time) {
- msrs[n++].index = MSR_KVM_STEAL_TIME;
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
}
if (has_msr_architectural_pmu) {
- msrs[n++].index = MSR_CORE_PERF_FIXED_CTR_CTRL;
- msrs[n++].index = MSR_CORE_PERF_GLOBAL_CTRL;
- msrs[n++].index = MSR_CORE_PERF_GLOBAL_STATUS;
- msrs[n++].index = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
- msrs[n++].index = MSR_CORE_PERF_FIXED_CTR0 + i;
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
}
for (i = 0; i < num_architectural_pmu_counters; i++) {
- msrs[n++].index = MSR_P6_PERFCTR0 + i;
- msrs[n++].index = MSR_P6_EVNTSEL0 + i;
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
}
}
if (env->mcg_cap) {
- msrs[n++].index = MSR_MCG_STATUS;
- msrs[n++].index = MSR_MCG_CTL;
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
- msrs[n++].index = MSR_MC0_CTL + i;
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
}
}
if (has_msr_hv_hypercall) {
- msrs[n++].index = HV_X64_MSR_HYPERCALL;
- msrs[n++].index = HV_X64_MSR_GUEST_OS_ID;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
}
if (has_msr_hv_vapic) {
- msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
if (has_msr_hv_tsc) {
- msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
}
if (has_msr_hv_crash) {
int j;
for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
- msrs[n++].index = HV_X64_MSR_CRASH_P0 + j;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
}
}
if (has_msr_hv_runtime) {
- msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
}
if (has_msr_mtrr) {
- msrs[n++].index = MSR_MTRRdefType;
- msrs[n++].index = MSR_MTRRfix64K_00000;
- msrs[n++].index = MSR_MTRRfix16K_80000;
- msrs[n++].index = MSR_MTRRfix16K_A0000;
- msrs[n++].index = MSR_MTRRfix4K_C0000;
- msrs[n++].index = MSR_MTRRfix4K_C8000;
- msrs[n++].index = MSR_MTRRfix4K_D0000;
- msrs[n++].index = MSR_MTRRfix4K_D8000;
- msrs[n++].index = MSR_MTRRfix4K_E0000;
- msrs[n++].index = MSR_MTRRfix4K_E8000;
- msrs[n++].index = MSR_MTRRfix4K_F0000;
- msrs[n++].index = MSR_MTRRfix4K_F8000;
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
- msrs[n++].index = MSR_MTRRphysBase(i);
- msrs[n++].index = MSR_MTRRphysMask(i);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
}
}
- cpu->kvm_msr_buf->nmsrs = n;
-
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
--
2.1.0
* [Qemu-devel] [PATCH 4/5] target-i386: kvm: Simplify MSR setting functions
2015-12-16 19:06 [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Eduardo Habkost
` (2 preceding siblings ...)
2015-12-16 19:06 ` [Qemu-devel] [PATCH 3/5] target-i386: kvm: Simplify MSR array construction Eduardo Habkost
@ 2015-12-16 19:06 ` Eduardo Habkost
2015-12-16 19:06 ` [Qemu-devel] [PATCH 5/5] target-i386: kvm: Eliminate kvm_msr_entry_set() Eduardo Habkost
2015-12-16 21:38 ` [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Paolo Bonzini
5 siblings, 0 replies; 8+ messages in thread
From: Eduardo Habkost @ 2015-12-16 19:06 UTC
To: Paolo Bonzini, Marcelo Tosatti; +Cc: qemu-devel, kvm
Simplify kvm_put_tscdeadline_msr() and
kvm_put_msr_feature_control() using kvm_msr_buf and the
kvm_msr_entry_add() helper.
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
target-i386/kvm.c | 28 ++++++----------------------
1 file changed, 6 insertions(+), 22 deletions(-)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index ada484f..3550866 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -1374,23 +1374,15 @@ static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
static int kvm_put_tscdeadline_msr(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entries[1];
- } msr_data;
- struct kvm_msr_entry *msrs = msr_data.entries;
if (!has_msr_tsc_deadline) {
return 0;
}
- kvm_msr_entry_set(&msrs[0], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
-
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = 1,
- };
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
/*
@@ -1401,19 +1393,11 @@ static int kvm_put_tscdeadline_msr(X86CPU *cpu)
*/
static int kvm_put_msr_feature_control(X86CPU *cpu)
{
- struct {
- struct kvm_msrs info;
- struct kvm_msr_entry entry;
- } msr_data;
-
- kvm_msr_entry_set(&msr_data.entry, MSR_IA32_FEATURE_CONTROL,
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL,
cpu->env.msr_ia32_feature_control);
- msr_data.info = (struct kvm_msrs) {
- .nmsrs = 1,
- };
-
- return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
}
static int kvm_put_msrs(X86CPU *cpu, int level)
--
2.1.0
* [Qemu-devel] [PATCH 5/5] target-i386: kvm: Eliminate kvm_msr_entry_set()
2015-12-16 19:06 [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Eduardo Habkost
` (3 preceding siblings ...)
2015-12-16 19:06 ` [Qemu-devel] [PATCH 4/5] target-i386: kvm: Simplify MSR setting functions Eduardo Habkost
@ 2015-12-16 19:06 ` Eduardo Habkost
2015-12-16 21:38 ` [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Paolo Bonzini
5 siblings, 0 replies; 8+ messages in thread
From: Eduardo Habkost @ 2015-12-16 19:06 UTC
To: Paolo Bonzini, Marcelo Tosatti; +Cc: qemu-devel, kvm
Inline kvm_msr_entry_set() into kvm_msr_entry_add(), its only remaining caller.
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
target-i386/kvm.c | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 3550866..b328392 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -1346,14 +1346,6 @@ static int kvm_put_sregs(X86CPU *cpu)
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}
-static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
- uint32_t index, uint64_t value)
-{
- entry->index = index;
- entry->reserved = 0;
- entry->data = value;
-}
-
static void kvm_msr_buf_reset(X86CPU *cpu)
{
memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
@@ -1367,7 +1359,9 @@ static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
assert((void*)(entry + 1) <= limit);
- kvm_msr_entry_set(entry, index, value);
+ entry->index = index;
+ entry->reserved = 0;
+ entry->data = value;
msrs->nmsrs++;
}
--
2.1.0
* Re: [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun
2015-12-16 19:06 [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Eduardo Habkost
` (4 preceding siblings ...)
2015-12-16 19:06 ` [Qemu-devel] [PATCH 5/5] target-i386: kvm: Eliminate kvm_msr_entry_set() Eduardo Habkost
@ 2015-12-16 21:38 ` Paolo Bonzini
2016-01-23 15:11 ` Eduardo Habkost
5 siblings, 1 reply; 8+ messages in thread
From: Paolo Bonzini @ 2015-12-16 21:38 UTC
To: Eduardo Habkost, Marcelo Tosatti; +Cc: qemu-devel, kvm
On 16/12/2015 20:06, Eduardo Habkost wrote:
> We are dangerously close to the array limits in kvm_put_msrs()
> and kvm_get_msrs(): with the default mcg_cap configuration, we
> can set up to 148 MSRs in kvm_put_msrs(), and if we allow mcg_cap
> to be changed, we can write up to 236 MSRs[1].
>
> This series changes the code to allocate a buffer once per VCPU,
> increase the buffer size to 4096 bytes (which can hold up to 255 MSR
> entries), and check array limits before appending new entries.
Thanks, it's a good improvement.
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
> [1] I have checked the limits by copying and pasting the
> kvm_put_msrs() code to a new file, replacing the "if" lines,
> copying the macro definitions, and adding a helper macro to
> keep track of the kvm_msr_entry_set() calls. The code can be
> seen at:
> https://gist.github.com/ehabkost/08d4177a33b8648a71ef
>
> Eduardo Habkost (5):
> target-i386: kvm: Allocate kvm_msrs struct once per VCPU
> target-i386: kvm: Increase MSR_BUF_SIZE
> target-i386: kvm: Simplify MSR array construction
> target-i386: kvm: Simplify MSR setting functions
> target-i386: kvm: Eliminate kvm_msr_entry_set()
>
> target-i386/cpu-qom.h | 4 +
> target-i386/kvm.c | 322 +++++++++++++++++++++++---------------------------
> 2 files changed, 149 insertions(+), 177 deletions(-)
>
* Re: [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun
2015-12-16 21:38 ` [Qemu-devel] [PATCH 0/5] target-i386: kvm: Increase MSR entry array limits, check for array overrun Paolo Bonzini
@ 2016-01-23 15:11 ` Eduardo Habkost
0 siblings, 0 replies; 8+ messages in thread
From: Eduardo Habkost @ 2016-01-23 15:11 UTC
To: Paolo Bonzini; +Cc: Marcelo Tosatti, qemu-devel, kvm
On Wed, Dec 16, 2015 at 10:38:25PM +0100, Paolo Bonzini wrote:
>
>
> On 16/12/2015 20:06, Eduardo Habkost wrote:
> > We are dangerously close to the array limits in kvm_put_msrs()
> > and kvm_get_msrs(): with the default mcg_cap configuration, we
> > can set up to 148 MSRs in kvm_put_msrs(), and if we allow mcg_cap
> > to be changed, we can write up to 236 MSRs[1].
> >
> > This series changes the code to allocate a buffer once per VCPU,
> > increase the buffer size to 4096 bytes (which can hold up to 255 MSR
> > entries), and check array limits before appending new entries.
>
> Thanks, it's a good improvement.
>
> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Thanks, applied to x86 tree.
--
Eduardo