From: Paolo Bonzini <pbonzini@redhat.com>
To: Sunil Muthuswamy <sunilmut@microsoft.com>,
Richard Henderson <rth@twiddle.net>,
Eduardo Habkost <ehabkost@redhat.com>
Cc: Stefan Weil <sw@weilnetz.de>,
"qemu-devel@nongnu.org" <qemu-devel@nongnu.org>
Subject: Re: [PATCH] WHPX: vmware cpuid leaf for tsc and apic frequency
Date: Fri, 11 Sep 2020 17:40:44 +0200
Message-ID: <8509c09e-20ba-9480-0966-7fe83cf8ca35@redhat.com>
In-Reply-To: <SN4PR2101MB08808DFDDC3F442BBEAADFF4C0710@SN4PR2101MB0880.namprd21.prod.outlook.com>
On 31/07/20 00:11, Sunil Muthuswamy wrote:
> Newer versions of WHPX provide the capability to query the tsc
> and apic frequency. Expose these through the vmware cpuid leaf.
> This patch doesn't support setting the tsc frequency; that will
> come as a separate fix.
>
> Signed-off-by: Sunil Muthuswamy <sunilmut@microsoft.com>
> ---
> target/i386/whp-dispatch.h | 3 +-
> target/i386/whpx-all.c | 94 ++++++++++++++++++++++++++++++++++----
> 2 files changed, 86 insertions(+), 11 deletions(-)
>
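(For context, not part of the patch: a minimal guest-side sketch of how a
guest would discover and read the leaf added below. It assumes GCC/clang's
<cpuid.h>; the __cpuid macro there executes CPUID unconditionally, which
matters because __get_cpuid would reject hypervisor leaves as lying above
the basic range.)

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* 0x40000000: EAX reports the maximum hypervisor leaf; the patch
         * raises it to 0x40000010 so guests can probe for the leaf. */
        __cpuid(0x40000000, eax, ebx, ecx, edx);
        if (eax < 0x40000010) {
            return 1;
        }

        /* 0x40000010 (vmware convention): EAX = TSC frequency in kHz,
         * EBX = APIC bus frequency in kHz. */
        __cpuid(0x40000010, eax, ebx, ecx, edx);
        printf("TSC: %u kHz, APIC bus: %u kHz\n", eax, ebx);
        return 0;
    }
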
> diff --git a/target/i386/whp-dispatch.h b/target/i386/whp-dispatch.h
> index e4695c349f..b18aba20ed 100644
> --- a/target/i386/whp-dispatch.h
> +++ b/target/i386/whp-dispatch.h
> @@ -2,10 +2,11 @@
> #define WHP_DISPATCH_H
>
> #include <windows.h>
> -
> #include <WinHvPlatform.h>
> #include <WinHvEmulation.h>
>
> +#define WHV_E_UNKNOWN_CAPABILITY 0x80370300L
> +
> #define LIST_WINHVPLATFORM_FUNCTIONS(X) \
> X(HRESULT, WHvGetCapability, (WHV_CAPABILITY_CODE CapabilityCode, VOID* CapabilityBuffer, UINT32 CapabilityBufferSizeInBytes, UINT32* WrittenSizeInBytes)) \
> X(HRESULT, WHvCreatePartition, (WHV_PARTITION_HANDLE* Partition)) \
> diff --git a/target/i386/whpx-all.c b/target/i386/whpx-all.c
> index c78baac6df..da4c135925 100644
> --- a/target/i386/whpx-all.c
> +++ b/target/i386/whpx-all.c
> @@ -27,6 +27,8 @@
> #include <WinHvPlatform.h>
> #include <WinHvEmulation.h>
>
> +#define HYPERV_APIC_BUS_FREQUENCY (200000000ULL)
> +
> struct whpx_state {
> uint64_t mem_quota;
> WHV_PARTITION_HANDLE partition;
> @@ -1061,6 +1063,18 @@ static int whpx_vcpu_run(CPUState *cpu)
> cpu_x86_cpuid(env, cpuid_fn, 0, (UINT32 *)&rax, (UINT32 *)&rbx,
> (UINT32 *)&rcx, (UINT32 *)&rdx);
> switch (cpuid_fn) {
> + case 0x40000000:
> + /* Expose the vmware cpu frequency cpuid leaf */
> + rax = 0x40000010;
> + rbx = rcx = rdx = 0;
> + break;
> +
> + case 0x40000010:
> + rax = env->tsc_khz;
> + rbx = env->apic_bus_freq / 1000; /* Hz to KHz */
> + rcx = rdx = 0;
> + break;
> +
> case 0x80000001:
> /* Remove any support of OSVW */
> rcx &= ~CPUID_EXT3_OSVW;
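(A note on units, since the asymmetry above is easy to misread: env->tsc_khz
is already stored in kHz, while env->apic_bus_freq is stored in Hz, hence
the division by 1000 for rbx only. As a hypothetical worked example, a host
with a 2.4 GHz TSC and the 200 MHz default APIC bus clock yields
rax = 2400000 and rbx = 200000000 / 1000 = 200000.)
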
> @@ -1193,6 +1207,10 @@ int whpx_init_vcpu(CPUState *cpu)
> struct whpx_state *whpx = &whpx_global;
> struct whpx_vcpu *vcpu;
> Error *local_error = NULL;
> + struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
> + X86CPU *x86_cpu = X86_CPU(cpu);
> + UINT64 freq = 0;
> + int ret;
>
> /* Add migration blockers for all unsupported features of the
> * Windows Hypervisor Platform
> @@ -1207,7 +1225,8 @@ int whpx_init_vcpu(CPUState *cpu)
> error_report_err(local_error);
> migrate_del_blocker(whpx_migration_blocker);
> error_free(whpx_migration_blocker);
> - return -EINVAL;
> + ret = -EINVAL;
> + goto error;
> }
> }
>
> @@ -1215,7 +1234,8 @@ int whpx_init_vcpu(CPUState *cpu)
>
> if (!vcpu) {
> error_report("WHPX: Failed to allocte VCPU context.");
> - return -ENOMEM;
> + ret = -ENOMEM;
> + goto error;
> }
>
> hr = whp_dispatch.WHvEmulatorCreateEmulator(
> @@ -1224,8 +1244,8 @@ int whpx_init_vcpu(CPUState *cpu)
> if (FAILED(hr)) {
> error_report("WHPX: Failed to setup instruction completion support,"
> " hr=%08lx", hr);
> - g_free(vcpu);
> - return -EINVAL;
> + ret = -EINVAL;
> + goto error;
> }
>
> hr = whp_dispatch.WHvCreateVirtualProcessor(
> @@ -1234,17 +1254,72 @@ int whpx_init_vcpu(CPUState *cpu)
> error_report("WHPX: Failed to create a virtual processor,"
> " hr=%08lx", hr);
> whp_dispatch.WHvEmulatorDestroyEmulator(vcpu->emulator);
> - g_free(vcpu);
> - return -EINVAL;
> + ret = -EINVAL;
> + goto error;
> }
>
> - vcpu->interruptable = true;
> + /*
> + * The vcpu's TSC frequency is either specified by the user or, if not
> + * given, taken from Hyper-V. In the latter case, we query it from
> + * Hyper-V and record it in env->tsc_khz, so that the vcpu's TSC
> + * frequency can be migrated later via this field.
> + */
> + if (!env->tsc_khz) {
> + hr = whp_dispatch.WHvGetCapability(
> + WHvCapabilityCodeProcessorClockFrequency, &freq, sizeof(freq),
> + NULL);
> + if (hr != WHV_E_UNKNOWN_CAPABILITY) {
> + if (FAILED(hr)) {
> + printf("WHPX: Failed to query tsc frequency, hr=0x%08lx\n", hr);
> + } else {
> + env->tsc_khz = freq / 1000; /* Hz to KHz */
> + }
> + }
> + }
>
> + env->apic_bus_freq = HYPERV_APIC_BUS_FREQUENCY;
> + hr = whp_dispatch.WHvGetCapability(
> + WHvCapabilityCodeInterruptClockFrequency, &freq, sizeof(freq), NULL);
> + if (hr != WHV_E_UNKNOWN_CAPABILITY) {
> + if (FAILED(hr)) {
> + printf("WHPX: Failed to query apic bus frequency hr=0x%08lx\n", hr);
> + } else {
> + env->apic_bus_freq = freq;
> + }
> + }
> +
> + /*
> + * If the vmware cpuid frequency leaf option is set, and we have a valid
> + * tsc value, trap the corresponding cpuid leaves.
> + */
> + if (x86_cpu->vmware_cpuid_freq && env->tsc_khz) {
> + UINT32 cpuidExitList[] = {1, 0x80000001, 0x40000000, 0x40000010};
> +
> + hr = whp_dispatch.WHvSetPartitionProperty(
> + whpx->partition,
> + WHvPartitionPropertyCodeCpuidExitList,
> + cpuidExitList,
> + RTL_NUMBER_OF(cpuidExitList) * sizeof(UINT32));
> +
> + if (FAILED(hr)) {
> + error_report("WHPX: Failed to set partition CpuidExitList hr=%08lx",
> + hr);
> + ret = -EINVAL;
> + goto error;
> + }
> + }
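(For completeness: whether the guest actually sees these leaves is gated on
the same x86 CPU property KVM uses, so it can be toggled on the command
line, e.g.:

    qemu-system-x86_64 -accel whpx -cpu max,vmware-cpuid-freq=on ...

vmware-cpuid-freq defaults to on; with it off, or without a usable tsc
frequency, the leaves are not trapped and the guest falls back to
calibrating the TSC itself.)
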
> +
> + vcpu->interruptable = true;
> cpu->vcpu_dirty = true;
> cpu->hax_vcpu = (struct hax_vcpu_state *)vcpu;
> qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
>
> return 0;
> +
> +error:
> + g_free(vcpu);
> +
> + return ret;
> }
>
> int whpx_vcpu_exec(CPUState *cpu)
> @@ -1493,6 +1568,7 @@ static int whpx_accel_init(MachineState *ms)
> WHV_CAPABILITY whpx_cap;
> UINT32 whpx_cap_size;
> WHV_PARTITION_PROPERTY prop;
> + UINT32 cpuidExitList[] = {1, 0x80000001};
>
> whpx = &whpx_global;
>
> @@ -1551,7 +1627,6 @@ static int whpx_accel_init(MachineState *ms)
> goto error;
> }
>
> - UINT32 cpuidExitList[] = {1, 0x80000001};
> hr = whp_dispatch.WHvSetPartitionProperty(
> whpx->partition,
> WHvPartitionPropertyCodeCpuidExitList,
> @@ -1579,14 +1654,13 @@ static int whpx_accel_init(MachineState *ms)
> printf("Windows Hypervisor Platform accelerator is operational\n");
> return 0;
>
> - error:
> +error:
>
> if (NULL != whpx->partition) {
> whp_dispatch.WHvDeletePartition(whpx->partition);
> whpx->partition = NULL;
> }
>
> -
> return ret;
> }
>
>
Queued, thanks.
Paolo