* [PATCH] KVM: x86: optimize steal time calculation
@ 2016-03-16 11:33 Liang Chen
2016-03-24 11:08 ` Paolo Bonzini
0 siblings, 1 reply; 2+ messages in thread
From: Liang Chen @ 2016-03-16 11:33 UTC (permalink / raw)
To: pbonzini; +Cc: kvm, linux-kernel, Liang Chen, Gavin Guo
Since accumulate_steal_time is now only called in record_steal_time, it
doesn't quite make sense to put the delta calculation in a separate
function. The function could be called thousands of times before guest
enables the steal time MSR (though the compiler may optimize out this
function call). And after it's enabled, the MSR enable bit is tested twice
every time. Removing the accumulate_steal_time function also avoids the
necessity of having the accum_steal field.
Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
Signed-off-by: Gavin Guo <gavin.guo@canonical.com>
---
arch/x86/include/asm/kvm_host.h | 1 -
arch/x86/kvm/x86.c | 19 +++----------------
2 files changed, 3 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 44adbb8..3f460a7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -547,7 +547,6 @@ struct kvm_vcpu_arch {
struct {
u64 msr_val;
u64 last_steal;
- u64 accum_steal;
struct gfn_to_hva_cache stime;
struct kvm_steal_time steal;
} st;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eaf6ee8..82bfefc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2002,22 +2002,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
vcpu->arch.pv_time_enabled = false;
}
-static void accumulate_steal_time(struct kvm_vcpu *vcpu)
-{
- u64 delta;
-
- if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- return;
-
- delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
- vcpu->arch.st.last_steal = current->sched_info.run_delay;
- vcpu->arch.st.accum_steal = delta;
-}
-
static void record_steal_time(struct kvm_vcpu *vcpu)
{
- accumulate_steal_time(vcpu);
-
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -2025,9 +2011,10 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
- vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
+ vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+ vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
vcpu->arch.st.steal.version += 2;
- vcpu->arch.st.accum_steal = 0;
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
--
1.9.1
^ permalink raw reply related [flat|nested] 2+ messages in thread

* Re: [PATCH] KVM: x86: optimize steal time calculation
2016-03-16 11:33 [PATCH] KVM: x86: optimize steal time calculation Liang Chen
@ 2016-03-24 11:08 ` Paolo Bonzini
0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2016-03-24 11:08 UTC (permalink / raw)
To: Liang Chen; +Cc: kvm, linux-kernel, Gavin Guo
On 16/03/2016 12:33, Liang Chen wrote:
> Since accumulate_steal_time is now only called in record_steal_time, it
> doesn't quite make sense to put the delta calculation in a separate
> function. The function could be called thousands of times before guest
> enables the steal time MSR (though the compiler may optimize out this
> function call). And after it's enabled, the MSR enable bit is tested twice
> every time. Removing the accumulate_steal_time function also avoids the
> necessity of having the accum_steal field.
>
> Signed-off-by: Liang Chen <liangchen.linux@gmail.com>
> Signed-off-by: Gavin Guo <gavin.guo@canonical.com>
> ---
> arch/x86/include/asm/kvm_host.h | 1 -
> arch/x86/kvm/x86.c | 19 +++----------------
> 2 files changed, 3 insertions(+), 17 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 44adbb8..3f460a7 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -547,7 +547,6 @@ struct kvm_vcpu_arch {
> struct {
> u64 msr_val;
> u64 last_steal;
> - u64 accum_steal;
> struct gfn_to_hva_cache stime;
> struct kvm_steal_time steal;
> } st;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index eaf6ee8..82bfefc 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2002,22 +2002,8 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
> vcpu->arch.pv_time_enabled = false;
> }
>
> -static void accumulate_steal_time(struct kvm_vcpu *vcpu)
> -{
> - u64 delta;
> -
> - if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
> - return;
> -
> - delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
> - vcpu->arch.st.last_steal = current->sched_info.run_delay;
> - vcpu->arch.st.accum_steal = delta;
> -}
> -
> static void record_steal_time(struct kvm_vcpu *vcpu)
> {
> - accumulate_steal_time(vcpu);
> -
> if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
> return;
>
> @@ -2025,9 +2011,10 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
> &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
> return;
>
> - vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
> + vcpu->arch.st.steal.steal += current->sched_info.run_delay -
> + vcpu->arch.st.last_steal;
> + vcpu->arch.st.last_steal = current->sched_info.run_delay;
> vcpu->arch.st.steal.version += 2;
> - vcpu->arch.st.accum_steal = 0;
>
> kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>
Nice cleanup; queued for 4.7, thanks.
Paolo
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2016-03-24 11:08 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-03-16 11:33 [PATCH] KVM: x86: optimize steal time calculation Liang Chen
2016-03-24 11:08 ` Paolo Bonzini
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox