From: Fabiano Rosas <farosas@linux.ibm.com>
To: Nicholas Piggin <npiggin@gmail.com>, kvm-ppc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org, Nicholas Piggin <npiggin@gmail.com>
Subject: Re: [PATCH v2 24/37] KVM: PPC: Book3S HV P9: inline kvmhv_load_hv_regs_and_go into __kvmhv_vcpu_entry_p9
Date: Tue, 02 Mar 2021 10:48:35 -0300
Message-ID: <87tuptwtsc.fsf@linux.ibm.com>
In-Reply-To: <20210225134652.2127648-25-npiggin@gmail.com>
Nicholas Piggin <npiggin@gmail.com> writes:
> Now the initial C implementation is done, inline more HV code to make
> rearranging things easier.
>
> And rename __kvmhv_vcpu_entry_p9 to drop the leading underscores as it's
> now C, and is now a more complete vcpu entry.
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
> ---
> arch/powerpc/include/asm/kvm_book3s_64.h | 2 +-
> arch/powerpc/kvm/book3s_hv.c | 181 +----------------------
> arch/powerpc/kvm/book3s_hv_interrupt.c | 168 ++++++++++++++++++++-
> 3 files changed, 169 insertions(+), 182 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
> index c214bcffb441..eaf3a562bf1e 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_64.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_64.h
> @@ -153,7 +153,7 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
> return radix;
> }
>
> -int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
> +int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
>
> #define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
> #endif
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 28a2761515e3..f99503acdda5 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -3442,183 +3442,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
> trace_kvmppc_run_core(vc, 1);
> }
>
> -static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
> -{
> - struct kvmppc_vcore *vc = vcpu->arch.vcore;
> - struct kvm_nested_guest *nested = vcpu->arch.nested;
> - u32 lpid;
> -
> - lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
> -
> - mtspr(SPRN_LPID, lpid);
> - mtspr(SPRN_LPCR, lpcr);
> - mtspr(SPRN_PID, vcpu->arch.pid);
> - isync();
> -
> - /* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
> - kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
> -}
> -
> -static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
> -{
> - mtspr(SPRN_PID, pid);
> - mtspr(SPRN_LPID, kvm->arch.host_lpid);
> - mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
> - isync();
> -}
> -
> -/*
> - * Load up hypervisor-mode registers on P9.
> - */
> -static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
> - unsigned long lpcr)
> -{
> - struct kvm *kvm = vcpu->kvm;
> - struct kvmppc_vcore *vc = vcpu->arch.vcore;
> - s64 hdec;
> - u64 tb, purr, spurr;
> - int trap;
> - unsigned long host_hfscr = mfspr(SPRN_HFSCR);
> - unsigned long host_ciabr = mfspr(SPRN_CIABR);
> - unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
> - unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
> - unsigned long host_psscr = mfspr(SPRN_PSSCR);
> - unsigned long host_pidr = mfspr(SPRN_PID);
> - unsigned long host_dawr1 = 0;
> - unsigned long host_dawrx1 = 0;
> -
> - if (cpu_has_feature(CPU_FTR_DAWR1)) {
> - host_dawr1 = mfspr(SPRN_DAWR1);
> - host_dawrx1 = mfspr(SPRN_DAWRX1);
> - }
> -
> - tb = mftb();
> - hdec = time_limit - tb;
> - if (hdec < 0)
> - return BOOK3S_INTERRUPT_HV_DECREMENTER;
> -
> - if (vc->tb_offset) {
> - u64 new_tb = tb + vc->tb_offset;
> - mtspr(SPRN_TBU40, new_tb);
> - tb = mftb();
> - if ((tb & 0xffffff) < (new_tb & 0xffffff))
> - mtspr(SPRN_TBU40, new_tb + 0x1000000);
> - vc->tb_offset_applied = vc->tb_offset;
> - }
> -
> - if (vc->pcr)
> - mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
> - mtspr(SPRN_DPDES, vc->dpdes);
> - mtspr(SPRN_VTB, vc->vtb);
> -
> - local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
> - local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
> - mtspr(SPRN_PURR, vcpu->arch.purr);
> - mtspr(SPRN_SPURR, vcpu->arch.spurr);
> -
> - if (dawr_enabled()) {
> - mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
> - mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
> - if (cpu_has_feature(CPU_FTR_DAWR1)) {
> - mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
> - mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
> - }
> - }
> - mtspr(SPRN_CIABR, vcpu->arch.ciabr);
> - mtspr(SPRN_IC, vcpu->arch.ic);
> -
> - mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
> - (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
> -
> - mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
> -
> - mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
> - mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
> - mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
> - mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
> -
> - mtspr(SPRN_AMOR, ~0UL);
> -
> - switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
> -
> - /*
> - * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
> - * so set guest LPCR (with HDICE) before writing HDEC.
> - */
> - mtspr(SPRN_HDEC, hdec);
> -
> - mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
> - mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
> -
> - trap = __kvmhv_vcpu_entry_p9(vcpu);
> -
> - /* Advance host PURR/SPURR by the amount used by guest */
> - purr = mfspr(SPRN_PURR);
> - spurr = mfspr(SPRN_SPURR);
> - mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
> - purr - vcpu->arch.purr);
> - mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
> - spurr - vcpu->arch.spurr);
> - vcpu->arch.purr = purr;
> - vcpu->arch.spurr = spurr;
> -
> - vcpu->arch.ic = mfspr(SPRN_IC);
> - vcpu->arch.pid = mfspr(SPRN_PID);
> - vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
> -
> - vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
> - vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
> - vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
> - vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
> -
> - /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
> - mtspr(SPRN_PSSCR, host_psscr |
> - (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
> - mtspr(SPRN_HFSCR, host_hfscr);
> - mtspr(SPRN_CIABR, host_ciabr);
> - mtspr(SPRN_DAWR0, host_dawr0);
> - mtspr(SPRN_DAWRX0, host_dawrx0);
> - if (cpu_has_feature(CPU_FTR_DAWR1)) {
> - mtspr(SPRN_DAWR1, host_dawr1);
> - mtspr(SPRN_DAWRX1, host_dawrx1);
> - }
> -
> - /*
> - * Since this is radix, do a eieio; tlbsync; ptesync sequence in
> - * case we interrupted the guest between a tlbie and a ptesync.
> - */
> - asm volatile("eieio; tlbsync; ptesync");
> -
> - /*
> - * cp_abort is required if the processor supports local copy-paste
> - * to clear the copy buffer that was under control of the guest.
> - */
> - if (cpu_has_feature(CPU_FTR_ARCH_31))
> - asm volatile(PPC_CP_ABORT);
> -
> - vc->dpdes = mfspr(SPRN_DPDES);
> - vc->vtb = mfspr(SPRN_VTB);
> - mtspr(SPRN_DPDES, 0);
> - if (vc->pcr)
> - mtspr(SPRN_PCR, PCR_MASK);
> -
> - if (vc->tb_offset_applied) {
> - u64 new_tb = mftb() - vc->tb_offset_applied;
> - mtspr(SPRN_TBU40, new_tb);
> - tb = mftb();
> - if ((tb & 0xffffff) < (new_tb & 0xffffff))
> - mtspr(SPRN_TBU40, new_tb + 0x1000000);
> - vc->tb_offset_applied = 0;
> - }
> -
> - /* HDEC must be at least as large as DEC, so decrementer_max fits */
> - mtspr(SPRN_HDEC, decrementer_max);
> -
> - switch_mmu_to_host_radix(kvm, host_pidr);
> -
> - return trap;
> -}
> -
> /*
> * Virtual-mode guest entry for POWER9 and later when the host and
> * guest are both using the radix MMU. The LPIDR has already been set.
> @@ -3710,7 +3533,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
> * We need to save and restore the guest visible part of the
> * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
> * doesn't do this for us. Note only required if pseries since
> - * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
> + * this is done in kvmhv_vcpu_entry_p9() below otherwise.
> */
> unsigned long host_psscr;
> /* call our hypervisor to load up HV regs and go */
> @@ -3748,7 +3571,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
>
> } else {
> kvmppc_xive_push_vcpu(vcpu);
> - trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
> + trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
> /* H_CEDE has to be handled now, not later */
> /* XICS hcalls must be handled before xive is pulled */
> if (trap == BOOK3S_INTERRUPT_SYSCALL &&
> diff --git a/arch/powerpc/kvm/book3s_hv_interrupt.c b/arch/powerpc/kvm/book3s_hv_interrupt.c
> index 5a7b036c447f..dea3eca3648a 100644
> --- a/arch/powerpc/kvm/book3s_hv_interrupt.c
> +++ b/arch/powerpc/kvm/book3s_hv_interrupt.c
> @@ -55,6 +55,31 @@ static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator
> #define accumulate_time(vcpu, next) do {} while (0)
> #endif
>
> +static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
> +{
> + struct kvmppc_vcore *vc = vcpu->arch.vcore;
> + struct kvm_nested_guest *nested = vcpu->arch.nested;
> + u32 lpid;
> +
> + lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
> +
> + mtspr(SPRN_LPID, lpid);
> + mtspr(SPRN_LPCR, lpcr);
> + mtspr(SPRN_PID, vcpu->arch.pid);
> + isync();
> +
> + /* TLBIEL must have LPIDR set, so set guest LPID before flushing. */
> + kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
> +}
> +
> +static void switch_mmu_to_host_radix(struct kvm *kvm, u32 pid)
> +{
> + mtspr(SPRN_PID, pid);
> + mtspr(SPRN_LPID, kvm->arch.host_lpid);
> + mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
> + isync();
> +}
> +
> static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
> {
> asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
> @@ -94,11 +119,86 @@ static void radix_clear_slb(void)
> }
> }
>
> -int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu)
> +int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
> {
> + struct kvm *kvm = vcpu->kvm;
> + struct kvmppc_vcore *vc = vcpu->arch.vcore;
> + s64 hdec;
> + u64 tb, purr, spurr;
> u64 *exsave;
> unsigned long msr = mfmsr();
> int trap;
> + unsigned long host_hfscr = mfspr(SPRN_HFSCR);
> + unsigned long host_ciabr = mfspr(SPRN_CIABR);
> + unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
> + unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
> + unsigned long host_psscr = mfspr(SPRN_PSSCR);
> + unsigned long host_pidr = mfspr(SPRN_PID);
> + unsigned long host_dawr1 = 0;
> + unsigned long host_dawrx1 = 0;
> +
> + if (cpu_has_feature(CPU_FTR_DAWR1)) {
> + host_dawr1 = mfspr(SPRN_DAWR1);
> + host_dawrx1 = mfspr(SPRN_DAWRX1);
> + }
> +
> + tb = mftb();
> + hdec = time_limit - tb;
> + if (hdec < 0)
> + return BOOK3S_INTERRUPT_HV_DECREMENTER;
> +
> + if (vc->tb_offset) {
> + u64 new_tb = tb + vc->tb_offset;
> + mtspr(SPRN_TBU40, new_tb);
> + tb = mftb();
> + if ((tb & 0xffffff) < (new_tb & 0xffffff))
> + mtspr(SPRN_TBU40, new_tb + 0x1000000);
> + vc->tb_offset_applied = vc->tb_offset;
> + }
> +
> + if (vc->pcr)
> + mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
> + mtspr(SPRN_DPDES, vc->dpdes);
> + mtspr(SPRN_VTB, vc->vtb);
> +
> + local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
> + local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
> + mtspr(SPRN_PURR, vcpu->arch.purr);
> + mtspr(SPRN_SPURR, vcpu->arch.spurr);
> +
> + if (dawr_enabled()) {
> + mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
> + mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
> + if (cpu_has_feature(CPU_FTR_DAWR1)) {
> + mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
> + mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
> + }
> + }
> + mtspr(SPRN_CIABR, vcpu->arch.ciabr);
> + mtspr(SPRN_IC, vcpu->arch.ic);
> +
> + mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
> + (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
> +
> + mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
> +
> + mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
> + mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
> + mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
> + mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
> +
> + mtspr(SPRN_AMOR, ~0UL);
> +
> + switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
> +
> + /*
> + * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
> + * so set guest LPCR (with HDICE) before writing HDEC.
> + */
> + mtspr(SPRN_HDEC, hdec);
> +
> + mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
> + mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
>
> start_timing(vcpu, &vcpu->arch.rm_entry);
>
> @@ -216,6 +316,70 @@ int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu)
>
> end_timing(vcpu);
>
> + /* Advance host PURR/SPURR by the amount used by guest */
> + purr = mfspr(SPRN_PURR);
> + spurr = mfspr(SPRN_SPURR);
> + mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
> + purr - vcpu->arch.purr);
> + mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
> + spurr - vcpu->arch.spurr);
> + vcpu->arch.purr = purr;
> + vcpu->arch.spurr = spurr;
> +
> + vcpu->arch.ic = mfspr(SPRN_IC);
> + vcpu->arch.pid = mfspr(SPRN_PID);
> + vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
> +
> + vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
> + vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
> + vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
> + vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
> +
> + /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
> + mtspr(SPRN_PSSCR, host_psscr |
> + (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
> + mtspr(SPRN_HFSCR, host_hfscr);
> + mtspr(SPRN_CIABR, host_ciabr);
> + mtspr(SPRN_DAWR0, host_dawr0);
> + mtspr(SPRN_DAWRX0, host_dawrx0);
> + if (cpu_has_feature(CPU_FTR_DAWR1)) {
> + mtspr(SPRN_DAWR1, host_dawr1);
> + mtspr(SPRN_DAWRX1, host_dawrx1);
> + }
> +
> + /*
> + * Since this is radix, do a eieio; tlbsync; ptesync sequence in
> + * case we interrupted the guest between a tlbie and a ptesync.
> + */
> + asm volatile("eieio; tlbsync; ptesync");
> +
> + /*
> + * cp_abort is required if the processor supports local copy-paste
> + * to clear the copy buffer that was under control of the guest.
> + */
> + if (cpu_has_feature(CPU_FTR_ARCH_31))
> + asm volatile(PPC_CP_ABORT);
> +
> + vc->dpdes = mfspr(SPRN_DPDES);
> + vc->vtb = mfspr(SPRN_VTB);
> + mtspr(SPRN_DPDES, 0);
> + if (vc->pcr)
> + mtspr(SPRN_PCR, PCR_MASK);
> +
> + if (vc->tb_offset_applied) {
> + u64 new_tb = mftb() - vc->tb_offset_applied;
> + mtspr(SPRN_TBU40, new_tb);
> + tb = mftb();
> + if ((tb & 0xffffff) < (new_tb & 0xffffff))
> + mtspr(SPRN_TBU40, new_tb + 0x1000000);
> + vc->tb_offset_applied = 0;
> + }
> +
> + /* HDEC must be at least as large as DEC, so decrementer_max fits */
> + mtspr(SPRN_HDEC, decrementer_max);
> +
> + switch_mmu_to_host_radix(kvm, host_pidr);
> +
> return trap;
> }
> -EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9);
> +EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);