* [PATCH V3] KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest
@ 2014-01-28 6:46 Aneesh Kumar K.V
2014-02-06 15:46 ` Alexander Graf
0 siblings, 1 reply; 2+ messages in thread
From: Aneesh Kumar K.V @ 2014-01-28 6:46 UTC (permalink / raw)
To: agraf, benh, paulus; +Cc: linuxppc-dev, kvm, kvm-ppc, Aneesh Kumar K.V
This patch makes sure we inherit the LE bit correctly in the different cases
so that we can run a Little Endian distro in PR mode
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
Changes from V2:
* Move H_SET_MODE to qemu
arch/powerpc/include/asm/kvm_host.h | 1 +
arch/powerpc/kernel/asm-offsets.c | 1 +
arch/powerpc/kvm/book3s_64_mmu.c | 2 +-
arch/powerpc/kvm/book3s_pr.c | 32 +++++++++++++++++++++++++++++++-
4 files changed, 34 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 207b7826c9b1..f4be7be14330 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -550,6 +550,7 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_PPC_BOOK3S
ulong fault_dar;
u32 fault_dsisr;
+ unsigned long intr_msr;
#endif
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b754f629a177..7484676b8f25 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -518,6 +518,7 @@ int main(void)
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+ DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 83da1f868fd5..8231b83c493b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -38,7 +38,7 @@
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
- kvmppc_set_msr(vcpu, MSR_SF);
+ kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index eb070eb4da40..828056ec208f 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -263,7 +263,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
ulong smsr = vcpu->arch.shared->msr;
/* Guest MSR values */
- smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
+ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
/* Process MSR values */
smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
/* External providers the guest reserved */
@@ -1178,6 +1178,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
break;
}
#endif /* CONFIG_VSX */
+ case KVM_REG_PPC_LPCR:
+ /*
+ * We are only interested in the LPCR_ILE bit
+ */
+ if (vcpu->arch.intr_msr & MSR_LE)
+ *val = get_reg_val(id, LPCR_ILE);
+ else
+ *val = get_reg_val(id, 0);
+ break;
default:
r = -EINVAL;
break;
@@ -1186,6 +1195,23 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
return r;
}
+static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ /*
+ * If ILE (interrupt little-endian) has changed, update the
+ * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+ */
+ if ((new_lpcr & LPCR_ILE) != (vcpu->arch.intr_msr & MSR_LE)) {
+ mutex_lock(&kvm->lock);
+ if (new_lpcr & LPCR_ILE)
+ vcpu->arch.intr_msr |= MSR_LE;
+ else
+ vcpu->arch.intr_msr &= ~MSR_LE;
+ mutex_unlock(&kvm->lock);
+ }
+}
+
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
@@ -1209,6 +1235,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
break;
}
#endif /* CONFIG_VSX */
+ case KVM_REG_PPC_LPCR:
+ kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+ break;
default:
r = -EINVAL;
break;
@@ -1261,6 +1290,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
vcpu->arch.pvr = 0x3C0301;
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
vcpu->arch.pvr = mfspr(SPRN_PVR);
+ vcpu->arch.intr_msr = MSR_SF;
#else
/* default to book3s_32 (750) */
vcpu->arch.pvr = 0x84202;
--
1.8.5.3
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH V3] KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest
2014-01-28 6:46 [PATCH V3] KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest Aneesh Kumar K.V
@ 2014-02-06 15:46 ` Alexander Graf
0 siblings, 0 replies; 2+ messages in thread
From: Alexander Graf @ 2014-02-06 15:46 UTC (permalink / raw)
To: Aneesh Kumar K.V
Cc: Paul Mackerras, linuxppc-dev, kvm-ppc,
kvm@vger.kernel.org mailing list
On 28.01.2014, at 07:46, Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> wrote:
> This patch make sure we inherit the LE bit correctly in different case
> so that we can run Little Endian distro in PR mode
>=20
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
> ---
> Changes from V2:
> * Move H_SET_MODE to qemu
>=20
>=20
> arch/powerpc/include/asm/kvm_host.h | 1 +
> arch/powerpc/kernel/asm-offsets.c | 1 +
> arch/powerpc/kvm/book3s_64_mmu.c | 2 +-
> arch/powerpc/kvm/book3s_pr.c | 32 =
+++++++++++++++++++++++++++++++-
> 4 files changed, 34 insertions(+), 2 deletions(-)
>=20
> diff --git a/arch/powerpc/include/asm/kvm_host.h =
b/arch/powerpc/include/asm/kvm_host.h
> index 207b7826c9b1..f4be7be14330 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -550,6 +550,7 @@ struct kvm_vcpu_arch {
> #ifdef CONFIG_PPC_BOOK3S
> ulong fault_dar;
> u32 fault_dsisr;
> + unsigned long intr_msr;
We already have a field with that name and semantics in the vcpu_arch
struct. Just move that one to a more commonly accessible part of the
struct.
> #endif
>=20
> #ifdef CONFIG_BOOKE
> diff --git a/arch/powerpc/kernel/asm-offsets.c =
b/arch/powerpc/kernel/asm-offsets.c
> index b754f629a177..7484676b8f25 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -518,6 +518,7 @@ int main(void)
> DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
> DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, =
arch.fault_dsisr));
> DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, =
arch.fault_dar));
> + DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
Same here:
arch/powerpc/kernel/asm-offsets.c: DEFINE(VCPU_INTR_MSR,
offsetof(struct kvm_vcpu, arch.intr_msr));
> DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, =
arch.last_inst));
> DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
> DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
> diff --git a/arch/powerpc/kvm/book3s_64_mmu.c =
b/arch/powerpc/kvm/book3s_64_mmu.c
> index 83da1f868fd5..8231b83c493b 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu.c
> @@ -38,7 +38,7 @@
>=20
> static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
> {
> - kvmppc_set_msr(vcpu, MSR_SF);
> + kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
> }
>=20
> static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
> diff --git a/arch/powerpc/kvm/book3s_pr.c =
b/arch/powerpc/kvm/book3s_pr.c
> index eb070eb4da40..828056ec208f 100644
> --- a/arch/powerpc/kvm/book3s_pr.c
> +++ b/arch/powerpc/kvm/book3s_pr.c
> @@ -263,7 +263,7 @@ static void kvmppc_recalc_shadow_msr(struct =
kvm_vcpu *vcpu)
> ulong smsr =3D vcpu->arch.shared->msr;
>=20
> /* Guest MSR values */
> - smsr &=3D MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
> + smsr &=3D MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
> /* Process MSR values */
> smsr |=3D MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
> /* External providers the guest reserved */
> @@ -1178,6 +1178,15 @@ static int kvmppc_get_one_reg_pr(struct =
kvm_vcpu *vcpu, u64 id,
> break;
> }
> #endif /* CONFIG_VSX */
> + case KVM_REG_PPC_LPCR:
> + /*
> + * We are only interested in the LPCR_ILE bit
> + */
> + if (vcpu->arch.intr_msr & MSR_LE)
> + *val =3D get_reg_val(id, LPCR_ILE);
> + else
> + *val =3D get_reg_val(id, 0);
> + break;
> default:
> r =3D -EINVAL;
> break;
> @@ -1186,6 +1195,23 @@ static int kvmppc_get_one_reg_pr(struct =
kvm_vcpu *vcpu, u64 id,
> return r;
> }
>=20
> +static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
> +{
> + struct kvm *kvm =3D vcpu->kvm;
> + /*
> + * If ILE (interrupt little-endian) has changed, update the
> + * MSR_LE bit in the intr_msr for each vcpu in this vcore.
> + */
> + if ((new_lpcr & LPCR_ILE) !=3D (vcpu->arch.intr_msr & MSR_LE)) {
> + mutex_lock(&kvm->lock);
Why the lock?
Alex
> + if (new_lpcr & LPCR_ILE)
> + vcpu->arch.intr_msr |=3D MSR_LE;
> + else
> + vcpu->arch.intr_msr &=3D ~MSR_LE;
> + mutex_unlock(&kvm->lock);
> + }
> +}
> +
> static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
> union kvmppc_one_reg *val)
> {
> @@ -1209,6 +1235,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu =
*vcpu, u64 id,
> break;
> }
> #endif /* CONFIG_VSX */
> + case KVM_REG_PPC_LPCR:
> + kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
> + break;
> default:
> r =3D -EINVAL;
> break;
> @@ -1261,6 +1290,7 @@ static struct kvm_vcpu =
*kvmppc_core_vcpu_create_pr(struct kvm *kvm,
> vcpu->arch.pvr =3D 0x3C0301;
> if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
> vcpu->arch.pvr =3D mfspr(SPRN_PVR);
> + vcpu->arch.intr_msr =3D MSR_SF;
> #else
> /* default to book3s_32 (750) */
> vcpu->arch.pvr =3D 0x84202;
> --=20
> 1.8.5.3
>=20
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2014-02-06 15:46 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-01-28 6:46 [PATCH V3] KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest Aneesh Kumar K.V
2014-02-06 15:46 ` Alexander Graf
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).