linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH v2 1/4] powerpc/e500v2: Save SPEFSCR in flush_spe_to_thread()
From: Scott Wood @ 2011-03-28 19:59 UTC
  To: agraf; +Cc: linuxppc-dev, kvm-ppc

From: Yu Liu <yu.liu@freescale.com>

giveup_spe() saves the SPE register state, which is protected by
MSR[SPE].  SPEFSCR, however, can be modified without trapping even
when MSR[SPE]=0, and it is already saved/restored in _switch(), so
not all callers of giveup_spe() want SPEFSCR saved again.  Thus,
saving SPEFSCR does not belong in giveup_spe().

This patch moves the SPEFSCR save into flush_spe_to_thread(), and
converts the one caller that needs SPEFSCR saved to use it.
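
For reference, the resulting function looks roughly like this (a
sketch reconstructed from the diff context below, not an exact copy
of process.c):

	void flush_spe_to_thread(struct task_struct *tsk)
	{
		if (tsk->thread.regs) {
			preempt_disable();
			if (tsk->thread.regs->msr & MSR_SPE) {
	#ifdef CONFIG_SMP
				BUG_ON(tsk != current);
	#endif
				/* SPEFSCR access does not trap with
				 * MSR[SPE]=0, so snapshot it here */
				tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
				giveup_spe(tsk);
			}
			preempt_enable();
		}
	}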

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2: added kvm-ppc (sorry for the resend)

Kumar, could you ack this to go via the KVM tree, since the KVM
SPE save/restore patches depend on it?

 arch/powerpc/kernel/head_fsl_booke.S |    2 --
 arch/powerpc/kernel/process.c        |    1 +
 arch/powerpc/kernel/traps.c          |    5 +----
 3 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3e02710..b84fc5e 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -792,8 +792,6 @@ _GLOBAL(giveup_spe)
 	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
 	li	r4,THREAD_ACC
 	evstddx	evr6, r4, r3		/* save off accumulator */
-	mfspr	r6,SPRN_SPEFSCR
-	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
 	beq	1f
 	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 	lis	r3,MSR_SPE@h
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f74f355..138e7dd 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -213,6 +213,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
 #ifdef CONFIG_SMP
 			BUG_ON(tsk != current);
 #endif
+			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 			giveup_spe(tsk);
 		}
 		preempt_enable();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bd74fac..0ed23d1 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1356,10 +1356,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
 	int code = 0;
 	int err;
 
-	preempt_disable();
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	preempt_enable();
+	flush_spe_to_thread(current);
 
 	spefscr = current->thread.spefscr;
 	fpexc_mode = current->thread.fpexc_mode;
-- 
1.7.1

* [PATCH v2 2/4] KVM: PPC: booke: Wrap __kvmppc_vcpu_run()
From: Scott Wood @ 2011-03-28 20:00 UTC
  To: agraf; +Cc: linuxppc-dev, kvm-ppc

From: Yu Liu <yu.liu@freescale.com>

We need to save/restore the SPE environment on the e500 core.
Wrap __kvmppc_vcpu_run() so that the SPE code can live in e500.c.
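
The resulting shape (a sketch of the pattern; the exact hunks are
below) is a renamed asm entry point plus a thin per-core C wrapper,
giving each core a place to hang setup/teardown around the guest run:

	/* per-core wrapper (44x.c and e500.c); patch 4/4 wraps the
	 * e500 SPE load/save around this call */
	int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	{
		return __kvmppc_vcpu_entry(kvm_run, vcpu);
	}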

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2: added kvm-ppc (sorry for the resend)

 arch/powerpc/include/asm/kvm_ppc.h  |    1 +
 arch/powerpc/kvm/44x.c              |    5 +++++
 arch/powerpc/kvm/book3s.c           |    1 -
 arch/powerpc/kvm/booke_interrupts.S |    2 +-
 arch/powerpc/kvm/e500.c             |    5 +++++
 5 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ecb3bc7..4e7a1be 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -42,6 +42,7 @@ enum emulation_result {
 	EMULATE_AGAIN,        /* something went wrong. go again */
 };
 
+extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern char kvmppc_handlers_start[];
 extern unsigned long kvmppc_handler_len;
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 74d0e74..3d2e7d2 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -147,6 +147,11 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
 }
 
+int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	return __kvmppc_vcpu_entry(kvm_run, vcpu);
+}
+
 static int __init kvmppc_44x_init(void)
 {
 	int r;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c961de4..fb12853 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1379,7 +1379,6 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	vfree(vcpu_book3s);
 }
 
-extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 1cc471f..ab29f5f 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -293,7 +293,7 @@ heavyweight_exit:
  *  r3: kvm_run pointer
  *  r4: vcpu pointer
  */
-_GLOBAL(__kvmppc_vcpu_run)
+_GLOBAL(__kvmppc_vcpu_entry)
 	stwu	r1, -HOST_STACK_SIZE(r1)
 	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */
 
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index e3768ee..e762634 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -70,6 +70,11 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	return __kvmppc_vcpu_entry(kvm_run, vcpu);
+}
+
 /* 'linear_address' is actually an encoding of AS|PID|EADDR . */
 int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                struct kvm_translation *tr)
-- 
1.7.1

* [PATCH v2 3/4] KVM: PPC: e500: Introduce msr_block for e500v2
From: Scott Wood @ 2011-03-28 20:00 UTC
  To: agraf; +Cc: kvm-ppc, linuxppc-dev

From: Yu Liu <yu.liu@freescale.com>

In order to do lazy SPE register save/restore, we need to know when
the guest is actually using MSR[SPE].  To do that, we must control
the real MSR[SPE] separately from the guest's notion of it.

msr_block is an AND mask applied to the guest MSR on guest entry:
only bits set in msr_block can be set by the guest in the real MSR.
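
As a sketch (illustrative C only; the real computation is the asm in
the lightweight_exit hunk below), the MSR the hardware sees while the
guest runs is:

	/* bits in KVMPPC_MSR_MASK are forced on by the host; bits
	 * cleared in msr_block are forced off */
	u32 real_msr = (vcpu->arch.shared->msr | KVMPPC_MSR_MASK)
		       & vcpu->arch.msr_block;

Note the inverted sense of the helpers: kvmppc_set_msr_block() clears
a bit in the mask (blocking that MSR bit from the guest), while
kvmppc_clr_msr_block() sets it (letting the guest's value through).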

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2: added kvm-ppc (sorry for the resend)

 arch/powerpc/include/asm/kvm_host.h |    3 +++
 arch/powerpc/kernel/asm-offsets.c   |    3 +++
 arch/powerpc/kvm/booke.h            |   17 +++++++++++++++++
 arch/powerpc/kvm/booke_interrupts.S |    6 +++++-
 arch/powerpc/kvm/e500.c             |    3 +++
 5 files changed, 31 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bba3b9b..c376f6b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -217,6 +217,9 @@ struct kvm_vcpu_arch {
 	ulong xer;
 	u32 cr;
 #endif
+#ifdef CONFIG_FSL_BOOKE
+	ulong msr_block;
+#endif
 
 #ifdef CONFIG_PPC_BOOK3S
 	ulong shadow_msr;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 23e6a93..75b72c7 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -403,6 +403,9 @@ int main(void)
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
+#ifdef CONFIG_FSL_BOOKE
+	DEFINE(VCPU_MSR_BLOCK, offsetof(struct kvm_vcpu, arch.msr_block));
+#endif
 	/* book3s */
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb70..303a415 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,6 +52,23 @@
 
 extern unsigned long kvmppc_booke_handlers;
 
+#ifdef CONFIG_FSL_BOOKE
+static inline bool kvmppc_msr_block_has(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+	return !(vcpu->arch.msr_block & block_bit);
+}
+
+static inline void kvmppc_set_msr_block(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+	vcpu->arch.msr_block &= ~block_bit;
+}
+
+static inline void kvmppc_clr_msr_block(struct kvm_vcpu *vcpu, u32 block_bit)
+{
+	vcpu->arch.msr_block |= block_bit;
+}
+#endif
+
 /* Helper function for "full" MSR writes. No need to call this if only EE is
  * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index ab29f5f..92193c7 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -409,7 +409,6 @@ lightweight_exit:
 	mtctr	r3
 	lwz	r3, VCPU_CR(r4)
 	mtcr	r3
-	lwz	r5, VCPU_GPR(r5)(r4)
 	lwz	r6, VCPU_GPR(r6)(r4)
 	lwz	r7, VCPU_GPR(r7)(r4)
 	lwz	r8, VCPU_GPR(r8)(r4)
@@ -419,6 +418,11 @@ lightweight_exit:
 	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
+#ifdef CONFIG_FSL_BOOKE
+	lwz	r5, VCPU_MSR_BLOCK(r4)
+	and	r3, r3, r5
+#endif
+	lwz	r5, VCPU_GPR(r5)(r4)
 	mtsrr1	r3
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index e762634..acfe052 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -67,6 +67,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 	/* Since booke kvm only support one core, update all vcpus' PIR to 0 */
 	vcpu->vcpu_id = 0;
 
+	/* Unblock all msr bits */
+	kvmppc_clr_msr_block(vcpu, ~0UL);
+
 	return 0;
 }
 
-- 
1.7.1

* [PATCH v2 4/4] KVM: PPC: e500: SPE switch between guest and host
From: Scott Wood @ 2011-03-28 20:00 UTC
  To: agraf; +Cc: linuxppc-dev, kvm-ppc

From: Yu Liu <yu.liu@freescale.com>

This patch provides a lazy SPE switch: the SPE state is saved and
restored only when it is actually needed.

Linux already switches SPEFSCR on context switch (non-lazily), so the
only remaining piece is to switch it between qemu and the guest.
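
The lazy lifecycle, summarized as a sketch (names match the hunks
below; control flow paraphrased):

	/* __kvmppc_vcpu_run() entry: guest already owns SPE? load now */
	if (vcpu->arch.shared->msr & MSR_SPE)
		kvmppc_vcpu_spe_load(vcpu);	/* load EVRs/ACC, unblock MSR[SPE] */

	/* SPE_UNAVAIL exit: first guest SPE use since the last save */
	if (kvmppc_msr_block_has(vcpu, MSR_SPE))
		kvmppc_vcpu_spe_load(vcpu);
	if (!(vcpu->arch.shared->msr & MSR_SPE))
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);

	/* __kvmppc_vcpu_run() exit and vcpu_put: save only if the guest
	 * actually used SPE (MSR[SPE] still unblocked) */
	if (!kvmppc_msr_block_has(vcpu, MSR_SPE))
		kvmppc_vcpu_spe_put(vcpu);	/* save EVRs/ACC, re-block MSR[SPE] */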

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
---
v2: added kvm-ppc (sorry for the resend)

 arch/powerpc/include/asm/kvm_host.h |    6 +++
 arch/powerpc/kernel/asm-offsets.c   |    6 +++
 arch/powerpc/kvm/booke.c            |   15 +++++++-
 arch/powerpc/kvm/booke_interrupts.S |   62 +++++++++++++++++++++++++++++++++++
 arch/powerpc/kvm/e500.c             |   45 ++++++++++++++++++++++++-
 5 files changed, 130 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c376f6b..171cd85 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -195,6 +195,12 @@ struct kvm_vcpu_arch {
 	u64 fpr[32];
 	u64 fpscr;
 
+#ifdef CONFIG_SPE
+	ulong evr[32];
+	ulong spefscr;
+	ulong host_spefscr;
+	u64 acc;
+#endif
 #ifdef CONFIG_ALTIVEC
 	vector128 vr[32];
 	vector128 vscr;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 75b72c7..554f4d6 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -497,6 +497,12 @@ int main(void)
 	DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
 	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
 #endif
+#ifdef CONFIG_SPE
+	DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+	DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+	DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+	DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif /* CONFIG_SPE */
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ef76acb..4e9c1a9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
@@ -344,10 +345,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = RESUME_GUEST;
 		break;
 
-	case BOOKE_INTERRUPT_SPE_UNAVAIL:
-		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+		extern void kvmppc_vcpu_spe_load(struct kvm_vcpu *vcpu);
+
+		/* reload the SPE env on first guest SPE use since last save */
+		if (kvmppc_msr_block_has(vcpu, MSR_SPE))
+			kvmppc_vcpu_spe_load(vcpu);
+
+		if (!(vcpu->arch.shared->msr & MSR_SPE))
+			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
 		r = RESUME_GUEST;
 		break;
+	}
 
 	case BOOKE_INTERRUPT_SPE_FP_DATA:
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +368,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
 		r = RESUME_GUEST;
 		break;
+#endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 92193c7..910ec66 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -241,6 +241,14 @@ _GLOBAL(kvmppc_resume_host)
 heavyweight_exit:
 	/* Not returning to guest. */
 
+#ifdef CONFIG_SPE
+	/* save guest SPEFSCR and load host SPEFSCR */
+	mfspr	r9, SPRN_SPEFSCR
+	stw	r9, VCPU_SPEFSCR(r4)
+	lwz	r9, VCPU_HOST_SPEFSCR(r4)
+	mtspr	SPRN_SPEFSCR, r9
+#endif
+
 	/* We already saved guest volatile register state; now save the
 	 * non-volatiles. */
 	stw	r15, VCPU_GPR(r15)(r4)
@@ -342,6 +350,14 @@ _GLOBAL(__kvmppc_vcpu_entry)
 	lwz	r30, VCPU_GPR(r30)(r4)
 	lwz	r31, VCPU_GPR(r31)(r4)
 
+#ifdef CONFIG_SPE
+	/* save host SPEFSCR and load guest SPEFSCR */
+	mfspr	r3, SPRN_SPEFSCR
+	stw	r3, VCPU_HOST_SPEFSCR(r4)
+	lwz	r3, VCPU_SPEFSCR(r4)
+	mtspr	SPRN_SPEFSCR, r3
+#endif
+
 lightweight_exit:
 	stw	r2, HOST_R2(r1)
 
@@ -435,3 +451,49 @@ lightweight_exit:
 	lwz	r3, VCPU_GPR(r3)(r4)
 	lwz	r4, VCPU_GPR(r4)(r4)
 	rfi
+
+#ifdef CONFIG_SPE
+#define KVMPPC_SAVE_EVR(n,s,base)	evmergehi s,s,n; stw s,(4*(n))(base)
+#define KVMPPC_SAVE_2EVR(n,s,base)	KVMPPC_SAVE_EVR(n,s,base); \
+					   KVMPPC_SAVE_EVR(n+1,s,base)
+#define KVMPPC_SAVE_4EVR(n,s,base)	KVMPPC_SAVE_2EVR(n,s,base); \
+					   KVMPPC_SAVE_2EVR(n+2,s,base)
+#define KVMPPC_SAVE_8EVR(n,s,base)	KVMPPC_SAVE_4EVR(n,s,base); \
+					   KVMPPC_SAVE_4EVR(n+4,s,base)
+#define KVMPPC_SAVE_16EVR(n,s,base)	KVMPPC_SAVE_8EVR(n,s,base); \
+					   KVMPPC_SAVE_8EVR(n+8,s,base)
+#define KVMPPC_SAVE_32EVR(n,s,base)	KVMPPC_SAVE_16EVR(n,s,base); \
+					   KVMPPC_SAVE_16EVR(n+16,s,base)
+#define KVMPPC_LOAD_EVR(n,s,base)	lwz s,(4*(n))(base); evmergelo n,s,n
+#define KVMPPC_LOAD_2EVR(n,s,base)	KVMPPC_LOAD_EVR(n,s,base); \
+					   KVMPPC_LOAD_EVR(n+1,s,base)
+#define KVMPPC_LOAD_4EVR(n,s,base)	KVMPPC_LOAD_2EVR(n,s,base); \
+					   KVMPPC_LOAD_2EVR(n+2,s,base)
+#define KVMPPC_LOAD_8EVR(n,s,base)	KVMPPC_LOAD_4EVR(n,s,base); \
+					   KVMPPC_LOAD_4EVR(n+4,s,base)
+#define KVMPPC_LOAD_16EVR(n,s,base)	KVMPPC_LOAD_8EVR(n,s,base); \
+					   KVMPPC_LOAD_8EVR(n+8,s,base)
+#define KVMPPC_LOAD_32EVR(n,s,base)	KVMPPC_LOAD_16EVR(n,s,base); \
+					   KVMPPC_LOAD_16EVR(n+16,s,base)
+
+_GLOBAL(kvmppc_save_guest_spe)
+	cmpi	0,r3,0
+	beqlr-
+	addi	r5,r3,VCPU_EVR
+	KVMPPC_SAVE_32EVR(0,r4,r5)	/* save evr[32] */
+	evxor   evr6, evr6, evr6
+	evmwumiaa evr6, evr6, evr6
+	li	r4,VCPU_ACC
+	evstddx evr6, r4, r3		/* save acc */
+	blr
+
+_GLOBAL(kvmppc_load_guest_spe)
+	cmpi	0,r3,0
+	beqlr-
+	li      r4,VCPU_ACC
+	evlddx  evr6,r4,r3
+	evmra   evr6,evr6		/* load acc */
+	addi	r5,r3,VCPU_EVR
+	KVMPPC_LOAD_32EVR(0,r4,r5)	/* load evr[32] */
+	blr
+#endif
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index acfe052..038bc37 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
@@ -25,6 +25,25 @@
 #include "booke.h"
 #include "e500_tlb.h"
 
+#ifdef CONFIG_SPE
+extern void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+extern void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+void kvmppc_vcpu_spe_put(struct kvm_vcpu *vcpu)
+{
+	enable_kernel_spe();
+	kvmppc_save_guest_spe(vcpu);
+	kvmppc_set_msr_block(vcpu, MSR_SPE);
+}
+
+void kvmppc_vcpu_spe_load(struct kvm_vcpu *vcpu)
+{
+	enable_kernel_spe();
+	kvmppc_load_guest_spe(vcpu);
+	kvmppc_clr_msr_block(vcpu, MSR_SPE);
+}
+#endif
+
 void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
 {
 }
@@ -41,6 +60,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvmppc_e500_tlb_put(vcpu);
+#ifdef CONFIG_SPE
+	/* save SPE env if guest has used SPE since last save */
+	if (!kvmppc_msr_block_has(vcpu, MSR_SPE))
+		kvmppc_vcpu_spe_put(vcpu);
+#endif
 }
 
 int kvmppc_core_check_processor_compat(void)
@@ -75,7 +99,24 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
 
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
-	return __kvmppc_vcpu_entry(kvm_run, vcpu);
+	int ret;
+
+#ifdef CONFIG_SPE
+	/*
+	 * If the guest is using SPE, reload the env now;
+	 * otherwise it is loaded on demand.
+	 */
+	if (vcpu->arch.shared->msr & MSR_SPE)
+		kvmppc_vcpu_spe_load(vcpu);
+#endif
+	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
+#ifdef CONFIG_SPE
+	/* save SPE env if guest has used SPE since last save */
+	if (!kvmppc_msr_block_has(vcpu, MSR_SPE))
+		kvmppc_vcpu_spe_put(vcpu);
+#endif
+
+	return ret;
 }
 
 /* 'linear_address' is actually an encoding of AS|PID|EADDR . */
-- 
1.7.1
