[PATCH 1/9] KVM: PPC: Convert u64 -> ulong
From: Alexander Graf @ 2010-04-20 0:49 UTC
To: kvm-ppc-u79uwXL29TY76Z2rM5mHXA; +Cc: kvm-u79uwXL29TY76Z2rM5mHXA
There are some pieces in the code that I overlooked that still use
u64s instead of longs. This slows down 32-bit hosts unnecessarily, so
let's just move them to ulong.
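As a minimal sketch of the cost being avoided (editor's illustration with a
hypothetical helper, not code from the patch): masking an effective address,
as kvmppc_mmu_pte_flush() does, is a single 32-bit AND and compare on ppc32
when the operands are ulong, but needs a register pair per operand with u64.

    /* Hypothetical sketch: mirrors the ea/ea_mask test done by the
     * flush path; with ulong this stays in one register on ppc32. */
    static inline int ea_in_range(unsigned long ea,
                                  unsigned long guest_ea,
                                  unsigned long ea_mask)
    {
            return (ea & ea_mask) == (guest_ea & ea_mask);
    }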
Signed-off-by: Alexander Graf <agraf-l3A5Bk7waGM@public.gmane.org>
---
v1 -> v2:
- don't touch vsid - that stays u64!
---
arch/powerpc/include/asm/kvm_book3s.h | 4 ++--
arch/powerpc/include/asm/kvm_host.h | 6 +++---
arch/powerpc/kvm/book3s.c | 6 +++---
arch/powerpc/kvm/book3s_32_mmu.c | 6 +++---
arch/powerpc/kvm/book3s_32_mmu_host.c | 8 +++-----
arch/powerpc/kvm/book3s_64_mmu.c | 4 ++--
arch/powerpc/kvm/book3s_64_mmu_host.c | 6 +++---
7 files changed, 19 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9517b8d..5d3bd0c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -107,9 +107,9 @@ struct kvmppc_vcpu_book3s {
#define VSID_BAT 0x7fffffffffb00000ULL
#define VSID_PR 0x8000000000000000ULL
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 5a83995..0c9ad86 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -124,9 +124,9 @@ struct kvm_arch {
};
struct kvmppc_pte {
- u64 eaddr;
+ ulong eaddr;
u64 vpage;
- u64 raddr;
+ ulong raddr;
bool may_read : 1;
bool may_write : 1;
bool may_execute : 1;
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
- int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+ int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 5805f99..a7de709 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -812,12 +812,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* so we can't use the NX bit inside the guest. Let's cross our fingers,
* that no guest that needs the dcbz hack does NX.
*/
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
} else {
vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
}
break;
@@ -843,7 +843,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.dear = dar;
to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
r = RESUME_GUEST;
}
break;
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 48efb37..33186b7 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -60,7 +60,7 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data);
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid);
static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
@@ -183,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_sr *sre;
hva_t ptegp;
u32 pteg[16];
- u64 ptem = 0;
+ u32 ptem = 0;
int i;
int found = 0;
@@ -327,7 +327,7 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
}
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
/* In case we only have one of MSR_IR or MSR_DR set, let's put
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index ce1bfb1..2bb67e6 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -77,11 +77,9 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
kvm_release_pfn_clean(pte->pfn);
}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 _guest_ea, u64 _ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
int i;
- u32 guest_ea = _guest_ea;
- u32 ea_mask = _ea_mask;
dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
@@ -127,7 +125,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
}
}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
int i;
@@ -265,7 +263,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n",
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
return -EINVAL;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 12e4c97..a9241e9 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -232,7 +232,7 @@ do_second:
}
dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
- "-> 0x%llx\n",
+ "-> 0x%lx\n",
eaddr, avpn, gpte->vpage, gpte->raddr);
found = true;
break;
@@ -439,7 +439,7 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}
-static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index b230154..41af12f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -62,7 +62,7 @@ static void invalidate_pte(struct hpte_cache *pte)
kvm_release_pfn_clean(pte->pfn);
}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
int i;
@@ -110,7 +110,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
}
}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
int i;
@@ -216,7 +216,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
return -EINVAL;
}
hpaddr <<= PAGE_SHIFT;
--
1.6.0.2
[PATCH 3/9] KVM: PPC: Improve split mode
From: Alexander Graf @ 2010-04-20 0:49 UTC
To: kvm-ppc-u79uwXL29TY76Z2rM5mHXA; +Cc: kvm-u79uwXL29TY76Z2rM5mHXA
When in split mode, instruction relocation and data relocation are not equal.
So far we have implemented this mode by reserving special pseudo-VSIDs for the
two cases and flushing all PTEs when entering split mode, which is slow.
Unfortunately, 32-bit Linux and Mac OS X use split mode extensively. To avoid
slowing things down too much, I came up with a different idea: mark split
mode with a bit in the VSID and then treat it like any other segment.
This means we can just flush the shadow segment cache, but keep the PTEs
intact. I verified that this works with ppc32 Linux and Mac OS X 10.4
guests and does speed them up.
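As a sketch of the new encoding (my illustration, built from the VSID_*
constants this patch defines; split_mode_vsid is a hypothetical helper, not
code from the patch):

    /* Assumes the kernel's u64 type and the constants below from
     * kvm_book3s.h. A split-mode segment keeps the guest VSID and
     * only sets a flag bit, so its shadow PTEs stay reusable. */
    #define VSID_REAL_DR 0x2000000000000000ULL
    #define VSID_REAL_IR 0x4000000000000000ULL

    static u64 split_mode_vsid(u64 gvsid, int dr, int ir)
    {
            if (dr && !ir)          /* data relocation only */
                    return VSID_REAL_DR | gvsid;
            if (ir && !dr)          /* instruction relocation only */
                    return VSID_REAL_IR | gvsid;
            return gvsid;           /* not in split mode */
    }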
Signed-off-by: Alexander Graf <agraf-l3A5Bk7waGM@public.gmane.org>
---
arch/powerpc/include/asm/kvm_book3s.h | 9 ++++-----
arch/powerpc/kvm/book3s.c | 28 ++++++++++++++--------------
arch/powerpc/kvm/book3s_32_mmu.c | 21 +++++++++++++--------
arch/powerpc/kvm/book3s_64_mmu.c | 27 +++++++++++++++------------
4 files changed, 46 insertions(+), 39 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 5d3bd0c..6f74d93 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -100,11 +100,10 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL_DR 0x7ffffffffff00000ULL
-#define VSID_REAL_IR 0x7fffffffffe00000ULL
-#define VSID_SPLIT_MASK 0x7fffffffffe00000ULL
-#define VSID_REAL 0x7fffffffffc00000ULL
-#define VSID_BAT 0x7fffffffffb00000ULL
+#define VSID_REAL 0x1fffffffffc00000ULL
+#define VSID_BAT 0x1fffffffffb00000ULL
+#define VSID_REAL_DR 0x2000000000000000ULL
+#define VSID_REAL_IR 0x4000000000000000ULL
#define VSID_PR 0x8000000000000000ULL
extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index a03163b..91dc42d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -147,16 +147,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
}
- if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
- (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
- bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
- bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
-
- /* Flush split mode PTEs */
- if (dr != ir)
- kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
- VSID_SPLIT_MASK);
-
+ if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
kvmppc_mmu_flush_segments(vcpu);
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
@@ -534,6 +526,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
bool is_mmio = false;
bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+ u64 vsid;
relocated = data ? dr : ir;
@@ -551,13 +544,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- pte.vpage |= VSID_REAL;
+ pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
break;
case MSR_DR:
- pte.vpage |= VSID_REAL_DR;
- break;
case MSR_IR:
- pte.vpage |= VSID_REAL_IR;
+ vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+ if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+ else
+ pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+ pte.vpage |= vsid;
+
+ if (vsid == -1)
+ page_found = -EINVAL;
break;
}
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b7..0b10503 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_sr *sr;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ sr = find_sr(to_book3s(vcpu), ea);
+ if (sr->valid)
+ gvsid = sr->vsid;
+ }
+
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
high memory) */
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea = esid << SID_SHIFT;
- struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
-
if (!sr->valid)
return -1;
*vsid = sr->vsid;
break;
- }
default:
BUG();
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a9241e9..612de6e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -442,29 +442,32 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 *vsid)
{
+ ulong ea = esid << SID_SHIFT;
+ struct kvmppc_slb *slb;
+ u64 gvsid = esid;
+
+ if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+ slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+ if (slb)
+ gvsid = slb->vsid;
+ }
+
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
- *vsid = (VSID_REAL >> 16) | esid;
+ *vsid = VSID_REAL | esid;
break;
case MSR_IR:
- *vsid = (VSID_REAL_IR >> 16) | esid;
+ *vsid = VSID_REAL_IR | gvsid;
break;
case MSR_DR:
- *vsid = (VSID_REAL_DR >> 16) | esid;
+ *vsid = VSID_REAL_DR | gvsid;
break;
case MSR_DR|MSR_IR:
- {
- ulong ea;
- struct kvmppc_slb *slb;
- ea = esid << SID_SHIFT;
- slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
- if (slb)
- *vsid = slb->vsid;
- else
+ if (!slb)
return -ENOENT;
+ *vsid = gvsid;
break;
- }
default:
BUG();
break;
--
1.6.0.2
[PATCH 8/9] KVM: PPC: Find HTAB ourselves
From: Alexander Graf @ 2010-04-20 0:49 UTC
To: kvm-ppc-u79uwXL29TY76Z2rM5mHXA
Cc: kvm-u79uwXL29TY76Z2rM5mHXA, Benjamin Herrenschmidt
For KVM we need to find the location of the HTAB. We can either rely
on internal data structures of the kernel or ask the hardware.
Ben complained about the internal data structure method, so let's
switch to asking the hardware about the HTAB ourselves. Now we're
fully independent :-).
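For reference, this is the SDR1 decoding the patch relies on (the lines are
taken from kvmppc_mmu_init() below; the commentary on the 32-bit layout is
mine):

    /* On 32-bit hash MMUs, SDR1 holds HTABORG (the physical base of
     * the hash table) in its upper 16 bits and HTABMASK (extra hash
     * bits used to select a PTEG) in its low 9 bits. */
    ulong sdr1, htab;
    u32 htabmask;

    asm ( "mfsdr1 %0" : "=r"(sdr1) );
    htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
    htab = (ulong)__va(sdr1 & 0xffff0000);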
CC: Benjamin Herrenschmidt <benh-XVmvHMARGAS8U2dJNN8I7kB+6BGkLq7r@public.gmane.org>
Signed-off-by: Alexander Graf <agraf-l3A5Bk7waGM@public.gmane.org>
---
arch/powerpc/kernel/ppc_ksyms.c | 5 -----
arch/powerpc/kvm/book3s_32_mmu_host.c | 21 +++++++++++++--------
2 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 2b7c43f..bc9f39d 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -178,11 +178,6 @@ EXPORT_SYMBOL(switch_mmu_context);
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-
-extern struct hash_pte *Hash;
-extern unsigned long _SDR1;
-EXPORT_SYMBOL_GPL(Hash); /* For KVM */
-EXPORT_SYMBOL_GPL(_SDR1); /* For KVM */
#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 2bb67e6..0bb6600 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -54,6 +54,9 @@
#error Only 32 bit pages are supported for now
#endif
+static ulong htab;
+static u32 htabmask;
+
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
volatile u32 *pteg;
@@ -217,14 +220,11 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
return NULL;
}
-extern struct hash_pte *Hash;
-extern unsigned long _SDR1;
-
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
bool primary)
{
- u32 page, hash, htabmask;
- ulong pteg = (ulong)Hash;
+ u32 page, hash;
+ ulong pteg = htab;
page = (eaddr & ~ESID_MASK) >> 12;
@@ -232,13 +232,12 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
if (!primary)
hash = ~hash;
- htabmask = ((_SDR1 & 0x1FF) << 16) | 0xFFC0;
hash &= htabmask;
pteg |= hash;
- dprintk_mmu("htab: %p | hash: %x | htabmask: %x | pteg: %lx\n",
- Hash, hash, htabmask, pteg);
+ dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
+ htab, hash, htabmask, pteg);
return (u32*)pteg;
}
@@ -453,6 +452,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int err;
+ ulong sdr1;
err = __init_new_context();
if (err < 0)
@@ -474,5 +474,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu3s->vsid_next = vcpu3s->vsid_first;
+ /* Remember where the HTAB is */
+ asm ( "mfsdr1 %0" : "=r"(sdr1) );
+ htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
+ htab = (ulong)__va(sdr1 & 0xffff0000);
+
return 0;
}
--
1.6.0.2