From: David Gibson <david@gibson.dropbear.id.au>
To: benh@kernel.crashing.org
Cc: aik@ozlabs.ru, agraf@suse.de, qemu-devel@nongnu.org,
av1474@comtv.ru, qemu-ppc@nongnu.org,
David Gibson <david@gibson.dropbear.id.au>
Subject: [Qemu-devel] [RFC 3/9] target-ppc: Rework SLB page size lookup
Date: Fri, 15 Jan 2016 18:04:34 +1100 [thread overview]
Message-ID: <1452841480-13325-4-git-send-email-david@gibson.dropbear.id.au> (raw)
In-Reply-To: <1452841480-13325-1-git-send-email-david@gibson.dropbear.id.au>
Currently, the ppc_hash64_page_shift() function looks up a page size based
on information in an SLB entry. It open codes the bit translation for
existing CPUs, however different CPU models can have different SLB
encodings. We already store those in the 'sps' table in CPUPPCState, but
we don't currently enforce that it actually matches the logic in
ppc_hash64_page_shift().
This patch reworks lookup of page size from SLB in several ways:
* ppc_hash64_page_shift() is replaced by slb_page_size() which
- Uses the sps table, so it is correct for whatever encodings are
stored there
- Returns a pointer to a table entry, rather than the raw shift,
which we'll want later on
* We adjust ppc_store_slb() to verify that the stored SLBE has a valid
page size encoding (otherwise it fails, which will trigger an illegal
instruction exception)
* ppc_hash64_htab_lookup() is extended to return the SLB's page size in
addition to other information.
* Adjust ppc_hash64_pte_raddr() to take a page shift directly
instead of an SLB entry. Both callers have just called
ppc_hash64_htab_lookup() which has already done the SLB -> page
size lookup.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
target-ppc/mmu-hash64.c | 88 ++++++++++++++++++++++++++++---------------------
1 file changed, 51 insertions(+), 37 deletions(-)
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 03e25fd..678053b 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -90,6 +90,28 @@ void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
}
}
+static const struct ppc_one_seg_page_size *slb_page_size(PowerPCCPU *cpu,
+ uint64_t slbv)
+{
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ if ((slbv & SLB_VSID_LLP_MASK) == sps->slb_enc) {
+ return sps;
+ }
+ }
+
+ /* Bad page size encoding */
+ return NULL;
+}
+
void helper_slbia(CPUPPCState *env)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
@@ -150,6 +172,9 @@ int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
return -1; /* 1T segment on MMU that doesn't support it */
}
+ if (!slb_page_size(cpu, rs)) {
+ return -1; /* Bad page size encoding for this CPU */
+ }
/* Mask out the slot number as we store the entry */
slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
@@ -392,46 +417,37 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
return -1;
}
-static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
-{
- uint64_t epnshift;
-
- /* Page size according to the SLB, which we use to generate the
- * EPN for hash table lookup.. When we implement more recent MMU
- * extensions this might be different from the actual page size
- * encoded in the PTE */
- if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
- epnshift = TARGET_PAGE_BITS;
- } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
- epnshift = TARGET_PAGE_BITS_64K;
- } else {
- epnshift = TARGET_PAGE_BITS_16M;
- }
- return epnshift;
-}
-
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
ppc_slb_t *slb, target_ulong eaddr,
- ppc_hash_pte64_t *pte)
+ ppc_hash_pte64_t *pte,
+ const struct ppc_one_seg_page_size **psps)
{
CPUPPCState *env = &cpu->env;
hwaddr pte_offset;
hwaddr hash;
- uint64_t vsid, epnshift, epnmask, epn, ptem;
+ const struct ppc_one_seg_page_size *sps;
+ uint64_t vsid, epnmask, epn, ptem;
+
+ sps = slb_page_size(cpu, slb->vsid);
+ /* The SLB store path should prevent any bad page size encodings
+ * getting in there, so: */
+ assert(sps);
+ if (psps) {
+ *psps = sps;
+ }
- epnshift = ppc_hash64_page_shift(slb);
- epnmask = ~((1ULL << epnshift) - 1);
+ epnmask = ~((1ULL << sps->page_shift) - 1);
if (slb->vsid & SLB_VSID_B) {
/* 1TB segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
- hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
+ hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
} else {
/* 256M segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
- hash = vsid ^ (epn >> epnshift);
+ hash = vsid ^ (epn >> sps->page_shift);
}
ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
@@ -463,17 +479,12 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
return pte_offset;
}
-static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
- target_ulong eaddr)
+static hwaddr ppc_hash64_pte_raddr(PowerPCCPU *cpu, unsigned page_shift,
+ ppc_hash_pte64_t pte, target_ulong eaddr)
{
- hwaddr mask;
- int target_page_bits;
+ hwaddr mask = (1ULL << page_shift) - 1;
hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
- /*
- * We support 4K, 64K and 16M now
- */
- target_page_bits = ppc_hash64_page_shift(slb);
- mask = (1ULL << target_page_bits) - 1;
+
return (rpn & ~mask) | (eaddr & mask);
}
@@ -483,6 +494,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
+ const struct ppc_one_seg_page_size *sps;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
int pp_prot, amr_prot, prot;
@@ -526,7 +538,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
/* 4. Locate the PTE in the hash table */
- pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &sps);
if (pte_offset == -1) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISI;
@@ -598,7 +610,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
/* 7. Determine the real address from the PTE */
- raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
+ raddr = ppc_hash64_pte_raddr(cpu, sps->page_shift, pte, eaddr);
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
@@ -610,6 +622,7 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
+ const struct ppc_one_seg_page_size *sps;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
@@ -623,12 +636,13 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
return -1;
}
- pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &sps);
if (pte_offset == -1) {
return -1;
}
- return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
+ return ppc_hash64_pte_raddr(cpu, sps->page_shift, pte, addr)
+ & TARGET_PAGE_MASK;
}
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
--
2.5.0
next prev parent reply other threads:[~2016-01-15 7:03 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-15 7:04 [Qemu-devel] [RFC 0/9] Clean up page size handling for ppc 64-bit hash MMUs with TCG David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 1/9] target-ppc: Remove unused kvmppc_read_segment_page_sizes() stub David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 2/9] target-ppc: Convert mmu-hash{32, 64}.[ch] from CPUPPCState to PowerPCCPU David Gibson
2016-01-15 7:04 ` David Gibson [this message]
2016-01-15 7:04 ` [Qemu-devel] [RFC 4/9] target-ppc: Use actual page size encodings from HPTE David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 5/9] target-ppc: Remove unused mmu models from ppc_tlb_invalidate_one David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 6/9] target-ppc: Split 44x tlbiva from ppc_tlb_invalidate_one() David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 7/9] target-ppc: Add new TLB invalidate by HPTE call for hash64 MMUs David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 8/9] target-ppc: Helper to determine page size information from hpte alone David Gibson
2016-01-15 7:04 ` [Qemu-devel] [RFC 9/9] target-ppc: Allow more page sizes for POWER7 & POWER8 in TCG David Gibson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1452841480-13325-4-git-send-email-david@gibson.dropbear.id.au \
--to=david@gibson.dropbear.id.au \
--cc=agraf@suse.de \
--cc=aik@ozlabs.ru \
--cc=av1474@comtv.ru \
--cc=benh@kernel.crashing.org \
--cc=qemu-devel@nongnu.org \
--cc=qemu-ppc@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).