From: "Cédric Le Goater" <clg@kaod.org>
To: David Gibson <david@gibson.dropbear.id.au>
Cc: "Cédric Le Goater" <clg@kaod.org>,
qemu-ppc@nongnu.org, "Greg Kurz" <groug@kaod.org>,
"Suraj Jitindar Singh" <sjitindarsingh@gmail.com>,
qemu-devel@nongnu.org
Subject: [PATCH v2 1/4] target/ppc: Introduce ppc_radix64_xlate() for Radix tree translation
Date: Wed, 1 Apr 2020 18:28:07 +0200
Message-ID: <20200401162810.16254-2-clg@kaod.org>
In-Reply-To: <20200401162810.16254-1-clg@kaod.org>
This moves code under a new ppc_radix64_xlate() routine shared by
the MMU Radix page fault handler and the 'get_phys_page_debug' PPC
callback, the difference being that 'get_phys_page_debug' does not
generate exceptions.

The process-scoped part of the Radix translation is moved under
ppc_radix64_process_scoped_xlate() in preparation for the future
support of partition-scoped Radix translation. The routines raising
exceptions now take a 'cause_excp' bool to cover the
'get_phys_page_debug' case.

It should be functionally equivalent.
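
To illustrate the intended call structure, here is a minimal,
self-contained sketch (plain C, not QEMU code; every name in it is a
hypothetical stand-in for the routines touched by this patch): both
the MMU fault path and the debug path funnel through a single
translate routine, and the 'cause_excp' flag decides whether a failed
lookup raises an exception or silently returns an error.

  #include <inttypes.h>
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-in for the ppc_radix64_raise_*() helpers. */
  static void raise_fault(uint64_t eaddr, bool cause_excp)
  {
      if (!cause_excp) {
          return; /* debug lookups must stay free of side effects */
      }
      printf("exception raised for EA 0x%" PRIx64 "\n", eaddr);
  }

  /* Shared translation core, modelled on ppc_radix64_xlate(). */
  static int xlate(uint64_t eaddr, uint64_t *raddr, bool cause_excp)
  {
      if (eaddr & 1) {                    /* toy "translation failure" */
          raise_fault(eaddr, cause_excp); /* raises only when cause_excp */
          return 1;
      }
      *raddr = eaddr ^ 0xf000;            /* toy "translation" */
      return 0;
  }

  /* Fault-handler path: failures raise exceptions. */
  static int handle_mmu_fault(uint64_t eaddr)
  {
      uint64_t raddr;
      return xlate(eaddr, &raddr, true);
  }

  /* get_phys_page_debug path: failures just report -1, no exception. */
  static int64_t get_phys_page_debug(uint64_t eaddr)
  {
      uint64_t raddr;
      if (xlate(eaddr, &raddr, false)) {
          return -1;
      }
      return (int64_t)raddr;
  }

  int main(void)
  {
      printf("fault path: %d\n", handle_mmu_fault(0x2001));
      printf("debug path: %" PRId64 "\n", get_phys_page_debug(0x2001));
      return 0;
  }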
Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
---
target/ppc/mmu-radix64.c | 223 ++++++++++++++++++++++-----------------
1 file changed, 125 insertions(+), 98 deletions(-)
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
index d2422d1c54c9..410376fbeb65 100644
--- a/target/ppc/mmu-radix64.c
+++ b/target/ppc/mmu-radix64.c
@@ -69,11 +69,16 @@ static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
return true;
}
-static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
+static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr,
+ bool cause_excp)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ if (!cause_excp) {
+ return;
+ }
+
if (rwx == 2) { /* Instruction Segment Interrupt */
cs->exception_index = POWERPC_EXCP_ISEG;
} else { /* Data Segment Interrupt */
@@ -84,11 +89,15 @@ static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
}
static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
- uint32_t cause)
+ uint32_t cause, bool cause_excp)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ if (!cause_excp) {
+ return;
+ }
+
if (rwx == 2) { /* Instruction Storage Interrupt */
cs->exception_index = POWERPC_EXCP_ISI;
env->error_code = cause;
@@ -219,17 +228,118 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
return true;
}
+static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
+ vaddr eaddr, uint64_t pid,
+ ppc_v3_pate_t pate, hwaddr *g_raddr,
+ int *g_prot, int *g_page_size,
+ bool cause_excp)
+{
+ CPUState *cs = CPU(cpu);
+ uint64_t offset, size, prtbe_addr, prtbe0, pte;
+ int fault_cause = 0;
+ hwaddr pte_addr;
+
+ /* Index Process Table by PID to Find Corresponding Process Table Entry */
+ offset = pid * sizeof(struct prtb_entry);
+ size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
+ if (offset >= size) {
+ /* offset exceeds size of the process table */
+ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE, cause_excp);
+ return 1;
+ }
+ prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
+ prtbe0 = ldq_phys(cs->as, prtbe_addr);
+
+ /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
+ *g_page_size = PRTBE_R_GET_RTS(prtbe0);
+ pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
+ prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
+ g_raddr, g_page_size, &fault_cause, &pte_addr);
+
+ if (!(pte & R_PTE_VALID) ||
+ ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot)) {
+ /* No valid pte or access denied due to protection */
+ ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause, cause_excp);
+ return 1;
+ }
+
+ ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
+
+ return 0;
+}
+
+static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+ bool relocation,
+ hwaddr *raddr, int *psizep, int *protp,
+ bool cause_excp)
+{
+ uint64_t lpid = 0, pid = 0;
+ ppc_v3_pate_t pate;
+ int psize, prot;
+ hwaddr g_raddr;
+
+ /* Virtual Mode Access - get the fully qualified address */
+ if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
+ ppc_radix64_raise_segi(cpu, rwx, eaddr, cause_excp);
+ return 1;
+ }
+
+ /* Get Process Table */
+ if (cpu->vhyp) {
+ PPCVirtualHypervisorClass *vhc;
+ vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+ vhc->get_pate(cpu->vhyp, &pate);
+ } else {
+ if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
+ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE, cause_excp);
+ return 1;
+ }
+ if (!validate_pate(cpu, lpid, &pate)) {
+ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG,
+ cause_excp);
+ return 1;
+ }
+ /* We don't support guest mode yet */
+ if (lpid != 0) {
+ error_report("PowerNV guest support Unimplemented");
+ exit(1);
+ }
+ }
+
+ *psizep = INT_MAX;
+ *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ /*
+ * Perform process-scoped translation if relocation enabled.
+ *
+ * - Translates an effective address to a host real address in
+ * quadrants 0 and 3 when HV=1.
+ */
+ if (relocation) {
+ int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
+ pate, &g_raddr, &prot,
+ &psize, cause_excp);
+ if (ret) {
+ return ret;
+ }
+ *psizep = MIN(*psizep, psize);
+ *protp &= prot;
+ } else {
+ g_raddr = eaddr & R_EADDR_MASK;
+ }
+
+ *raddr = g_raddr;
+ return 0;
+}
+
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
int mmu_idx)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- PPCVirtualHypervisorClass *vhc;
- hwaddr raddr, pte_addr;
- uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
- int page_size, prot, fault_cause = 0;
- ppc_v3_pate_t pate;
+ int page_size, prot;
bool relocation;
+ hwaddr raddr;
assert(!(msr_hv && cpu->vhyp));
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
@@ -262,55 +372,12 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
}
- /* Virtual Mode Access - get the fully qualified address */
- if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
- ppc_radix64_raise_segi(cpu, rwx, eaddr);
- return 1;
- }
-
- /* Get Process Table */
- if (cpu->vhyp) {
- vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
- vhc->get_pate(cpu->vhyp, &pate);
- } else {
- if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
- ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
- return 1;
- }
- if (!validate_pate(cpu, lpid, &pate)) {
- ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
- }
- /* We don't support guest mode yet */
- if (lpid != 0) {
- error_report("PowerNV guest support Unimplemented");
- exit(1);
- }
- }
-
- /* Index Process Table by PID to Find Corresponding Process Table Entry */
- offset = pid * sizeof(struct prtb_entry);
- size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
- if (offset >= size) {
- /* offset exceeds size of the process table */
- ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
- return 1;
- }
- prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
-
- /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
- page_size = PRTBE_R_GET_RTS(prtbe0);
- pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
- prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
- &raddr, &page_size, &fault_cause, &pte_addr);
- if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
- /* Couldn't get pte or access denied due to protection */
- ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
+ /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
+ if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
+ &page_size, &prot, 1)) {
return 1;
}
- /* Update Reference and Change Bits */
- ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);
-
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
prot, mmu_idx, 1UL << page_size);
return 0;
@@ -318,58 +385,18 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
- CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- PPCVirtualHypervisorClass *vhc;
- hwaddr raddr, pte_addr;
- uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
- int page_size, fault_cause = 0;
- ppc_v3_pate_t pate;
+ int psize, prot;
+ hwaddr raddr;
/* Handle Real Mode */
- if (msr_dr == 0) {
+ if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
/* In real mode top 4 effective addr bits (mostly) ignored */
return eaddr & 0x0FFFFFFFFFFFFFFFULL;
}
- /* Virtual Mode Access - get the fully qualified address */
- if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
- return -1;
- }
-
- /* Get Process Table */
- if (cpu->vhyp) {
- vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
- vhc->get_pate(cpu->vhyp, &pate);
- } else {
- if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
- return -1;
- }
- if (!validate_pate(cpu, lpid, &pate)) {
- return -1;
- }
- /* We don't support guest mode yet */
- if (lpid != 0) {
- error_report("PowerNV guest support Unimplemented");
- exit(1);
- }
- }
-
- /* Index Process Table by PID to Find Corresponding Process Table Entry */
- offset = pid * sizeof(struct prtb_entry);
- size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
- if (offset >= size) {
- /* offset exceeds size of the process table */
- return -1;
- }
- prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
-
- /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
- page_size = PRTBE_R_GET_RTS(prtbe0);
- pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
- prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
- &raddr, &page_size, &fault_cause, &pte_addr);
- if (!pte) {
+ if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
+ &prot, 0)) {
return -1;
}
--
2.21.1