From: Greg Kurz <groug@kaod.org>
To: David Gibson <david@gibson.dropbear.id.au>
Cc: lvivier@redhat.com, Thomas Huth <thuth@redhat.com>,
Xiao Guangrong <xiaoguangrong.eric@gmail.com>,
farosas@linux.ibm.com, aik@ozlabs.ru,
"Michael S. Tsirkin" <mst@redhat.com>,
Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>,
qemu-devel@nongnu.org, Paolo Bonzini <pbonzini@redhat.com>,
qemu-ppc@nongnu.org, clg@kaod.org,
Igor Mammedov <imammedo@redhat.com>,
"Edgar E. Iglesias" <edgar.iglesias@gmail.com>,
paulus@samba.org
Subject: Re: [PATCH v7 11/17] target/ppc: Don't store VRMA SLBE persistently
Date: Tue, 3 Mar 2020 10:37:07 +0100
Message-ID: <20200303103707.2856b74e@bahia.home>
In-Reply-To: <20200303034351.333043-12-david@gibson.dropbear.id.au>

On Tue, 3 Mar 2020 14:43:45 +1100
David Gibson <david@gibson.dropbear.id.au> wrote:
> Currently, we construct the SLBE used for VRMA translations when the LPCR
> is written (which controls some bits in the SLBE), then use it later for
> translations.
>
> This is a bit complex and confusing - simplify it by constructing the
> SLBE directly from the LPCR when we need it.
>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
> ---
Reviewed-by: Greg Kurz <groug@kaod.org>
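
For anyone skimming the thread, here is a minimal sketch of the call
pattern the patch moves to (illustrative only, not part of the patch;
it assumes the build_vrma_slbe() and ppc_hash64_use_vrma() helpers and
the ppc_slb_t type shown in the diff below):

    /* VRMA SLBE is now derived from LPCR on demand, on the caller's stack */
    static int example_vrma_translation(PowerPCCPU *cpu)
    {
        CPUPPCState *env = &cpu->env;
        ppc_slb_t vrma_slbe;   /* no longer a persistent env->vrma_slb */

        if (!ppc_hash64_use_vrma(env)) {
            return 0;          /* VRMA not in use, nothing to build */
        }

        if (build_vrma_slbe(cpu, &vrma_slbe) != 0) {
            return -1;         /* bad LPCR[VRMASD] page size encoding */
        }

        /* vrma_slbe.vsid and vrma_slbe.sps are valid for this lookup only */
        return 0;
    }

This keeps LPCR as the single source of truth and removes the need to
keep env->vrma_slb in sync from ppc_store_lpcr().
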
> target/ppc/cpu.h | 3 --
> target/ppc/mmu-hash64.c | 92 ++++++++++++++++-------------------------
> 2 files changed, 35 insertions(+), 60 deletions(-)
>
> diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
> index f9871b1233..5a55fb02bd 100644
> --- a/target/ppc/cpu.h
> +++ b/target/ppc/cpu.h
> @@ -1044,9 +1044,6 @@ struct CPUPPCState {
> uint32_t flags;
> uint64_t insns_flags;
> uint64_t insns_flags2;
> -#if defined(TARGET_PPC64)
> - ppc_slb_t vrma_slb;
> -#endif
>
> int error_code;
> uint32_t pending_interrupts;
> diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
> index 4fd7b7ee74..34f6009b1e 100644
> --- a/target/ppc/mmu-hash64.c
> +++ b/target/ppc/mmu-hash64.c
> @@ -784,11 +784,41 @@ static target_ulong rmls_limit(PowerPCCPU *cpu)
> return rma_sizes[rmls];
> }
>
> +static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
> +{
> + CPUPPCState *env = &cpu->env;
> + target_ulong lpcr = env->spr[SPR_LPCR];
> + uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
> + target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
> + int i;
> +
> + for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> + const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
> +
> + if (!sps->page_shift) {
> + break;
> + }
> +
> + if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
> + slb->esid = SLB_ESID_V;
> + slb->vsid = vsid;
> + slb->sps = sps;
> + return 0;
> + }
> + }
> +
> + error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
> + TARGET_FMT_lx"\n", lpcr);
> +
> + return -1;
> +}
> +
> int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
> int rwx, int mmu_idx)
> {
> CPUState *cs = CPU(cpu);
> CPUPPCState *env = &cpu->env;
> + ppc_slb_t vrma_slbe;
> ppc_slb_t *slb;
> unsigned apshift;
> hwaddr ptex;
> @@ -827,8 +857,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
> }
> } else if (ppc_hash64_use_vrma(env)) {
> /* Emulated VRMA mode */
> - slb = &env->vrma_slb;
> - if (!slb->sps) {
> + slb = &vrma_slbe;
> + if (build_vrma_slbe(cpu, slb) != 0) {
> /* Invalid VRMA setup, machine check */
> cs->exception_index = POWERPC_EXCP_MCHECK;
> env->error_code = 0;
> @@ -976,6 +1006,7 @@ skip_slb_search:
> hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
> {
> CPUPPCState *env = &cpu->env;
> + ppc_slb_t vrma_slbe;
> ppc_slb_t *slb;
> hwaddr ptex, raddr;
> ppc_hash_pte64_t pte;
> @@ -997,8 +1028,8 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
> return raddr | env->spr[SPR_HRMOR];
> } else if (ppc_hash64_use_vrma(env)) {
> /* Emulated VRMA mode */
> - slb = &env->vrma_slb;
> - if (!slb->sps) {
> + slb = &vrma_slbe;
> + if (build_vrma_slbe(cpu, slb) != 0) {
> return -1;
> }
> } else {
> @@ -1037,65 +1068,12 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
> cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
> }
>
> -static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
> -{
> - CPUPPCState *env = &cpu->env;
> - const PPCHash64SegmentPageSizes *sps = NULL;
> - target_ulong esid, vsid, lpcr;
> - ppc_slb_t *slb = &env->vrma_slb;
> - uint32_t vrmasd;
> - int i;
> -
> - /* First clear it */
> - slb->esid = slb->vsid = 0;
> - slb->sps = NULL;
> -
> - /* Is VRMA enabled ? */
> - if (!ppc_hash64_use_vrma(env)) {
> - return;
> - }
> -
> - /*
> - * Make one up. Mostly ignore the ESID which will not be needed
> - * for translation
> - */
> - lpcr = env->spr[SPR_LPCR];
> - vsid = SLB_VSID_VRMA;
> - vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
> - vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
> - esid = SLB_ESID_V;
> -
> - for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
> - const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
> -
> - if (!sps1->page_shift) {
> - break;
> - }
> -
> - if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
> - sps = sps1;
> - break;
> - }
> - }
> -
> - if (!sps) {
> - error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
> - " vsid 0x"TARGET_FMT_lx, esid, vsid);
> - return;
> - }
> -
> - slb->vsid = vsid;
> - slb->esid = esid;
> - slb->sps = sps;
> -}
> -
> void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
> {
> PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
> CPUPPCState *env = &cpu->env;
>
> env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
> - ppc_hash64_update_vrma(cpu);
> }
>
> void helper_store_lpcr(CPUPPCState *env, target_ulong val)