From: Harsh Prateek Bora <harshpb@linux.ibm.com>
To: Nicholas Piggin <npiggin@gmail.com>, qemu-ppc@nongnu.org
Cc: qemu-devel@nongnu.org, Harsh Prateek Bora <harshpb@linux.ibm.com>
Subject: Re: [RFC PATCH 3/4] spapr: load and store l2 state with helper functions
Date: Fri, 5 May 2023 16:33:50 +0530
Message-ID: <1d0ce917-7ea0-35dd-b79f-5054ac81412b@linux.ibm.com>
In-Reply-To: <20230503003954.128188-4-npiggin@gmail.com>
<correcting my email in CC>
On 5/3/23 06:09, Nicholas Piggin wrote:
> Arguably this is just shuffling around register accesses, but one nice
> thing it does is allow the exit to save away the L2 state then switch
> the environment to the L1 before copying L2 data back to the L1, which
> logically flows more naturally and simplifies the error paths.
>
The advantage you mention comes at the cost of a double copy on exit
(env -> l2_state, switch to L1, then l2_state -> hvstate/ptregs),
whereas previously a single copy moved the L2 state directly into the
L1's buffers before switching to L1. The extra copy adds latency to
the L2 -> L1 transition. Not sure if it's worth it?
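
To make the comparison concrete, here is a rough sketch of the two exit
flows, condensed from the hunks below (illustrative only, not the exact
code; the helpers are the ones introduced in patch 2/4):

    /* Before: one copy, straight from env into the L1 buffers, then switch. */
    hvstate->srr0 = env->spr[SPR_SRR0];    /* ... and the other SPRs ... */
    regs->ccr = cr;                        /* cr assembled from env->crf[] */
    nested_load_state(cpu, spapr_cpu->nested_host_state);  /* switch to L1 */

    /* After: save env into l2_state, switch, then copy l2_state out. */
    nested_save_state(&l2_state, cpu);                      /* copy #1 */
    nested_load_state(cpu, spapr_cpu->nested_host_state);   /* switch to L1 */
    hvstate->srr0 = l2_state.srr0;                          /* copy #2 */
    regs->ccr = l2_state.cr;                                /* ... etc ... */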
regards,
Harsh
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
> hw/ppc/spapr_hcall.c | 178 +++++++++++++++++++++----------------------
> 1 file changed, 85 insertions(+), 93 deletions(-)
>
> diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
> index 6679150ac7..783a06ba98 100644
> --- a/hw/ppc/spapr_hcall.c
> +++ b/hw/ppc/spapr_hcall.c
> @@ -1675,9 +1675,9 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
> target_ulong *args)
> {
> PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
> - CPUState *cs = CPU(cpu);
> CPUPPCState *env = &cpu->env;
> SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
> + struct nested_ppc_state l2_state;
> target_ulong hv_ptr = args[0];
> target_ulong regs_ptr = args[1];
> target_ulong hdec, now = cpu_ppc_load_tbl(env);
> @@ -1686,8 +1686,6 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
> struct kvmppc_hv_guest_state hv_state;
> struct kvmppc_pt_regs *regs;
> hwaddr len;
> - uint64_t cr;
> - int i;
>
> if (spapr->nested_ptcr == 0) {
> return H_NOT_AVAILABLE;
> @@ -1713,6 +1711,10 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
> return H_PARAMETER;
> }
>
> + if (hv_state.lpid == 0) {
> + return H_PARAMETER;
> + }
> +
> spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1);
> if (!spapr_cpu->nested_host_state) {
> return H_NO_MEM;
> @@ -1731,51 +1733,49 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
> return H_P2;
> }
>
> - len = sizeof(env->gpr);
> + len = sizeof(l2_state.gpr);
> assert(len == sizeof(regs->gpr));
> - memcpy(env->gpr, regs->gpr, len);
> + memcpy(l2_state.gpr, regs->gpr, len);
>
> - env->lr = regs->link;
> - env->ctr = regs->ctr;
> - cpu_write_xer(env, regs->xer);
> -
> - cr = regs->ccr;
> - for (i = 7; i >= 0; i--) {
> - env->crf[i] = cr & 15;
> - cr >>= 4;
> - }
> -
> - env->msr = regs->msr;
> - env->nip = regs->nip;
> + l2_state.lr = regs->link;
> + l2_state.ctr = regs->ctr;
> + l2_state.xer = regs->xer;
> + l2_state.cr = regs->ccr;
> + l2_state.msr = regs->msr;
> + l2_state.nip = regs->nip;
>
> address_space_unmap(CPU(cpu)->as, regs, len, len, false);
>
> - env->cfar = hv_state.cfar;
> -
> - assert(env->spr[SPR_LPIDR] == 0);
> - env->spr[SPR_LPIDR] = hv_state.lpid;
> + l2_state.cfar = hv_state.cfar;
> + l2_state.lpidr = hv_state.lpid;
>
> lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
> lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
> lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
> lpcr &= ~LPCR_LPES0;
> - env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask;
> + l2_state.lpcr = lpcr & pcc->lpcr_mask;
>
> - env->spr[SPR_PCR] = hv_state.pcr;
> + l2_state.pcr = hv_state.pcr;
> /* hv_state.amor is not used */
> - env->spr[SPR_DPDES] = hv_state.dpdes;
> - env->spr[SPR_HFSCR] = hv_state.hfscr;
> - hdec = hv_state.hdec_expiry - now;
> + l2_state.dpdes = hv_state.dpdes;
> + l2_state.hfscr = hv_state.hfscr;
> /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/
> - env->spr[SPR_SRR0] = hv_state.srr0;
> - env->spr[SPR_SRR1] = hv_state.srr1;
> - env->spr[SPR_SPRG0] = hv_state.sprg[0];
> - env->spr[SPR_SPRG1] = hv_state.sprg[1];
> - env->spr[SPR_SPRG2] = hv_state.sprg[2];
> - env->spr[SPR_SPRG3] = hv_state.sprg[3];
> - env->spr[SPR_BOOKS_PID] = hv_state.pidr;
> - env->spr[SPR_PPR] = hv_state.ppr;
> + l2_state.srr0 = hv_state.srr0;
> + l2_state.srr1 = hv_state.srr1;
> + l2_state.sprg0 = hv_state.sprg[0];
> + l2_state.sprg1 = hv_state.sprg[1];
> + l2_state.sprg2 = hv_state.sprg[2];
> + l2_state.sprg3 = hv_state.sprg[3];
> + l2_state.pidr = hv_state.pidr;
> + l2_state.ppr = hv_state.ppr;
> + l2_state.tb_offset = env->tb_env->tb_offset + hv_state.tb_offset;
>
> + /*
> + * Switch to the nested guest environment and start the "hdec" timer.
> + */
> + nested_load_state(cpu, &l2_state);
> +
> + hdec = hv_state.hdec_expiry - now;
> cpu_ppc_hdecr_init(env);
> cpu_ppc_store_hdecr(env, hdec);
>
> @@ -1791,14 +1791,8 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
> * and it's not obviously worth a new data structure to do it.
> */
>
> - env->tb_env->tb_offset += hv_state.tb_offset;
> spapr_cpu->in_nested = true;
>
> - hreg_compute_hflags(env);
> - ppc_maybe_interrupt(env);
> - tlb_flush(cs);
> - env->reserve_addr = -1; /* Reset the reservation */
> -
> /*
> * The spapr hcall helper sets env->gpr[3] to the return value, but at
> * this point the L1 is not returning from the hcall but rather we
> @@ -1812,51 +1806,69 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
> {
> CPUPPCState *env = &cpu->env;
> SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
> - target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
> + struct nested_ppc_state l2_state;
> target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
> target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
> + target_ulong hsrr0, hsrr1, hdar, asdr, hdsisr;
> struct kvmppc_hv_guest_state *hvstate;
> struct kvmppc_pt_regs *regs;
> hwaddr len;
> - uint64_t cr;
> - int i;
>
> assert(spapr_cpu->in_nested);
>
> + nested_save_state(&l2_state, cpu);
> + hsrr0 = env->spr[SPR_HSRR0];
> + hsrr1 = env->spr[SPR_HSRR1];
> + hdar = env->spr[SPR_HDAR];
> + hdsisr = env->spr[SPR_HDSISR];
> + asdr = env->spr[SPR_ASDR];
> +
> + /*
> + * Switch back to the host environment (including for any error).
> + */
> + assert(env->spr[SPR_LPIDR] != 0);
> + nested_load_state(cpu, spapr_cpu->nested_host_state);
> + env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */
> +
> cpu_ppc_hdecr_exit(env);
>
> + spapr_cpu->in_nested = false;
> +
> + g_free(spapr_cpu->nested_host_state);
> + spapr_cpu->nested_host_state = NULL;
> +
> len = sizeof(*hvstate);
> hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
> MEMTXATTRS_UNSPECIFIED);
> if (len != sizeof(*hvstate)) {
> address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
> - r3_return = H_PARAMETER;
> - goto out_restore_l1;
> + env->gpr[3] = H_PARAMETER;
> + return;
> }
>
> - hvstate->cfar = env->cfar;
> - hvstate->lpcr = env->spr[SPR_LPCR];
> - hvstate->pcr = env->spr[SPR_PCR];
> - hvstate->dpdes = env->spr[SPR_DPDES];
> - hvstate->hfscr = env->spr[SPR_HFSCR];
> + hvstate->cfar = l2_state.cfar;
> + hvstate->lpcr = l2_state.lpcr;
> + hvstate->pcr = l2_state.pcr;
> + hvstate->dpdes = l2_state.dpdes;
> + hvstate->hfscr = l2_state.hfscr;
>
> if (excp == POWERPC_EXCP_HDSI) {
> - hvstate->hdar = env->spr[SPR_HDAR];
> - hvstate->hdsisr = env->spr[SPR_HDSISR];
> - hvstate->asdr = env->spr[SPR_ASDR];
> + hvstate->hdar = hdar;
> + hvstate->hdsisr = hdsisr;
> + hvstate->asdr = asdr;
> } else if (excp == POWERPC_EXCP_HISI) {
> - hvstate->asdr = env->spr[SPR_ASDR];
> + hvstate->asdr = asdr;
> }
>
> /* HEIR should be implemented for HV mode and saved here. */
> - hvstate->srr0 = env->spr[SPR_SRR0];
> - hvstate->srr1 = env->spr[SPR_SRR1];
> - hvstate->sprg[0] = env->spr[SPR_SPRG0];
> - hvstate->sprg[1] = env->spr[SPR_SPRG1];
> - hvstate->sprg[2] = env->spr[SPR_SPRG2];
> - hvstate->sprg[3] = env->spr[SPR_SPRG3];
> - hvstate->pidr = env->spr[SPR_BOOKS_PID];
> - hvstate->ppr = env->spr[SPR_PPR];
> + hvstate->srr0 = l2_state.srr0;
> + hvstate->srr1 = l2_state.srr1;
> + hvstate->sprg[0] = l2_state.sprg0;
> + hvstate->sprg[1] = l2_state.sprg1;
> + hvstate->sprg[2] = l2_state.sprg2;
> + hvstate->sprg[3] = l2_state.sprg3;
> + hvstate->pidr = l2_state.pidr;
> + hvstate->ppr = l2_state.ppr;
>
> /* Is it okay to specify write length larger than actual data written? */
> address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);
> @@ -1866,51 +1878,31 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
> MEMTXATTRS_UNSPECIFIED);
> if (!regs || len != sizeof(*regs)) {
> address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
> - r3_return = H_P2;
> - goto out_restore_l1;
> + env->gpr[3] = H_P2;
> + return;
> }
>
> len = sizeof(env->gpr);
> assert(len == sizeof(regs->gpr));
> - memcpy(regs->gpr, env->gpr, len);
> + memcpy(regs->gpr, l2_state.gpr, len);
>
> - regs->link = env->lr;
> - regs->ctr = env->ctr;
> - regs->xer = cpu_read_xer(env);
> -
> - cr = 0;
> - for (i = 0; i < 8; i++) {
> - cr |= (env->crf[i] & 15) << (4 * (7 - i));
> - }
> - regs->ccr = cr;
> + regs->link = l2_state.lr;
> + regs->ctr = l2_state.ctr;
> + regs->xer = l2_state.xer;
> + regs->ccr = l2_state.cr;
>
> if (excp == POWERPC_EXCP_MCHECK ||
> excp == POWERPC_EXCP_RESET ||
> excp == POWERPC_EXCP_SYSCALL) {
> - regs->nip = env->spr[SPR_SRR0];
> - regs->msr = env->spr[SPR_SRR1] & env->msr_mask;
> + regs->nip = l2_state.srr0;
> + regs->msr = l2_state.srr1 & env->msr_mask;
> } else {
> - regs->nip = env->spr[SPR_HSRR0];
> - regs->msr = env->spr[SPR_HSRR1] & env->msr_mask;
> + regs->nip = hsrr0;
> + regs->msr = hsrr1 & env->msr_mask;
> }
>
> /* Is it okay to specify write length larger than actual data written? */
> address_space_unmap(CPU(cpu)->as, regs, len, len, true);
> -
> -out_restore_l1:
> - assert(env->spr[SPR_LPIDR] != 0);
> - nested_load_state(cpu, spapr_cpu->nested_host_state);
> -
> - /*
> - * Return the interrupt vector address from H_ENTER_NESTED to the L1
> - * (or error code).
> - */
> - env->gpr[3] = r3_return;
> -
> - spapr_cpu->in_nested = false;
> -
> - g_free(spapr_cpu->nested_host_state);
> - spapr_cpu->nested_host_state = NULL;
> }
>
> static void hypercall_register_nested(void)