From: Harsh Prateek Bora <harshpb@linux.ibm.com>
To: Nicholas Piggin <npiggin@gmail.com>, qemu-ppc@nongnu.org
Cc: qemu-devel@nongnu.org, Harsh Prateek Bora <harshpb@linux.ibm.com>
Subject: Re: [RFC PATCH 2/4] spapr: Add a nested state struct
Date: Fri, 5 May 2023 16:24:10 +0530
Message-ID: <516d30a6-a329-d361-feea-e616e936dd41@linux.ibm.com>
In-Reply-To: <20230503003954.128188-3-npiggin@gmail.com>

<correcting my email in CC>

On 5/3/23 06:09, Nicholas Piggin wrote:
> Rather than use a copy of CPUPPCState to store the host state while
> the environment has been switched to the L2, use a new struct for
> this purpose.
> 
> Have helper functions to save and load this host state.
> 
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>   hw/ppc/spapr_hcall.c            | 164 ++++++++++++++++++++++++--------
>   include/hw/ppc/spapr_cpu_core.h |   5 +-
>   2 files changed, 129 insertions(+), 40 deletions(-)
> 
> diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
> index be225adaf6..6679150ac7 100644
> --- a/hw/ppc/spapr_hcall.c
> +++ b/hw/ppc/spapr_hcall.c
> @@ -1544,6 +1544,126 @@ static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
>       return H_FUNCTION;
>   }
>   
> +struct nested_ppc_state {
> +    uint64_t gpr[32];
> +    uint64_t lr;
> +    uint64_t ctr;
> +    uint64_t cfar;
> +    uint64_t msr;
> +    uint64_t nip;
> +    uint32_t cr;
> +
> +    uint64_t xer;
> +
> +    uint64_t lpcr;
> +    uint64_t lpidr;
> +    uint64_t pidr;
> +    uint64_t pcr;
> +    uint64_t dpdes;
> +    uint64_t hfscr;
> +    uint64_t srr0;
> +    uint64_t srr1;
> +    uint64_t sprg0;
> +    uint64_t sprg1;
> +    uint64_t sprg2;
> +    uint64_t sprg3;
> +    uint64_t ppr;
> +
> +    int64_t tb_offset;
> +};
> +
> +static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu)
> +{
> +    CPUPPCState *env = &cpu->env;
> +    uint32_t cr;
> +    int i;
> +
> +    memcpy(save->gpr, env->gpr, sizeof(save->gpr));
> +
> +    save->lr = env->lr;
> +    save->ctr = env->ctr;
> +    save->cfar = env->cfar;
> +    save->msr = env->msr;
> +    save->nip = env->nip;
> +
> +    cr = 0;
> +    for (i = 0; i < 8; i++) {
> +        cr |= (env->crf[i] & 15) << (4 * (7 - i));
> +    }
> +    save->cr = cr;
> +
> +    save->xer = cpu_read_xer(env);
> +
> +    save->lpcr = env->spr[SPR_LPCR];
> +    save->lpidr = env->spr[SPR_LPIDR];
> +    save->pcr = env->spr[SPR_PCR];
> +    save->dpdes = env->spr[SPR_DPDES];
> +    save->hfscr = env->spr[SPR_HFSCR];
> +    save->srr0 = env->spr[SPR_SRR0];
> +    save->srr1 = env->spr[SPR_SRR1];
> +    save->sprg0 = env->spr[SPR_SPRG0];
> +    save->sprg1 = env->spr[SPR_SPRG1];
> +    save->sprg2 = env->spr[SPR_SPRG2];
> +    save->sprg3 = env->spr[SPR_SPRG3];
> +    save->pidr = env->spr[SPR_BOOKS_PID];
> +    save->ppr = env->spr[SPR_PPR];
> +
> +    save->tb_offset = env->tb_env->tb_offset;
> +}
> +
> +static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load)
> +{
> +    CPUState *cs = CPU(cpu);
> +    CPUPPCState *env = &cpu->env;
> +    uint32_t cr;
> +    int i;
> +
> +    memcpy(env->gpr, load->gpr, sizeof(env->gpr));
> +
> +    env->lr = load->lr;
> +    env->ctr = load->ctr;
> +    env->cfar = load->cfar;
> +    env->msr = load->msr;
> +    env->nip = load->nip;
> +
> +    cr = load->cr;
> +    for (i = 7; i >= 0; i--) {
> +        env->crf[i] = cr & 15;
> +        cr >>= 4;
> +    }
> +
> +    cpu_write_xer(env, load->xer);
> +
> +    env->spr[SPR_LPCR] = load->lpcr;
> +    env->spr[SPR_LPIDR] = load->lpidr;
> +    env->spr[SPR_PCR] = load->pcr;
> +    env->spr[SPR_DPDES] = load->dpdes;
> +    env->spr[SPR_HFSCR] = load->hfscr;
> +    env->spr[SPR_SRR0] = load->srr0;
> +    env->spr[SPR_SRR1] = load->srr1;
> +    env->spr[SPR_SPRG0] = load->sprg0;
> +    env->spr[SPR_SPRG1] = load->sprg1;
> +    env->spr[SPR_SPRG2] = load->sprg2;
> +    env->spr[SPR_SPRG3] = load->sprg3;
> +    env->spr[SPR_BOOKS_PID] = load->pidr;
> +    env->spr[SPR_PPR] = load->ppr;
> +
> +    env->tb_env->tb_offset = load->tb_offset;
> +
> +    /*
> +     * MSR updated, compute hflags and possible interrupts.
> +     */
> +    hreg_compute_hflags(env);
> +    ppc_maybe_interrupt(env);
> +
> +    /*
> +     * Nested HV does not tag TLB entries between L1 and L2, so must
> +     * flush on transition.
> +     */
> +    tlb_flush(cs);
> +    env->reserve_addr = -1; /* Reset the reservation */
> +}
> +
>   /*
>    * When this handler returns, the environment is switched to the L2 guest
>    * and TCG begins running that. spapr_exit_nested() performs the switch from
> @@ -1593,12 +1713,14 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
>           return H_PARAMETER;
>       }
>   
> -    spapr_cpu->nested_host_state = g_try_new(CPUPPCState, 1);
> +    spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1);
>       if (!spapr_cpu->nested_host_state) {
>           return H_NO_MEM;
>       }
>   
> -    memcpy(spapr_cpu->nested_host_state, env, sizeof(CPUPPCState));
> +    assert(env->spr[SPR_LPIDR] == 0);
> +    assert(env->spr[SPR_DPDES] == 0);
> +    nested_save_state(spapr_cpu->nested_host_state, cpu);
>   
Ideally, we may want to save the entire env for the L1 host while
switching to L2, rather than keeping only a subset of it, for two reasons:
  - keeping the entire L1 env ensures it remains untouched by L2 during L2
execution (we shouldn't allow L2 to modify the remaining L1 env bits
unexpectedly)
  - I see some of the registers are retained only for L1 (so, ca, ov32,
ca32, etc.) but not for L2 (they appear to have been missed in the
nested_load_state helper in this patch). Are they not really needed
anymore? The previous patch introduced one of them though. A rough sketch
of what I mean follows below.

regards,
Harsh
>       len = sizeof(*regs);
>       regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
> @@ -1644,7 +1766,6 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
>       env->spr[SPR_DPDES] = hv_state.dpdes;
>       env->spr[SPR_HFSCR] = hv_state.hfscr;
>       hdec = hv_state.hdec_expiry - now;
> -    spapr_cpu->nested_tb_offset = hv_state.tb_offset;
>       /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/
>       env->spr[SPR_SRR0] = hv_state.srr0;
>       env->spr[SPR_SRR1] = hv_state.srr1;
> @@ -1670,7 +1791,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
>        * and it's not obviously worth a new data structure to do it.
>        */
>   
> -    env->tb_env->tb_offset += spapr_cpu->nested_tb_offset;
> +    env->tb_env->tb_offset += hv_state.tb_offset;
>       spapr_cpu->in_nested = true;
>   
>       hreg_compute_hflags(env);
> @@ -1689,7 +1810,6 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
>   
>   void spapr_exit_nested(PowerPCCPU *cpu, int excp)
>   {
> -    CPUState *cs = CPU(cpu);
>       CPUPPCState *env = &cpu->env;
>       SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
>       target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
> @@ -1778,34 +1898,8 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
>       address_space_unmap(CPU(cpu)->as, regs, len, len, true);
>   
>   out_restore_l1:
> -    memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr));
> -    env->lr = spapr_cpu->nested_host_state->lr;
> -    env->ctr = spapr_cpu->nested_host_state->ctr;
> -    memcpy(env->crf, spapr_cpu->nested_host_state->crf, sizeof(env->crf));
> -    env->cfar = spapr_cpu->nested_host_state->cfar;
> -    env->xer = spapr_cpu->nested_host_state->xer;
> -    env->so = spapr_cpu->nested_host_state->so;
> -    env->ca = spapr_cpu->nested_host_state->ca;
> -    env->ov = spapr_cpu->nested_host_state->ov;
> -    env->ov32 = spapr_cpu->nested_host_state->ov32;
> -    env->ca32 = spapr_cpu->nested_host_state->ca32;
> -    env->msr = spapr_cpu->nested_host_state->msr;
> -    env->nip = spapr_cpu->nested_host_state->nip;
> -
>       assert(env->spr[SPR_LPIDR] != 0);
> -    env->spr[SPR_LPCR] = spapr_cpu->nested_host_state->spr[SPR_LPCR];
> -    env->spr[SPR_LPIDR] = spapr_cpu->nested_host_state->spr[SPR_LPIDR];
> -    env->spr[SPR_PCR] = spapr_cpu->nested_host_state->spr[SPR_PCR];
> -    env->spr[SPR_DPDES] = 0;
> -    env->spr[SPR_HFSCR] = spapr_cpu->nested_host_state->spr[SPR_HFSCR];
> -    env->spr[SPR_SRR0] = spapr_cpu->nested_host_state->spr[SPR_SRR0];
> -    env->spr[SPR_SRR1] = spapr_cpu->nested_host_state->spr[SPR_SRR1];
> -    env->spr[SPR_SPRG0] = spapr_cpu->nested_host_state->spr[SPR_SPRG0];
> -    env->spr[SPR_SPRG1] = spapr_cpu->nested_host_state->spr[SPR_SPRG1];
> -    env->spr[SPR_SPRG2] = spapr_cpu->nested_host_state->spr[SPR_SPRG2];
> -    env->spr[SPR_SPRG3] = spapr_cpu->nested_host_state->spr[SPR_SPRG3];
> -    env->spr[SPR_BOOKS_PID] = spapr_cpu->nested_host_state->spr[SPR_BOOKS_PID];
> -    env->spr[SPR_PPR] = spapr_cpu->nested_host_state->spr[SPR_PPR];
> +    nested_load_state(cpu, spapr_cpu->nested_host_state);
>   
>       /*
>        * Return the interrupt vector address from H_ENTER_NESTED to the L1
> @@ -1813,14 +1907,8 @@ out_restore_l1:
>        */
>       env->gpr[3] = r3_return;
>   
> -    env->tb_env->tb_offset -= spapr_cpu->nested_tb_offset;
>       spapr_cpu->in_nested = false;
>   
> -    hreg_compute_hflags(env);
> -    ppc_maybe_interrupt(env);
> -    tlb_flush(cs);
> -    env->reserve_addr = -1; /* Reset the reservation */
> -
>       g_free(spapr_cpu->nested_host_state);
>       spapr_cpu->nested_host_state = NULL;
>   }
> diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
> index b560514560..69a52e39b8 100644
> --- a/include/hw/ppc/spapr_cpu_core.h
> +++ b/include/hw/ppc/spapr_cpu_core.h
> @@ -41,6 +41,8 @@ void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip,
>                                  target_ulong r1, target_ulong r3,
>                                  target_ulong r4);
>   
> +struct nested_ppc_state;
> +
>   typedef struct SpaprCpuState {
>       uint64_t vpa_addr;
>       uint64_t slb_shadow_addr, slb_shadow_size;
> @@ -51,8 +53,7 @@ typedef struct SpaprCpuState {
>   
>       /* Fields for nested-HV support */
>       bool in_nested; /* true while the L2 is executing */
> -    CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
> -    int64_t nested_tb_offset; /* L1->L2 TB offset */
> +    struct nested_ppc_state *nested_host_state; /* holds the L1 state while L2 executes */
>   } SpaprCpuState;
>   
>   static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)


Thread overview: 12+ messages
2023-05-03  0:39 [RFC PATCH 0/4] spapr: clean up nested hv Nicholas Piggin
2023-05-03  0:39 ` [RFC PATCH 1/4] spapr: H_ENTER_NESTED should restore host XER ca field Nicholas Piggin
2023-05-05 10:20   ` Harsh Prateek Bora
2023-05-03  0:39 ` [RFC PATCH 2/4] spapr: Add a nested state struct Nicholas Piggin
2023-05-05 10:54   ` Harsh Prateek Bora [this message]
2023-05-13  3:27     ` Nicholas Piggin
2023-05-03  0:39 ` [RFC PATCH 3/4] spapr: load and store l2 state with helper functions Nicholas Piggin
2023-05-05 11:03   ` Harsh Prateek Bora
2023-05-13  3:30     ` Nicholas Piggin
2023-05-03  0:39 ` [RFC PATCH 4/4] spapr: Move spapr nested HV to a new file Nicholas Piggin
2023-05-05 11:09   ` Harsh Prateek Bora
2023-05-13  3:32     ` Nicholas Piggin
