From: Harsh Prateek Bora <harshpb@linux.ibm.com>
To: qemu-ppc@nongnu.org
Cc: qemu-devel@nongnu.org, farosas@suse.de, npiggin@gmail.com,
danielhb413@gmail.com
Subject: [PATCH v2 3/4] ppc: spapr: cleanup spapr_exit_nested() with helper routines.
Date: Mon, 24 Apr 2023 20:17:11 +0530
Message-ID: <20230424144712.1985425-4-harshpb@linux.ibm.com>
In-Reply-To: <20230424144712.1985425-1-harshpb@linux.ibm.com>
Currently, spapr_exit_nested() open-codes a long sequence of operations:
it maps each of the L1-provided hvstate and ptregs structures, fills them
in from the exiting L2 register state, and then restores the L1 host
state. This patch breaks those sets of operations out into dedicated
helper routines for better code readability and maintainability.
Signed-off-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
---
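Note: both new helpers follow the same map/validate/use/unmap idiom for
accessing the L1-provided guest structures. A minimal sketch of that
shape (illustrative only: struct guest_state and save_state() are
hypothetical stand-ins, and the address_space_map/unmap argument order
simply mirrors the calls in this patch):

    static int map_and_save(PowerPCCPU *cpu, target_ulong gpa,
                            target_ulong *r3)
    {
        struct guest_state *gs;         /* hypothetical payload type */
        hwaddr len = sizeof(*gs);

        gs = address_space_map(CPU(cpu)->as, gpa, &len, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (!gs || len != sizeof(*gs)) {
            /* Short mapping: release it without claiming data written. */
            address_space_unmap(CPU(cpu)->as, gs, len, 0, true);
            *r3 = H_PARAMETER;          /* failure reported via out-param */
            return -1;
        }
        save_state(gs);                 /* hypothetical: copy env state out */
        address_space_unmap(CPU(cpu)->as, gs, len, len, true);
        return 0;
    }

On success the helper returns 0, so the caller can chain the hvstate and
ptregs steps and then fall through to the common L1 restore path.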
hw/ppc/spapr_hcall.c | 120 ++++++++++++++++++++++++++-----------------
1 file changed, 72 insertions(+), 48 deletions(-)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index f24d4b368e..e69634bc22 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -1719,45 +1719,14 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
return env->gpr[3];
}
-void spapr_exit_nested(PowerPCCPU *cpu, int excp)
+static void restore_hvstate_from_env(struct kvmppc_hv_guest_state *hvstate,
+ CPUPPCState *env, int excp)
{
- CPUState *cs = CPU(cpu);
- CPUPPCState *env = &cpu->env;
- SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
- target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
- target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
- target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
- struct kvmppc_hv_guest_state *hvstate;
- struct kvmppc_pt_regs *regs;
- hwaddr len;
-
- assert(spapr_cpu->in_nested);
-
- cpu_ppc_hdecr_exit(env);
-
- len = sizeof(*hvstate);
- hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
- MEMTXATTRS_UNSPECIFIED);
- if (len != sizeof(*hvstate)) {
- address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
- r3_return = H_PARAMETER;
- goto out_restore_l1;
- }
-
hvstate->cfar = env->cfar;
hvstate->lpcr = env->spr[SPR_LPCR];
hvstate->pcr = env->spr[SPR_PCR];
hvstate->dpdes = env->spr[SPR_DPDES];
hvstate->hfscr = env->spr[SPR_HFSCR];
-
- if (excp == POWERPC_EXCP_HDSI) {
- hvstate->hdar = env->spr[SPR_HDAR];
- hvstate->hdsisr = env->spr[SPR_HDSISR];
- hvstate->asdr = env->spr[SPR_ASDR];
- } else if (excp == POWERPC_EXCP_HISI) {
- hvstate->asdr = env->spr[SPR_ASDR];
- }
-
/* HEIR should be implemented for HV mode and saved here. */
hvstate->srr0 = env->spr[SPR_SRR0];
hvstate->srr1 = env->spr[SPR_SRR1];
@@ -1768,27 +1737,43 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
hvstate->pidr = env->spr[SPR_BOOKS_PID];
hvstate->ppr = env->spr[SPR_PPR];
- /* Is it okay to specify write length larger than actual data written? */
- address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);
+ if (excp == POWERPC_EXCP_HDSI) {
+ hvstate->hdar = env->spr[SPR_HDAR];
+ hvstate->hdsisr = env->spr[SPR_HDSISR];
+ hvstate->asdr = env->spr[SPR_ASDR];
+ } else if (excp == POWERPC_EXCP_HISI) {
+ hvstate->asdr = env->spr[SPR_ASDR];
+ }
+}
- len = sizeof(*regs);
- regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
+static int map_and_restore_l2_hvstate(PowerPCCPU *cpu, int excp, target_ulong *r3)
+{
+ CPUPPCState *env = &cpu->env;
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+ target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
+ struct kvmppc_hv_guest_state *hvstate;
+ hwaddr len = sizeof(*hvstate);
+
+ hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
MEMTXATTRS_UNSPECIFIED);
- if (!regs || len != sizeof(*regs)) {
- address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
- r3_return = H_P2;
- goto out_restore_l1;
+ if (len != sizeof(*hvstate)) {
+ address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
+ *r3 = H_PARAMETER;
+ return -1;
}
+ restore_hvstate_from_env(hvstate, env, excp);
+ /* Is it okay to specify write length larger than actual data written? */
+ address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);
+ return 0;
+}
+static void restore_ptregs_from_env(struct kvmppc_pt_regs *regs,
+ CPUPPCState *env, int excp)
+{
+ hwaddr len;
len = sizeof(env->gpr);
assert(len == sizeof(regs->gpr));
memcpy(regs->gpr, env->gpr, len);
-
- regs->link = env->lr;
- regs->ctr = env->ctr;
- regs->xer = cpu_read_xer(env);
- regs->ccr = ppc_get_cr(env);
-
if (excp == POWERPC_EXCP_MCHECK ||
excp == POWERPC_EXCP_RESET ||
excp == POWERPC_EXCP_SYSCALL) {
@@ -1798,11 +1783,50 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
regs->nip = env->spr[SPR_HSRR0];
regs->msr = env->spr[SPR_HSRR1] & env->msr_mask;
}
+ regs->link = env->lr;
+ regs->ctr = env->ctr;
+ regs->xer = cpu_read_xer(env);
+ regs->ccr = ppc_get_cr(env);
+}
+static int map_and_restore_l2_ptregs(PowerPCCPU *cpu, int excp, target_ulong *r3)
+{
+ CPUPPCState *env = &cpu->env;
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+ target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
+ hwaddr len;
+ struct kvmppc_pt_regs *regs = NULL;
+
+ len = sizeof(*regs);
+ regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
+ MEMTXATTRS_UNSPECIFIED);
+ if (!regs || len != sizeof(*regs)) {
+ address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
+ *r3 = H_P2;
+ return -1;
+ }
+ restore_ptregs_from_env(regs, env, excp);
/* Is it okay to specify write length larger than actual data written? */
address_space_unmap(CPU(cpu)->as, regs, len, len, true);
+ return 0;
+}
+
+void spapr_exit_nested(PowerPCCPU *cpu, int excp)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+ target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
+
+ assert(spapr_cpu->in_nested);
+
+ cpu_ppc_hdecr_exit(env);
+
+ if (!map_and_restore_l2_hvstate(cpu, excp, &r3_return)) {
+        map_and_restore_l2_ptregs(cpu, excp, &r3_return);
+ }
-out_restore_l1:
+ /* out_restore_l1 */
memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr));
env->lr = spapr_cpu->nested_host_state->lr;
env->ctr = spapr_cpu->nested_host_state->ctr;
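
A note on the calling convention used above (a sketch of the shape, not
part of the patch): each map_and_restore_l2_* helper returns 0 on success
and -1 on failure, reporting the hcall status only through its
target_ulong *r3 out-parameter. That lets spapr_exit_nested() skip the
ptregs step when the hvstate step fails, while the L1 host state restore
still runs unconditionally. Stripped to its shape, with hypothetical step
names:

    /* Illustrative stand-ins for the two helpers, showing the
     * 0/-1 return plus *r3 out-parameter convention. */
    static int save_hvstate_step(target_ulong *r3) { return 0; }
    static int save_ptregs_step(target_ulong *r3)  { return 0; }

    static target_ulong run_exit_steps(target_ulong r3)
    {
        if (!save_hvstate_step(&r3)) {  /* 0 means success */
            save_ptregs_step(&r3);      /* on failure sets *r3, e.g. H_P2 */
        }
        /* the L1 host state restore then runs unconditionally */
        return r3;
    }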
--
2.31.1