From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Alex Bennée" <alex.bennee@linaro.org>,
xen-devel@lists.xenproject.org, kvm@vger.kernel.org,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Cameron Esfahani" <dirty@apple.com>,
"Roman Bolshakov" <r.bolshakov@yadro.com>,
"Eduardo Habkost" <eduardo@habkost.net>,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
"Yanan Wang" <wangyanan55@huawei.com>,
"Alexander Graf" <agraf@csgraf.de>,
"Peter Maydell" <peter.maydell@linaro.org>,
qemu-arm@nongnu.org
Subject: [PATCH 14/14] accel: Rename HVF struct hvf_vcpu_state -> struct AccelvCPUState
Date: Wed, 5 Apr 2023 12:18:11 +0200
Message-ID: <20230405101811.76663-15-philmd@linaro.org>
In-Reply-To: <20230405101811.76663-1-philmd@linaro.org>
We want all accelerators to share the same opaque pointer in
CPUState.
Rename the 'hvf_vcpu_state' structure to 'AccelvCPUState'.
Use the generic 'accel' field of CPUState instead of 'hvf'.
Replace g_malloc0() with g_new0() for readability.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
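Note for reviewers (illustrative only, not intended to be applied): the
sketch below is a minimal standalone GLib program showing that g_malloc0()
and g_new0() perform the same zero-initialized allocation, with g_new0()
stating the element type and count explicitly. The struct is a deliberately
abbreviated stand-in for the real AccelvCPUState.

#include <glib.h>

/* Abbreviated stand-in for the renamed accelerator vCPU state.
 * Build with:
 *   cc demo.c $(pkg-config --cflags --libs glib-2.0)
 */
struct AccelvCPUState {
    guint64 fd;
    gboolean vtimer_masked;
};

int main(void)
{
    /* Old form: size-based allocation, sizeof() spelled out by hand. */
    struct AccelvCPUState *a = g_malloc0(sizeof(*a));

    /* New form: g_new0(type, count) performs the same zero-initialized
     * allocation but makes the element type and count explicit and
     * returns a correctly typed pointer. */
    struct AccelvCPUState *b = g_new0(struct AccelvCPUState, 1);

    g_free(a);
    g_free(b);
    return 0;
}

Both forms behave identically at run time; this series prefers g_new0()
purely for readability.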
include/hw/core/cpu.h | 3 --
include/sysemu/hvf_int.h | 2 +-
accel/hvf/hvf-accel-ops.c | 16 ++++-----
target/arm/hvf/hvf.c | 70 +++++++++++++++++++--------------------
4 files changed, 44 insertions(+), 47 deletions(-)
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 8d27861ed5..1dc5efe650 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -236,7 +236,6 @@ typedef struct SavedIOTLB {
struct KVMState;
struct kvm_run;
struct AccelvCPUState;
-struct hvf_vcpu_state;
/* work queue */
@@ -442,8 +441,6 @@ struct CPUState {
/* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
bool prctl_unalign_sigbus;
- struct hvf_vcpu_state *hvf;
-
/* track IOMMUs whose translations we've cached in the TCG TLB */
GArray *iommu_notifiers;
};
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
index 6545f7cd61..96ef51f4df 100644
--- a/include/sysemu/hvf_int.h
+++ b/include/sysemu/hvf_int.h
@@ -48,7 +48,7 @@ struct HVFState {
};
extern HVFState *hvf_state;
-struct hvf_vcpu_state {
+struct AccelvCPUState {
uint64_t fd;
void *exit;
bool vtimer_masked;
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 24913ca9c4..06ca1d59a4 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -363,19 +363,19 @@ type_init(hvf_type_init);
static void hvf_vcpu_destroy(CPUState *cpu)
{
- hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
+ hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
assert_hvf_ok(ret);
hvf_arch_vcpu_destroy(cpu);
- g_free(cpu->hvf);
- cpu->hvf = NULL;
+ g_free(cpu->accel);
+ cpu->accel = NULL;
}
static int hvf_init_vcpu(CPUState *cpu)
{
int r;
- cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
+ cpu->accel = g_new0(struct AccelvCPUState, 1);
/* init cpu signals */
struct sigaction sigact;
@@ -384,13 +384,13 @@ static int hvf_init_vcpu(CPUState *cpu)
sigact.sa_handler = dummy_signal;
sigaction(SIG_IPI, &sigact, NULL);
- pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
- sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
+ pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
+ sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
#ifdef __aarch64__
- r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
+ r = hv_vcpu_create(&cpu->accel->fd, (hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
#else
- r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT);
#endif
cpu->vcpu_dirty = 1;
assert_hvf_ok(r);
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index ad65603445..b85648b61c 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -366,29 +366,29 @@ int hvf_get_registers(CPUState *cpu)
int i;
for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
- ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
*(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
assert_hvf_ok(ret);
}
for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
- ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+ ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
&fpval);
memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
assert_hvf_ok(ret);
}
val = 0;
- ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
assert_hvf_ok(ret);
vfp_set_fpcr(env, val);
val = 0;
- ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
assert_hvf_ok(ret);
vfp_set_fpsr(env, val);
- ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
+ ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
assert_hvf_ok(ret);
pstate_write(env, val);
@@ -397,7 +397,7 @@ int hvf_get_registers(CPUState *cpu)
continue;
}
- ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
+ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
assert_hvf_ok(ret);
arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
@@ -420,24 +420,24 @@ int hvf_put_registers(CPUState *cpu)
for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
- ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
+ ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
assert_hvf_ok(ret);
}
for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
- ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+ ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
fpval);
assert_hvf_ok(ret);
}
- ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
+ ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
assert_hvf_ok(ret);
- ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
+ ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
assert_hvf_ok(ret);
- ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
+ ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
assert_hvf_ok(ret);
aarch64_save_sp(env, arm_current_el(env));
@@ -449,11 +449,11 @@ int hvf_put_registers(CPUState *cpu)
}
val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
assert_hvf_ok(ret);
}
- ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
+ ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
assert_hvf_ok(ret);
return 0;
@@ -474,7 +474,7 @@ static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
flush_cpu_state(cpu);
if (rt < 31) {
- r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
+ r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
assert_hvf_ok(r);
}
}
@@ -487,7 +487,7 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
flush_cpu_state(cpu);
if (rt < 31) {
- r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
+ r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
assert_hvf_ok(r);
}
@@ -629,22 +629,22 @@ int hvf_arch_init_vcpu(CPUState *cpu)
assert(write_cpustate_to_list(arm_cpu, false));
/* Set CP_NO_RAW system registers on init */
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
arm_cpu->midr);
assert_hvf_ok(ret);
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
arm_cpu->mp_affinity);
assert_hvf_ok(ret);
- ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
+ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
assert_hvf_ok(ret);
pfr |= env->gicv3state ? (1 << 24) : 0;
- ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
+ ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
assert_hvf_ok(ret);
/* We're limited to underlying hardware caps, override internal versions */
- ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
+ ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
&arm_cpu->isar.id_aa64mmfr0);
assert_hvf_ok(ret);
@@ -654,7 +654,7 @@ int hvf_arch_init_vcpu(CPUState *cpu)
void hvf_kick_vcpu_thread(CPUState *cpu)
{
cpus_kick_thread(cpu);
- hv_vcpus_exit(&cpu->hvf->fd, 1);
+ hv_vcpus_exit(&cpu->accel->fd, 1);
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
@@ -1191,13 +1191,13 @@ static int hvf_inject_interrupts(CPUState *cpu)
{
if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
trace_hvf_inject_fiq();
- hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
+ hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
true);
}
if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
trace_hvf_inject_irq();
- hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
+ hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
true);
}
@@ -1231,7 +1231,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
*/
qatomic_mb_set(&cpu->thread_kicked, false);
qemu_mutex_unlock_iothread();
- pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
+ pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
qemu_mutex_lock_iothread();
}
@@ -1252,7 +1252,7 @@ static void hvf_wfi(CPUState *cpu)
return;
}
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+ r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
assert_hvf_ok(r);
if (!(ctl & 1) || (ctl & 2)) {
@@ -1261,7 +1261,7 @@ static void hvf_wfi(CPUState *cpu)
return;
}
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
+ r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
assert_hvf_ok(r);
ticks_to_sleep = cval - hvf_vtimer_val();
@@ -1294,12 +1294,12 @@ static void hvf_sync_vtimer(CPUState *cpu)
uint64_t ctl;
bool irq_state;
- if (!cpu->hvf->vtimer_masked) {
+ if (!cpu->accel->vtimer_masked) {
/* We will get notified on vtimer changes by hvf, nothing to do */
return;
}
- r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+ r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
assert_hvf_ok(r);
irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
@@ -1308,8 +1308,8 @@ static void hvf_sync_vtimer(CPUState *cpu)
if (!irq_state) {
/* Timer no longer asserting, we can unmask it */
- hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
- cpu->hvf->vtimer_masked = false;
+ hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
+ cpu->accel->vtimer_masked = false;
}
}
@@ -1317,7 +1317,7 @@ int hvf_vcpu_exec(CPUState *cpu)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
- hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
+ hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
hv_return_t r;
bool advance_pc = false;
@@ -1332,7 +1332,7 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
qemu_mutex_unlock_iothread();
- assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
+ assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
@@ -1346,7 +1346,7 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
case HV_EXIT_REASON_VTIMER_ACTIVATED:
qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
- cpu->hvf->vtimer_masked = true;
+ cpu->accel->vtimer_masked = true;
return 0;
case HV_EXIT_REASON_CANCELED:
/* we got kicked, no exit to process */
@@ -1457,10 +1457,10 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
- r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
+ r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
assert_hvf_ok(r);
pc += 4;
- r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
+ r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
assert_hvf_ok(r);
}
--
2.38.1