From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Reinoud Zandijk" <reinoud@netbsd.org>,
qemu-arm@nongnu.org, kvm@vger.kernel.org,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
"Stefano Stabellini" <sstabellini@kernel.org>,
"Anthony Perard" <anthony.perard@citrix.com>,
"Yanan Wang" <wangyanan55@huawei.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Peter Maydell" <peter.maydell@linaro.org>,
"Roman Bolshakov" <rbolshakov@ddn.com>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Paul Durrant" <paul@xen.org>,
"Sunil Muthuswamy" <sunilmut@microsoft.com>,
"Alexander Graf" <agraf@csgraf.de>,
"Richard Henderson" <richard.henderson@linaro.org>,
xen-devel@lists.xenproject.org,
"Eduardo Habkost" <eduardo@habkost.net>,
"Cameron Esfahani" <dirty@apple.com>
Subject: [PATCH v2 10/16] accel: Rename NVMM 'struct qemu_vcpu' -> AccelCPUState
Date: Thu, 22 Jun 2023 18:08:17 +0200
Message-ID: <20230622160823.71851-11-philmd@linaro.org>
In-Reply-To: <20230622160823.71851-1-philmd@linaro.org>
We want all accelerators to share the same opaque pointer in
CPUState. Rename the NVMM-specific 'struct qemu_vcpu' to
'AccelCPUState', use the typedef directly, and remove the
now-unnecessary casts.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
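Note (not part of the commit message): a minimal sketch of the pattern
this rename enables, assuming the AccelCPUState typedef is declared by
the common CPUState headers elsewhere in this series; the snippet is
illustrative only and abridges the real NVMM structure.

  /* Each accelerator now defines the common 'struct AccelCPUState'
   * with its own private fields; CPUState::accel carries it as the
   * single shared opaque pointer. */
  struct AccelCPUState {
      struct nvmm_vcpu vcpu;   /* NVMM per-vCPU handle */
      uint8_t tpr;
      bool stop;
      /* ... remaining NVMM fields unchanged ... */
  };

  /* Before the rename, every access site needed a cast:
   *     struct qemu_vcpu *qcpu = (struct qemu_vcpu *)cpu->accel;
   * after it, the shared typedef is used directly:
   *     AccelCPUState *qcpu = cpu->accel;
   */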
target/i386/nvmm/nvmm-all.c | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index 0588a328ae..e5ee4af084 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -26,7 +26,7 @@
#include <nvmm.h>
-struct qemu_vcpu {
+struct AccelCPUState {
struct nvmm_vcpu vcpu;
uint8_t tpr;
bool stop;
@@ -49,10 +49,10 @@ struct qemu_machine {
static bool nvmm_allowed;
static struct qemu_machine qemu_mach;
-static struct qemu_vcpu *
+static AccelCPUState *
get_qemu_vcpu(CPUState *cpu)
{
- return (struct qemu_vcpu *)cpu->accel;
+ return cpu->accel;
}
static struct nvmm_machine *
@@ -86,7 +86,7 @@ nvmm_set_registers(CPUState *cpu)
{
CPUX86State *env = cpu->env_ptr;
struct nvmm_machine *mach = get_nvmm_mach();
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
struct nvmm_x64_state *state = vcpu->state;
uint64_t bitmap;
@@ -223,7 +223,7 @@ nvmm_get_registers(CPUState *cpu)
{
CPUX86State *env = cpu->env_ptr;
struct nvmm_machine *mach = get_nvmm_mach();
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
X86CPU *x86_cpu = X86_CPU(cpu);
struct nvmm_x64_state *state = vcpu->state;
@@ -347,7 +347,7 @@ static bool
nvmm_can_take_int(CPUState *cpu)
{
CPUX86State *env = cpu->env_ptr;
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
struct nvmm_machine *mach = get_nvmm_mach();
@@ -372,7 +372,7 @@ nvmm_can_take_int(CPUState *cpu)
static bool
nvmm_can_take_nmi(CPUState *cpu)
{
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
/*
* Contrary to INTs, NMIs always schedule an exit when they are
@@ -395,7 +395,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
{
CPUX86State *env = cpu->env_ptr;
struct nvmm_machine *mach = get_nvmm_mach();
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
X86CPU *x86_cpu = X86_CPU(cpu);
struct nvmm_x64_state *state = vcpu->state;
@@ -478,7 +478,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
static void
nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
{
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
CPUX86State *env = cpu->env_ptr;
X86CPU *x86_cpu = X86_CPU(cpu);
uint64_t tpr;
@@ -565,7 +565,7 @@ static int
nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu,
struct nvmm_vcpu_exit *exit)
{
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
X86CPU *x86_cpu = X86_CPU(cpu);
struct nvmm_x64_state *state = vcpu->state;
@@ -610,7 +610,7 @@ static int
nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu,
struct nvmm_vcpu_exit *exit)
{
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
X86CPU *x86_cpu = X86_CPU(cpu);
struct nvmm_x64_state *state = vcpu->state;
@@ -686,7 +686,7 @@ nvmm_vcpu_loop(CPUState *cpu)
{
CPUX86State *env = cpu->env_ptr;
struct nvmm_machine *mach = get_nvmm_mach();
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
X86CPU *x86_cpu = X86_CPU(cpu);
struct nvmm_vcpu_exit *exit = vcpu->exit;
@@ -892,7 +892,7 @@ static void
nvmm_ipi_signal(int sigcpu)
{
if (current_cpu) {
- struct qemu_vcpu *qcpu = get_qemu_vcpu(current_cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(current_cpu);
#if NVMM_USER_VERSION >= 2
struct nvmm_vcpu *vcpu = &qcpu->vcpu;
nvmm_vcpu_stop(vcpu);
@@ -926,7 +926,7 @@ nvmm_init_vcpu(CPUState *cpu)
struct nvmm_vcpu_conf_cpuid cpuid;
struct nvmm_vcpu_conf_tpr tpr;
Error *local_error = NULL;
- struct qemu_vcpu *qcpu;
+ AccelCPUState *qcpu;
int ret, err;
nvmm_init_cpu_signals();
@@ -942,7 +942,7 @@ nvmm_init_vcpu(CPUState *cpu)
}
}
- qcpu = g_malloc0(sizeof(*qcpu));
+ qcpu = g_new0(AccelCPUState, 1);
ret = nvmm_vcpu_create(mach, cpu->cpu_index, &qcpu->vcpu);
if (ret == -1) {
@@ -991,7 +991,7 @@ nvmm_init_vcpu(CPUState *cpu)
}
cpu->vcpu_dirty = true;
- cpu->accel = (struct AccelCPUState *)qcpu;
+ cpu->accel = qcpu;
return 0;
}
@@ -1023,7 +1023,7 @@ void
nvmm_destroy_vcpu(CPUState *cpu)
{
struct nvmm_machine *mach = get_nvmm_mach();
- struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+ AccelCPUState *qcpu = get_qemu_vcpu(cpu);
nvmm_vcpu_destroy(mach, &qcpu->vcpu);
g_free(cpu->accel);
--
2.38.1