From: Glauber Costa <glommer@redhat.com>
To: qemu-devel@nongnu.org
Cc: Jan Kiszka, aliguori@us.ibm.com
Date: Tue, 14 Jul 2009 12:47:48 -0400
Message-Id: <1247590068-565-1-git-send-email-glommer@redhat.com>
Subject: [Qemu-devel] [PATCH] introduce on_vcpu
List-Id: qemu-devel.nongnu.org

on_vcpu is a qemu-kvm function that makes sure a specific piece of
code runs on a requested cpu. We don't strictly need it yet, since
we are restricted to -smp 1 right now, but those days are likely to
end soon.

So, for the benefit of having qemu-kvm share more code with us, I'm
introducing our own version of on_vcpu(). For now, we either run the
function on the current cpu or abort execution, because anything else
would mean something is seriously wrong.

As example code, I "ported" kvm_update_guest_debug to use it, with
some slight differences from qemu-kvm.

This is probably 0.12 material.

Signed-off-by: Glauber Costa <glommer@redhat.com>
CC: Jan Kiszka
---
 kvm-all.c |   37 +++++++++++++++++++++++++++++++------
 1 files changed, 31 insertions(+), 6 deletions(-)

diff --git a/kvm-all.c b/kvm-all.c
index 4e913e5..1d91f2e 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -39,6 +39,8 @@
 do { } while (0)
 #endif
 
+CPUState *current_env;
+
 typedef struct KVMSlot
 {
     target_phys_addr_t start_addr;
@@ -145,6 +147,14 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
 
     return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 }
 
+static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
+{
+    if (env == current_env) {
+        func(data);
+        return;
+    }
+    abort();
+}
 int kvm_init_vcpu(CPUState *env)
 {
@@ -578,6 +588,7 @@ int kvm_cpu_exec(CPUState *env)
     int ret;
 
     dprintf("kvm_cpu_exec()\n");
+    current_env = env;
 
     do {
         if (env->exit_request) {
@@ -902,18 +913,32 @@ int kvm_sw_breakpoints_active(CPUState *env)
     return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
 }
 
+struct kvm_set_guest_debug_data {
+    struct kvm_guest_debug dbg;
+    CPUState *env;
+    int err;
+};
+
+static void kvm_invoke_set_guest_debug(void *data)
+{
+    struct kvm_set_guest_debug_data *dbg_data = data;
+    dbg_data->err = kvm_vcpu_ioctl(dbg_data->env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
+}
 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
 {
-    struct kvm_guest_debug dbg;
+    struct kvm_set_guest_debug_data data;
 
-    dbg.control = 0;
+    data.dbg.control = 0;
     if (env->singlestep_enabled)
-        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
+        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
 
-    kvm_arch_update_guest_debug(env, &dbg);
-    dbg.control |= reinject_trap;
+    kvm_arch_update_guest_debug(env, &data.dbg);
+    data.dbg.control |= reinject_trap;
+    data.env = env;
 
-    return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
+    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
+
+    return data.err;
 }
 
 int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
-- 
1.6.2.2
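
To illustrate the idiom outside of kvm-all.c: below is a minimal,
self-contained sketch of the on_vcpu() dispatch pattern used in the
patch above. CPUState, set_debug_data, and invoke_set_debug() here are
simplified stand-ins invented for the example, not QEMU's real types;
where this prints, the real callback issues a KVM_SET_GUEST_DEBUG ioctl.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for QEMU's per-vcpu state. */
typedef struct CPUState {
    int cpu_index;
} CPUState;

/* The vcpu whose thread is currently executing. */
static CPUState *current_env;

/* Run func(data) on the vcpu that owns env.  With -smp 1 the caller
 * is always on that vcpu, so any other case is a fatal logic error. */
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    if (env == current_env) {
        func(data);
        return;
    }
    abort();
}

/* Arguments in, result out: everything the callback needs travels
 * through one struct, as kvm_set_guest_debug_data does in the patch. */
struct set_debug_data {
    unsigned long control;
    CPUState *env;
    int err;
};

static void invoke_set_debug(void *data)
{
    struct set_debug_data *d = data;

    /* The real callback would do the KVM_SET_GUEST_DEBUG ioctl here. */
    printf("vcpu %d: control=0x%lx\n", d->env->cpu_index, d->control);
    d->err = 0;
}

int main(void)
{
    CPUState cpu = { .cpu_index = 0 };
    struct set_debug_data data = { .control = 0x1, .env = &cpu };

    current_env = &cpu;   /* set on vcpu-loop entry, as kvm_cpu_exec() does */
    on_vcpu(&cpu, invoke_set_debug, &data);

    return data.err;
}

Funneling both the arguments and the error code through a single struct
keeps the callback signature down to one void pointer, so the same
on_vcpu() dispatcher can serve any caller once real cross-vcpu dispatch
replaces the abort().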