From: Jes Sorensen <jes@sgi.com>
To: Hollis Blanchard <hollisb@us.ibm.com>
Cc: Glauber Costa <glommer@gmail.com>,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Subject: Re: [patch] fold struct vcpu_info into CPUState
Date: Mon, 27 Oct 2008 17:06:37 +0100
Message-ID: <4905E70D.2010400@sgi.com>
In-Reply-To: <1224875457.9634.63.camel@localhost.localdomain>
[-- Attachment #1: Type: text/plain, Size: 478 bytes --]
Hollis Blanchard wrote:
> This is 100% repeatable, and it doesn't happen without your patch
> applied.
>
> 0x10116f1c in kvm_update_interrupt_request (env=0x103d4718)
> at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/qemu-kvm.c:142
> 142 if (current_env->vcpu_info.created &&
> (gdb) bt
Hollis,
OK, I looked at the logic in this function again, and I think I got some
of it wrong when I did the conversion.
Could you try out this one instead?
Thanks,
Jes
[-- Attachment #2: 0010-qemu-kvm-vcpu_info-v3.1.patch --]
[-- Type: text/plain, Size: 15240 bytes --]
Merge vcpu_info into CPUState.
Move the definitions of the vcpu-related structs to a new header,
qemu-kvm-vcpu.h, and embed struct vcpu_info in the i386/ia64/ppc
CPUState structs when USE_KVM is defined. In addition, convert
qemu-kvm.c to access the per-vcpu state through CPUState.
This eliminates the ugly statically sized array of struct vcpu_info.
Signed-off-by: Jes Sorensen <jes@sgi.com>
---
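(Not part of the patch: a minimal standalone sketch of the access-pattern
change, using simplified stand-in types of my own; the real CPUState
layout and field names are the ones in the hunks below.)

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the real structures; names are mine. */
struct vcpu_info_stub {
    pthread_t thread;
    int created;
    int stopped;
};

struct cpu_state_stub {
    int cpu_index;
    struct vcpu_info_stub vcpu_info;    /* folded in, as this patch does */
    struct cpu_state_stub *next_cpu;
};

/*
 * Before: per-vcpu state lived in a fixed-size global array indexed by
 * cpu_index, e.g.  vcpu_info[env->cpu_index].stopped = 1;
 * After: the state is a member of CPUState, so code that holds the env
 * pointer dereferences it directly, and loops walk the existing
 * first_cpu/next_cpu list instead of iterating 0..smp_cpus-1.
 */
static void mark_stopped(struct cpu_state_stub *env)
{
    env->vcpu_info.stopped = 1;
}

static int all_created(struct cpu_state_stub *first)
{
    struct cpu_state_stub *penv;

    for (penv = first; penv; penv = penv->next_cpu)
        if (!penv->vcpu_info.created)
            return 0;
    return 1;
}

int main(void)
{
    struct cpu_state_stub cpu1 = { .cpu_index = 1 };
    struct cpu_state_stub cpu0 = { .cpu_index = 0, .next_cpu = &cpu1 };

    cpu0.vcpu_info.created = cpu1.vcpu_info.created = 1;
    mark_stopped(&cpu1);
    printf("all created: %d, cpu1 stopped: %d\n",
           all_created(&cpu0), cpu1.vcpu_info.stopped);
    return 0;
}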
libkvm/kvm-common.h | 4 -
libkvm/libkvm.c | 14 ++--
libkvm/libkvm.h | 6 +-
qemu/qemu-kvm-vcpu.h | 34 +++++++++++
qemu/qemu-kvm.c | 146 +++++++++++++++++++++++++------------------------
qemu/target-i386/cpu.h | 4 +
qemu/target-ia64/cpu.h | 5 +
qemu/target-ppc/cpu.h | 5 +
8 files changed, 136 insertions(+), 82 deletions(-)
Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
void kvm_show_code(kvm_context_t kvm, int vcpu);
int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
void post_kvm_run(kvm_context_t kvm, int vcpu);
int pre_kvm_run(kvm_context_t kvm, int vcpu);
int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
int try_push_interrupts(kvm_context_t kvm);
#endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
return 0;
}
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->debug(kvm->opaque, vcpu);
+ return kvm->callbacks->debug(kvm->opaque, env);
}
int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
return kvm->callbacks->halt(kvm->opaque, vcpu);
}
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+ return kvm->callbacks->shutdown(kvm->opaque, env);
}
int try_push_interrupts(kvm_context_t kvm)
@@ -872,7 +872,7 @@
#endif
}
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
{
int r;
int fd = kvm->vcpu_fd[vcpu];
@@ -948,7 +948,7 @@
r = handle_io(kvm, run, vcpu);
break;
case KVM_EXIT_DEBUG:
- r = handle_debug(kvm, vcpu);
+ r = handle_debug(kvm, env);
break;
case KVM_EXIT_MMIO:
r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
#endif
break;
case KVM_EXIT_SHUTDOWN:
- r = handle_shutdown(kvm, vcpu);
+ r = handle_shutdown(kvm, env);
break;
#if defined(__s390__)
case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
/// generic memory writes to unmapped memory (For MMIO devices)
int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
int len);
- int (*debug)(void *opaque, int vcpu);
+ int (*debug)(void *opaque, void *env);
/*!
* \brief Called when the VCPU issues an 'hlt' instruction.
*
@@ -63,7 +63,7 @@
* on the host CPU.
*/
int (*halt)(void *opaque, int vcpu);
- int (*shutdown)(void *opaque, int vcpu);
+ int (*shutdown)(void *opaque, void *env);
int (*io_window)(void *opaque);
int (*try_push_interrupts)(void *opaque);
int (*try_push_nmi)(void *opaque);
@@ -181,7 +181,7 @@
* return except for when an error has occured, or when you have sent it
* an EINTR signal.
*/
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
/*!
* \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
===================================================================
--- /dev/null
+++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
@@ -0,0 +1,34 @@
+/*
+ * qemu/kvm vcpu definitions
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+#ifndef QEMU_KVM_VCPU_H
+#define QEMU_KVM_VCPU_H
+
+#include <pthread.h>
+
+struct qemu_kvm_work_item {
+ struct qemu_kvm_work_item *next;
+ void (*func)(void *data);
+ void *data;
+ int done;
+};
+
+/*
+ * KVM vcpu struct
+ */
+struct vcpu_info {
+ int sipi_needed;
+ int init;
+ pthread_t thread;
+ int signalled;
+ int stop;
+ int stopped;
+ int created;
+ struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
+};
+
+#endif
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -22,13 +22,13 @@
#include "compatfd.h"
#include "qemu-kvm.h"
+#include "qemu-kvm-vcpu.h"
#include <libkvm.h>
#include <pthread.h>
#include <sys/utsname.h>
#include <sys/syscall.h>
#include <sys/mman.h>
-#define bool _Bool
#define false 0
#define true 1
@@ -43,31 +43,12 @@
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
static int qemu_system_ready;
#define SIG_IPI (SIGRTMIN+4)
-struct qemu_kvm_work_item {
- struct qemu_kvm_work_item *next;
- void (*func)(void *data);
- void *data;
- bool done;
-};
-
-struct vcpu_info {
- CPUState *env;
- int sipi_needed;
- int init;
- pthread_t thread;
- int signalled;
- int stop;
- int stopped;
- int created;
- struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
pthread_t io_thread;
static int io_thread_fd = -1;
static int io_thread_sigfd = -1;
@@ -93,7 +74,20 @@
CPUState *qemu_kvm_cpu_env(int index)
{
- return vcpu_info[index].env;
+ CPUState *penv;
+
+ if (current_env->cpu_index == index)
+ return current_env;
+
+ penv = first_cpu;
+
+ while (penv) {
+ if (penv->cpu_index == index)
+ return penv;
+ penv = (CPUState *)penv->next_cpu;
+ }
+
+ return NULL;
}
static void sig_ipi_handler(int n)
@@ -102,10 +96,10 @@
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
- struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+ struct vcpu_info *vi = &env->vcpu_info;
struct qemu_kvm_work_item wi;
- if (vi == vcpu) {
+ if (env == current_env) {
func(data);
return;
}
@@ -127,7 +121,7 @@
static void inject_interrupt(void *data)
{
- cpu_interrupt(vcpu->env, (int)data);
+ cpu_interrupt(current_env, (int)data);
}
void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +134,33 @@
int signal = 0;
if (env) {
- if (!vcpu)
+ if (current_env && !current_env->vcpu_info.created)
signal = 1;
- if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+ /*
+ * Testing for vcpu_info.created here is really redundant
+ */
+ if (current_env && current_env->vcpu_info.created &&
+ env != current_env && !env->vcpu_info.signalled)
signal = 1;
if (signal) {
- vcpu_info[env->cpu_index].signalled = 1;
- if (vcpu_info[env->cpu_index].thread)
- pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+ env->vcpu_info.signalled = 1;
+ if (env->vcpu_info.thread)
+ pthread_kill(env->vcpu_info.thread, SIG_IPI);
}
}
}
void kvm_update_after_sipi(CPUState *env)
{
- vcpu_info[env->cpu_index].sipi_needed = 1;
+ env->vcpu_info.sipi_needed = 1;
kvm_update_interrupt_request(env);
}
void kvm_apic_init(CPUState *env)
{
if (env->cpu_index != 0)
- vcpu_info[env->cpu_index].init = 1;
+ env->vcpu_info.init = 1;
kvm_update_interrupt_request(env);
}
@@ -227,7 +225,7 @@
{
int r;
- r = kvm_run(kvm_context, env->cpu_index);
+ r = kvm_run(kvm_context, env->cpu_index, env);
if (r < 0) {
printf("kvm_run returned %d\n", r);
exit(1);
@@ -240,7 +238,7 @@
static int has_work(CPUState *env)
{
- if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+ if (!vm_running || (env && env->vcpu_info.stopped))
return 0;
if (!env->halted)
return 1;
@@ -249,7 +247,7 @@
static void flush_queued_work(CPUState *env)
{
- struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+ struct vcpu_info *vi = &env->vcpu_info;
struct qemu_kvm_work_item *wi;
if (!vi->queued_work_first)
@@ -266,6 +264,7 @@
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
+ struct vcpu_info *vi = &env->vcpu_info;
struct timespec ts;
int r, e;
siginfo_t siginfo;
@@ -291,49 +290,55 @@
cpu_single_env = env;
flush_queued_work(env);
- if (vcpu_info[env->cpu_index].stop) {
- vcpu_info[env->cpu_index].stop = 0;
- vcpu_info[env->cpu_index].stopped = 1;
+ if (vi->stop) {
+ vi->stop = 0;
+ vi->stopped = 1;
pthread_cond_signal(&qemu_pause_cond);
}
- vcpu_info[env->cpu_index].signalled = 0;
+ vi->signalled = 0;
}
static int all_threads_paused(void)
{
- int i;
+ CPUState *penv = first_cpu;
+
+ while (penv) {
+ if (penv->vcpu_info.stop)
+ return 0;
+ penv = (CPUState *)penv->next_cpu;
+ }
- for (i = 0; i < smp_cpus; ++i)
- if (vcpu_info[i].stop)
- return 0;
return 1;
}
static void pause_all_threads(void)
{
- int i;
+ CPUState *penv = first_cpu;
assert(!cpu_single_env);
- for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].stop = 1;
- pthread_kill(vcpu_info[i].thread, SIG_IPI);
+ while (penv) {
+ penv->vcpu_info.stop = 1;
+ pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+ penv = (CPUState *)penv->next_cpu;
}
+
while (!all_threads_paused())
qemu_cond_wait(&qemu_pause_cond);
}
static void resume_all_threads(void)
{
- int i;
+ CPUState *penv = first_cpu;
assert(!cpu_single_env);
- for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].stop = 0;
- vcpu_info[i].stopped = 0;
- pthread_kill(vcpu_info[i].thread, SIG_IPI);
+ while (penv) {
+ penv->vcpu_info.stop = 0;
+ penv->vcpu_info.stopped = 0;
+ pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+ penv = (CPUState *)penv->next_cpu;
}
}
@@ -348,7 +353,7 @@
static void update_regs_for_sipi(CPUState *env)
{
kvm_arch_update_regs_for_sipi(env);
- vcpu_info[env->cpu_index].sipi_needed = 0;
+ env->vcpu_info.sipi_needed = 0;
}
static void update_regs_for_init(CPUState *env)
@@ -361,11 +366,11 @@
#ifdef TARGET_I386
/* restore SIPI vector */
- if(vcpu_info[env->cpu_index].sipi_needed)
+ if(env->vcpu_info.sipi_needed)
env->segs[R_CS] = cs;
-
- vcpu_info[env->cpu_index].init = 0;
#endif
+
+ env->vcpu_info.init = 0;
kvm_arch_load_regs(env);
}
@@ -387,21 +392,23 @@
void qemu_kvm_system_reset(void)
{
- int i;
+ CPUState *penv = first_cpu;
pause_all_threads();
qemu_system_reset();
- for (i = 0; i < smp_cpus; ++i)
- kvm_arch_cpu_reset(vcpu_info[i].env);
+ while (penv) {
+ kvm_arch_cpu_reset(penv);
+ penv = (CPUState *)penv->next_cpu;
+ }
resume_all_threads();
}
static int kvm_main_loop_cpu(CPUState *env)
{
- struct vcpu_info *info = &vcpu_info[env->cpu_index];
+ struct vcpu_info *info = &env->vcpu_info;
setup_kernel_sigmask(env);
@@ -442,9 +449,8 @@
CPUState *env = _env;
sigset_t signals;
- vcpu = &vcpu_info[env->cpu_index];
- vcpu->env = env;
- vcpu->env->thread_id = kvm_get_thread_id();
+ current_env = env;
+ env->thread_id = kvm_get_thread_id();
sigfillset(&signals);
sigprocmask(SIG_BLOCK, &signals, NULL);
kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -452,7 +458,7 @@
/* signal VCPU creation */
pthread_mutex_lock(&qemu_mutex);
- vcpu->created = 1;
+ current_env->vcpu_info.created = 1;
pthread_cond_signal(&qemu_vcpu_cond);
/* and wait for machine initialization */
@@ -466,9 +472,9 @@
void kvm_init_new_ap(int cpu, CPUState *env)
{
- pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+ pthread_create(&env->vcpu_info.thread, NULL, ap_main_loop, env);
- while (vcpu_info[cpu].created == 0)
+ while (env->vcpu_info.created == 0)
qemu_cond_wait(&qemu_vcpu_cond);
}
@@ -624,10 +630,10 @@
return 0;
}
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, struct CPUState *env)
{
kvm_debug_stop_requested = 1;
- vcpu_info[vcpu].stopped = 1;
+ env->vcpu_info.stopped = 1;
return 1;
}
@@ -721,10 +727,10 @@
return kvm_arch_halt(opaque, vcpu);
}
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, struct CPUState *env)
{
/* stop the current vcpu from going back to guest mode */
- vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+ env->vcpu_info.stopped = 1;
qemu_system_reset_request();
return 1;
Index: kvm-userspace.git/qemu/target-i386/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-i386/cpu.h
+++ kvm-userspace.git/qemu/target-i386/cpu.h
@@ -45,6 +45,7 @@
#include "cpu-defs.h"
#include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
#define R_EAX 0
#define R_ECX 1
@@ -622,6 +623,9 @@
#define NR_IRQ_WORDS (256/ BITS_PER_LONG)
uint32_t kvm_interrupt_bitmap[NR_IRQ_WORDS];
+#ifdef USE_KVM
+ struct vcpu_info vcpu_info;
+#endif
/* in order to simplify APIC support, we leave this pointer to the
user */
struct APICState *apic_state;
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,10 +40,15 @@
#include "cpu-defs.h"
#include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
+
typedef struct CPUIA64State {
CPU_COMMON;
uint32_t hflags;
int mp_state;
+#ifdef USE_KVM
+ struct vcpu_info vcpu_info;
+#endif
} CPUIA64State;
#define CPUState CPUIA64State
Index: kvm-userspace.git/qemu/target-ppc/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ppc/cpu.h
+++ kvm-userspace.git/qemu/target-ppc/cpu.h
@@ -22,6 +22,7 @@
#include "config.h"
#include <inttypes.h>
+#include "qemu-kvm-vcpu.h"
//#define PPC_EMULATE_32BITS_HYPV
@@ -578,6 +579,10 @@
CPU_COMMON
+#ifdef USE_KVM
+ struct vcpu_info vcpu_info;
+#endif
+
int access_type; /* when a memory exception occurs, the access
type is stored here */