* [patch] remove vcpu_info array v5
@ 2008-10-30 14:09 Jes Sorensen
2008-11-04 11:54 ` Avi Kivity
0 siblings, 1 reply; 26+ messages in thread
From: Jes Sorensen @ 2008-10-30 14:09 UTC (permalink / raw)
To: Avi Kivity, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, "kvm-ia64@vger.kernel.org" <kvm
[-- Attachment #1: Type: text/plain, Size: 449 bytes --]
Hi,
So here we go, trying to catch up on the PCI passthrough patch revision
number, here's v5 of the struct vcpu_info patch.
In the end I decided to merge the contents of struct vcpu_info directly
into CPU_COMMON as it was silly to create new files just to remove them
in the next patch again.
This boots for me on ia64 and builds on x86_64, obviously it is 100%
perfect <tm>!
Comments, bug reports, or upstream merge, welcome :-)
Cheers,
Jes
[-- Attachment #2: 0010-qemu-kvm-vcpu_info-v5.patch --]
[-- Type: text/plain, Size: 21663 bytes --]
Merge vcpu_info into CPUState.
Move contents of struct vcpu_info directly into CPU_COMMON. Rename
struct qemu_kvm_work_item to qemu_work_item as it really isn't KVM
specific.
This eliminates the ugly static sized array of struct vcpu_info.
Signed-off-by: Jes Sorensen <jes@sgi.com>
---
libkvm/kvm-common.h | 8 +-
libkvm/libkvm.c | 28 +++----
libkvm/libkvm.h | 10 +-
qemu/cpu-defs.h | 15 +++-
qemu/hw/acpi.c | 18 ++++
qemu/qemu-common.h | 8 ++
qemu/qemu-kvm-ia64.c | 4 -
qemu/qemu-kvm-powerpc.c | 5 -
qemu/qemu-kvm-x86.c | 11 +--
qemu/qemu-kvm.c | 175 ++++++++++++++++++++++--------------------------
qemu/qemu-kvm.h | 6 -
qemu/target-ia64/cpu.h | 1
12 files changed, 156 insertions(+), 133 deletions(-)
Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
void kvm_show_code(kvm_context_t kvm, int vcpu);
int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
-void post_kvm_run(kvm_context_t kvm, int vcpu);
-int pre_kvm_run(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
+void post_kvm_run(kvm_context_t kvm, void *env);
+int pre_kvm_run(kvm_context_t kvm, void *env);
int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
int try_push_interrupts(kvm_context_t kvm);
#endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
return 0;
}
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->debug(kvm->opaque, vcpu);
+ return kvm->callbacks->debug(kvm->opaque, env);
}
int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
return kvm->callbacks->halt(kvm->opaque, vcpu);
}
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+ return kvm->callbacks->shutdown(kvm->opaque, env);
}
int try_push_interrupts(kvm_context_t kvm)
@@ -837,14 +837,14 @@
return kvm->callbacks->try_push_nmi(kvm->opaque);
}
-void post_kvm_run(kvm_context_t kvm, int vcpu)
+void post_kvm_run(kvm_context_t kvm, void *env)
{
- kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
+ kvm->callbacks->post_kvm_run(kvm->opaque, env);
}
-int pre_kvm_run(kvm_context_t kvm, int vcpu)
+int pre_kvm_run(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
+ return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
}
int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
@@ -872,7 +872,7 @@
#endif
}
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
{
int r;
int fd = kvm->vcpu_fd[vcpu];
@@ -886,19 +886,19 @@
if (!kvm->irqchip_in_kernel)
run->request_interrupt_window = try_push_interrupts(kvm);
#endif
- r = pre_kvm_run(kvm, vcpu);
+ r = pre_kvm_run(kvm, env);
if (r)
return r;
r = ioctl(fd, KVM_RUN, 0);
if (r == -1 && errno != EINTR && errno != EAGAIN) {
r = -errno;
- post_kvm_run(kvm, vcpu);
+ post_kvm_run(kvm, env);
fprintf(stderr, "kvm_run: %s\n", strerror(-r));
return r;
}
- post_kvm_run(kvm, vcpu);
+ post_kvm_run(kvm, env);
#if defined(KVM_CAP_COALESCED_MMIO)
if (kvm->coalesced_mmio) {
@@ -948,7 +948,7 @@
r = handle_io(kvm, run, vcpu);
break;
case KVM_EXIT_DEBUG:
- r = handle_debug(kvm, vcpu);
+ r = handle_debug(kvm, env);
break;
case KVM_EXIT_MMIO:
r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
#endif
break;
case KVM_EXIT_SHUTDOWN:
- r = handle_shutdown(kvm, vcpu);
+ r = handle_shutdown(kvm, env);
break;
#if defined(__s390__)
case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
/// generic memory writes to unmapped memory (For MMIO devices)
int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
int len);
- int (*debug)(void *opaque, int vcpu);
+ int (*debug)(void *opaque, void *env);
/*!
* \brief Called when the VCPU issues an 'hlt' instruction.
*
@@ -63,12 +63,12 @@
* on the host CPU.
*/
int (*halt)(void *opaque, int vcpu);
- int (*shutdown)(void *opaque, int vcpu);
+ int (*shutdown)(void *opaque, void *env);
int (*io_window)(void *opaque);
int (*try_push_interrupts)(void *opaque);
int (*try_push_nmi)(void *opaque);
- void (*post_kvm_run)(void *opaque, int vcpu);
- int (*pre_kvm_run)(void *opaque, int vcpu);
+ void (*post_kvm_run)(void *opaque, void *env);
+ int (*pre_kvm_run)(void *opaque, void *env);
int (*tpr_access)(void *opaque, int vcpu, uint64_t rip, int is_write);
#if defined(__powerpc__)
int (*powerpc_dcr_read)(int vcpu, uint32_t dcrn, uint32_t *data);
@@ -181,7 +181,7 @@
* return except for when an error has occured, or when you have sent it
* an EINTR signal.
*/
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
/*!
* \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/cpu-defs.h
===================================================================
--- kvm-userspace.git.orig/qemu/cpu-defs.h
+++ kvm-userspace.git/qemu/cpu-defs.h
@@ -27,6 +27,7 @@
#include "config.h"
#include <setjmp.h>
#include <inttypes.h>
+#include <pthread.h>
#include "osdep.h"
#ifndef TARGET_LONG_BITS
@@ -142,6 +143,9 @@
} icount_decr_u16;
#endif
+/* forward decleration */
+struct qemu_work_item;
+
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
struct TranslationBlock *current_tb; /* currently executing TB */ \
@@ -200,6 +204,15 @@
/* user data */ \
void *opaque; \
\
- const char *cpu_model_str;
+ const char *cpu_model_str; \
+ \
+ int sipi_needed; \
+ int init; \
+ pthread_t thread; \
+ int signalled; \
+ int stop; \
+ int stopped; \
+ int created; \
+ struct qemu_work_item *queued_work_first, *queued_work_last;
#endif
Index: kvm-userspace.git/qemu/hw/acpi.c
===================================================================
--- kvm-userspace.git.orig/qemu/hw/acpi.c
+++ kvm-userspace.git/qemu/hw/acpi.c
@@ -722,6 +722,24 @@
}
#if defined(TARGET_I386) || defined(TARGET_X86_64)
+#ifdef USE_KVM
+static CPUState *qemu_kvm_cpu_env(int index)
+{
+ CPUState *penv;
+
+ penv = first_cpu;
+
+ while (penv) {
+ if (penv->cpu_index == index)
+ return penv;
+ penv = (CPUState *)penv->next_cpu;
+ }
+
+ return NULL;
+}
+#endif
+
+
void qemu_system_cpu_hot_add(int cpu, int state)
{
CPUState *env;
Index: kvm-userspace.git/qemu/qemu-common.h
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-common.h
+++ kvm-userspace.git/qemu/qemu-common.h
@@ -143,4 +143,12 @@
void cpu_save(QEMUFile *f, void *opaque);
int cpu_load(QEMUFile *f, void *opaque, int version_id);
+/* work queue */
+struct qemu_work_item {
+ struct qemu_kvm_work_item *next;
+ void (*func)(void *data);
+ void *data;
+ int done;
+};
+
#endif
Index: kvm-userspace.git/qemu/qemu-kvm-ia64.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-ia64.c
+++ kvm-userspace.git/qemu/qemu-kvm-ia64.c
@@ -39,11 +39,11 @@
return 1;
}
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
}
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
}
Index: kvm-userspace.git/qemu/qemu-kvm-powerpc.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-powerpc.c
+++ kvm-userspace.git/qemu/qemu-kvm-powerpc.c
@@ -142,14 +142,13 @@
return 1;
}
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
return;
}
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
- CPUState *env = qemu_kvm_cpu_env(vcpu);
cpu_single_env = env;
}
Index: kvm-userspace.git/qemu/qemu-kvm-x86.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-x86.c
+++ kvm-userspace.git/qemu/qemu-kvm-x86.c
@@ -618,17 +618,16 @@
return 1;
}
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
- CPUState *env = cpu_single_env;
-
if (!kvm_irqchip_in_kernel(kvm_context))
- kvm_set_cr8(kvm_context, vcpu, cpu_get_apic_tpr(env));
+ kvm_set_cr8(kvm_context, env->cpu_index, cpu_get_apic_tpr(env));
}
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
- CPUState *env = qemu_kvm_cpu_env(vcpu);
+ int vcpu = env->cpu_index;
+
cpu_single_env = env;
env->eflags = kvm_get_interrupt_flag(kvm_context, vcpu)
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -28,7 +28,6 @@
#include <sys/syscall.h>
#include <sys/mman.h>
-#define bool _Bool
#define false 0
#define true 1
@@ -43,31 +42,12 @@
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
static int qemu_system_ready;
#define SIG_IPI (SIGRTMIN+4)
-struct qemu_kvm_work_item {
- struct qemu_kvm_work_item *next;
- void (*func)(void *data);
- void *data;
- bool done;
-};
-
-struct vcpu_info {
- CPUState *env;
- int sipi_needed;
- int init;
- pthread_t thread;
- int signalled;
- int stop;
- int stopped;
- int created;
- struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
pthread_t io_thread;
static int io_thread_fd = -1;
static int io_thread_sigfd = -1;
@@ -91,43 +71,37 @@
cpu_single_env = env;
}
-CPUState *qemu_kvm_cpu_env(int index)
-{
- return vcpu_info[index].env;
-}
-
static void sig_ipi_handler(int n)
{
}
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
- struct vcpu_info *vi = &vcpu_info[env->cpu_index];
- struct qemu_kvm_work_item wi;
+ struct qemu_work_item wi;
- if (vi == vcpu) {
+ if (env == current_env) {
func(data);
return;
}
wi.func = func;
wi.data = data;
- if (!vi->queued_work_first)
- vi->queued_work_first = &wi;
+ if (!env->queued_work_first)
+ env->queued_work_first = &wi;
else
- vi->queued_work_last->next = &wi;
- vi->queued_work_last = &wi;
+ env->queued_work_last->next = &wi;
+ env->queued_work_last = &wi;
wi.next = NULL;
wi.done = false;
- pthread_kill(vi->thread, SIG_IPI);
+ pthread_kill(env->thread, SIG_IPI);
while (!wi.done)
qemu_cond_wait(&qemu_work_cond);
}
static void inject_interrupt(void *data)
{
- cpu_interrupt(vcpu->env, (int)data);
+ cpu_interrupt(current_env, (int)data);
}
void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +114,33 @@
int signal = 0;
if (env) {
- if (!vcpu)
+ if (current_env && !current_env->created)
signal = 1;
- if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+ /*
+ * Testing for created here is really redundant
+ */
+ if (current_env && current_env->created &&
+ env != current_env && !env->signalled)
signal = 1;
if (signal) {
- vcpu_info[env->cpu_index].signalled = 1;
- if (vcpu_info[env->cpu_index].thread)
- pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+ env->signalled = 1;
+ if (env->thread)
+ pthread_kill(env->thread, SIG_IPI);
}
}
}
void kvm_update_after_sipi(CPUState *env)
{
- vcpu_info[env->cpu_index].sipi_needed = 1;
+ env->sipi_needed = 1;
kvm_update_interrupt_request(env);
}
void kvm_apic_init(CPUState *env)
{
if (env->cpu_index != 0)
- vcpu_info[env->cpu_index].init = 1;
+ env->init = 1;
kvm_update_interrupt_request(env);
}
@@ -178,18 +156,19 @@
return kvm_arch_try_push_nmi(opaque);
}
-static void post_kvm_run(void *opaque, int vcpu)
+static void post_kvm_run(void *opaque, void *data)
{
+ CPUState *env = (CPUState *)data;
pthread_mutex_lock(&qemu_mutex);
- kvm_arch_post_kvm_run(opaque, vcpu);
+ kvm_arch_post_kvm_run(opaque, env);
}
-static int pre_kvm_run(void *opaque, int vcpu)
+static int pre_kvm_run(void *opaque, void *data)
{
- CPUState *env = qemu_kvm_cpu_env(vcpu);
+ CPUState *env = (CPUState *)data;
- kvm_arch_pre_kvm_run(opaque, vcpu);
+ kvm_arch_pre_kvm_run(opaque, env);
if (env->interrupt_request & CPU_INTERRUPT_EXIT)
return 1;
@@ -227,7 +206,7 @@
{
int r;
- r = kvm_run(kvm_context, env->cpu_index);
+ r = kvm_run(kvm_context, env->cpu_index, env);
if (r < 0) {
printf("kvm_run returned %d\n", r);
exit(1);
@@ -240,7 +219,7 @@
static int has_work(CPUState *env)
{
- if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+ if (!vm_running || (env && env->stopped))
return 0;
if (!env->halted)
return 1;
@@ -249,18 +228,17 @@
static void flush_queued_work(CPUState *env)
{
- struct vcpu_info *vi = &vcpu_info[env->cpu_index];
- struct qemu_kvm_work_item *wi;
+ struct qemu_work_item *wi;
- if (!vi->queued_work_first)
+ if (!env->queued_work_first)
return;
- while ((wi = vi->queued_work_first)) {
- vi->queued_work_first = wi->next;
+ while ((wi = env->queued_work_first)) {
+ env->queued_work_first = wi->next;
wi->func(wi->data);
wi->done = true;
}
- vi->queued_work_last = NULL;
+ env->queued_work_last = NULL;
pthread_cond_broadcast(&qemu_work_cond);
}
@@ -291,49 +269,55 @@
cpu_single_env = env;
flush_queued_work(env);
- if (vcpu_info[env->cpu_index].stop) {
- vcpu_info[env->cpu_index].stop = 0;
- vcpu_info[env->cpu_index].stopped = 1;
+ if (env->stop) {
+ env->stop = 0;
+ env->stopped = 1;
pthread_cond_signal(&qemu_pause_cond);
}
- vcpu_info[env->cpu_index].signalled = 0;
+ env->signalled = 0;
}
static int all_threads_paused(void)
{
- int i;
+ CPUState *penv = first_cpu;
+
+ while (penv) {
+ if (penv->stop)
+ return 0;
+ penv = (CPUState *)penv->next_cpu;
+ }
- for (i = 0; i < smp_cpus; ++i)
- if (vcpu_info[i].stop)
- return 0;
return 1;
}
static void pause_all_threads(void)
{
- int i;
+ CPUState *penv = first_cpu;
assert(!cpu_single_env);
- for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].stop = 1;
- pthread_kill(vcpu_info[i].thread, SIG_IPI);
+ while (penv) {
+ penv->stop = 1;
+ pthread_kill(penv->thread, SIG_IPI);
+ penv = (CPUState *)penv->next_cpu;
}
+
while (!all_threads_paused())
qemu_cond_wait(&qemu_pause_cond);
}
static void resume_all_threads(void)
{
- int i;
+ CPUState *penv = first_cpu;
assert(!cpu_single_env);
- for (i = 0; i < smp_cpus; ++i) {
- vcpu_info[i].stop = 0;
- vcpu_info[i].stopped = 0;
- pthread_kill(vcpu_info[i].thread, SIG_IPI);
+ while (penv) {
+ penv->stop = 0;
+ penv->stopped = 0;
+ pthread_kill(penv->thread, SIG_IPI);
+ penv = (CPUState *)penv->next_cpu;
}
}
@@ -348,7 +332,7 @@
static void update_regs_for_sipi(CPUState *env)
{
kvm_arch_update_regs_for_sipi(env);
- vcpu_info[env->cpu_index].sipi_needed = 0;
+ env->sipi_needed = 0;
}
static void update_regs_for_init(CPUState *env)
@@ -361,11 +345,11 @@
#ifdef TARGET_I386
/* restore SIPI vector */
- if(vcpu_info[env->cpu_index].sipi_needed)
+ if(env->sipi_needed)
env->segs[R_CS] = cs;
-
- vcpu_info[env->cpu_index].init = 0;
#endif
+
+ env->init = 0;
kvm_arch_load_regs(env);
}
@@ -387,22 +371,22 @@
void qemu_kvm_system_reset(void)
{
- int i;
+ CPUState *penv = first_cpu;
pause_all_threads();
qemu_system_reset();
- for (i = 0; i < smp_cpus; ++i)
- kvm_arch_cpu_reset(vcpu_info[i].env);
+ while (penv) {
+ kvm_arch_cpu_reset(penv);
+ penv = (CPUState *)penv->next_cpu;
+ }
resume_all_threads();
}
static int kvm_main_loop_cpu(CPUState *env)
{
- struct vcpu_info *info = &vcpu_info[env->cpu_index];
-
setup_kernel_sigmask(env);
pthread_mutex_lock(&qemu_mutex);
@@ -423,12 +407,12 @@
if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
env->halted = 0;
if (!kvm_irqchip_in_kernel(kvm_context)) {
- if (info->init)
+ if (env->init)
update_regs_for_init(env);
- if (info->sipi_needed)
+ if (env->sipi_needed)
update_regs_for_sipi(env);
}
- if (!env->halted && !info->init)
+ if (!env->halted && !env->init)
kvm_cpu_exec(env);
env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
kvm_main_loop_wait(env, 0);
@@ -442,9 +426,8 @@
CPUState *env = _env;
sigset_t signals;
- vcpu = &vcpu_info[env->cpu_index];
- vcpu->env = env;
- vcpu->env->thread_id = kvm_get_thread_id();
+ current_env = env;
+ env->thread_id = kvm_get_thread_id();
sigfillset(&signals);
sigprocmask(SIG_BLOCK, &signals, NULL);
kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -452,7 +435,7 @@
/* signal VCPU creation */
pthread_mutex_lock(&qemu_mutex);
- vcpu->created = 1;
+ current_env->created = 1;
pthread_cond_signal(&qemu_vcpu_cond);
/* and wait for machine initialization */
@@ -466,9 +449,9 @@
void kvm_init_new_ap(int cpu, CPUState *env)
{
- pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+ pthread_create(&env->thread, NULL, ap_main_loop, env);
- while (vcpu_info[cpu].created == 0)
+ while (env->created == 0)
qemu_cond_wait(&qemu_vcpu_cond);
}
@@ -624,10 +607,12 @@
return 0;
}
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, void *data)
{
+ struct CPUState *env = (struct CPUState *)data;
+
kvm_debug_stop_requested = 1;
- vcpu_info[vcpu].stopped = 1;
+ env->stopped = 1;
return 1;
}
@@ -721,10 +706,12 @@
return kvm_arch_halt(opaque, vcpu);
}
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, void *data)
{
+ struct CPUState *env = (struct CPUState *)data;
+
/* stop the current vcpu from going back to guest mode */
- vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+ env->stopped = 1;
qemu_system_reset_request();
return 1;
Index: kvm-userspace.git/qemu/qemu-kvm.h
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.h
+++ kvm-userspace.git/qemu/qemu-kvm.h
@@ -63,16 +63,14 @@
void kvm_arch_load_regs(CPUState *env);
int kvm_arch_qemu_init_env(CPUState *cenv);
int kvm_arch_halt(void *opaque, int vcpu);
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu);
-void kvm_arch_post_kvm_run(void *opaque, int vcpu);
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env);
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env);
int kvm_arch_has_work(CPUState *env);
int kvm_arch_try_push_interrupts(void *opaque);
int kvm_arch_try_push_nmi(void *opaque);
void kvm_arch_update_regs_for_sipi(CPUState *env);
void kvm_arch_cpu_reset(CPUState *env);
-CPUState *qemu_kvm_cpu_env(int index);
-
void qemu_kvm_aio_wait_start(void);
void qemu_kvm_aio_wait(void);
void qemu_kvm_aio_wait_end(void);
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,6 +40,7 @@
#include "cpu-defs.h"
#include "softfloat.h"
+
typedef struct CPUIA64State {
CPU_COMMON;
uint32_t hflags;
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [patch] remove vcpu_info array v5
2008-10-30 14:09 [patch] remove vcpu_info array v5 Jes Sorensen
@ 2008-11-04 11:54 ` Avi Kivity
2008-11-04 13:55 ` Glauber Costa
2008-11-10 13:24 ` Jes Sorensen
0 siblings, 2 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-04 11:54 UTC (permalink / raw)
To: Jes Sorensen
Cc: Anthony Liguori, Hollis Blanchard, kvm@vger.kernel.org,
kvm-ia64@vger.kernel.org, Glauber de Oliveira Costa
Jes Sorensen wrote:
>
> So here we go, trying to catch up on the PCI passthrough patch revision
> number, here's v5 of the struct vcpu_info patch.
>
> In the end I decided to merge the contents of struct vcpu_info directly
> into CPU_COMMON as it was silly to create new files just to remove them
> in the next patch again.
>
> This boots for me on ia64 and builds on x86_64, obviously it is 100%
> perfect <tm>!
>
> Comments, bug reports, or upstream merge, welcome :-)
> Merge vcpu_info into CPUState.
>
> Move contents of struct vcpu_info directly into CPU_COMMON. Rename
> struct qemu_kvm_work_item to qemu_work_item as it really isn't KVM
> specific.
>
>
Well, it is actually kvm specific, since qemu doesn't have vcpu threads.
> -int kvm_run(kvm_context_t kvm, int vcpu)
> +int kvm_run(kvm_context_t kvm, int vcpu, void *env)
>
That's a little ugly -- passing two arguments which describe the vcpu.
How about passing env to kvm_create_vcpu() instead?
> #define CPU_TEMP_BUF_NLONGS 128
> #define CPU_COMMON \
> struct TranslationBlock *current_tb; /* currently executing TB */ \
> @@ -200,6 +204,15 @@
> /* user data */ \
> void *opaque; \
> \
> - const char *cpu_model_str;
> + const char *cpu_model_str; \
> + \
> + int sipi_needed; \
> + int init; \
> + pthread_t thread; \
> + int signalled; \
> + int stop; \
> + int stopped; \
> + int created; \
> + struct qemu_work_item *queued_work_first, *queued_work_last;
>
struct kvm_stuff { ... } and put that in as a member, so it's easier to
remember this is a kvm addition.
>
> #if defined(TARGET_I386) || defined(TARGET_X86_64)
> +#ifdef USE_KVM
> +static CPUState *qemu_kvm_cpu_env(int index)
> +{
> + CPUState *penv;
> +
> + penv = first_cpu;
> +
> + while (penv) {
> + if (penv->cpu_index == index)
> + return penv;
> + penv = (CPUState *)penv->next_cpu;
> + }
> +
> + return NULL;
> +}
> +#endif
>
This can't be the best way of determining the env pointer from a cpu number.
> @@ -143,4 +143,12 @@
> void cpu_save(QEMUFile *f, void *opaque);
> int cpu_load(QEMUFile *f, void *opaque, int version_id);
>
> +/* work queue */
> +struct qemu_work_item {
> + struct qemu_kvm_work_item *next;
>
Missed rename here.
> + void (*func)(void *data);
> + void *data;
> + int done;
> +};
> +
>
Please keep this in kvm specific files.
> @@ -28,7 +28,6 @@
> #include <sys/syscall.h>
> #include <sys/mman.h>
>
> -#define bool _Bool
>
why?
Please separate into a patch that does the kvm_run int vcpu -> void
*vcpu conversion, and then the vcpu_info -> env conversion.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [patch] remove vcpu_info array v5
2008-11-04 11:54 ` Avi Kivity
@ 2008-11-04 13:55 ` Glauber Costa
2008-11-04 14:30 ` Avi Kivity
2008-11-10 13:24 ` Jes Sorensen
1 sibling, 1 reply; 26+ messages in thread
From: Glauber Costa @ 2008-11-04 13:55 UTC (permalink / raw)
To: Avi Kivity
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
On Tue, Nov 4, 2008 at 9:54 AM, Avi Kivity <avi@redhat.com> wrote:
> Jes Sorensen wrote:
>>
>> So here we go, trying to catch up on the PCI passthrough patch revision
>> number, here's v5 of the struct vcpu_info patch.
>>
>> In the end I decided to merge the contents of struct vcpu_info directly
>> into CPU_COMMON as it was silly to create new files just to remove them
>> in the next patch again.
>>
>> This boots for me on ia64 and builds on x86_64, obviously it is 100%
>> perfect <tm>!
>>
>> Comments, bug reports, or upstream merge, welcome :-)
>> Merge vcpu_info into CPUState.
>>
>> Move contents of struct vcpu_info directly into CPU_COMMON. Rename
>> struct qemu_kvm_work_item to qemu_work_item as it really isn't KVM
>> specific.
>>
>>
>
> Well, it is actually kvm specific, since qemu doesn't have vccpu threads.
>
>> -int kvm_run(kvm_context_t kvm, int vcpu)
>> +int kvm_run(kvm_context_t kvm, int vcpu, void *env)
>>
>
> That's a little ugly -- passing two arguments which describe the vcpu. How
> about passing env to kvm_create_vcpu() instead?
>
>> #define CPU_TEMP_BUF_NLONGS 128
>> #define CPU_COMMON \
>> struct TranslationBlock *current_tb; /* currently executing TB */ \
>> @@ -200,6 +204,15 @@
>> /* user data */ \
>> void *opaque; \
>> \
>> - const char *cpu_model_str;
>> + const char *cpu_model_str; \
>> + \
>> + int sipi_needed; \
>> + int init; \
>> + pthread_t thread; \
>> + int signalled; \
>> + int stop; \
>> + int stopped; \
>> + int created; \
>> + struct qemu_work_item *queued_work_first, *queued_work_last;
>>
>
> struct kvm_stuff { ... } and put that in as a member, so it's easier to
> remember this is a kvm addition.
>
>> #if defined(TARGET_I386) || defined(TARGET_X86_64)
>> +#ifdef USE_KVM
>> +static CPUState *qemu_kvm_cpu_env(int index)
>> +{
>> + CPUState *penv;
>> +
>> + penv = first_cpu;
>> +
>> + while (penv) {
>> + if (penv->cpu_index == index)
>> + return penv;
>> + penv = (CPUState *)penv->next_cpu;
>> + }
>> +
>> + return NULL;
>> +}
>> +#endif
>>
>
> This can't be the best way of determining the env pointer from a cpu number.
By far it is not. But at least it does not depend on any kvm-specific
data, and works
for both qemu and kvm. So it's better to be this way.
If we change this scheme to a better one, like a hash table, then
it'll improve both qemu and kvm.
>> @@ -143,4 +143,12 @@
>> void cpu_save(QEMUFile *f, void *opaque);
>> int cpu_load(QEMUFile *f, void *opaque, int version_id);
>> +/* work queue */
>> +struct qemu_work_item {
>> + struct qemu_kvm_work_item *next;
>>
>
> Missed rename here.
>
>> + void (*func)(void *data);
>> + void *data;
>> + int done;
>> +};
>> +
>>
>
> Please keep this in kvm specific files.
>
>
>> @@ -28,7 +28,6 @@
>> #include <sys/syscall.h>
>> #include <sys/mman.h>
>> -#define bool _Bool
>>
>
> why?
>
> Please separate into a patch that does the kvm_run int vcpu -> void *vcpu
> conversion, and then the vcpu_info -> env conversion.
>
> --
> error compiling committee.c: too many arguments to function
>
>
--
Glauber Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [patch] remove vcpu_info array v5
2008-11-04 13:55 ` Glauber Costa
@ 2008-11-04 14:30 ` Avi Kivity
2008-11-04 14:35 ` Glauber Costa
2008-11-10 12:30 ` Jes Sorensen
0 siblings, 2 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-04 14:30 UTC (permalink / raw)
To: Glauber Costa
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Glauber Costa wrote:
>> This can't be the best way of determining the env pointer from a cpu number.
>>
> By far it is not. But at least it does not depend on any kvm-specific
> data, and works
> for both qemu and kvm. So it's better to be this way.
>
> If we change this scheme to a better one, like a hash table, then
> it'll improve both qemu and kvm.
>
>
There are simpler data structures. An example is an array. Since we
have an upper limit on the number of cpus (->max_cpus) there are no
issues with scaling (unlike with the algorithm above).
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 14:30 ` Avi Kivity
@ 2008-11-04 14:35 ` Glauber Costa
2008-11-04 14:40 ` Avi Kivity
2008-11-10 12:30 ` Jes Sorensen
1 sibling, 1 reply; 26+ messages in thread
From: Glauber Costa @ 2008-11-04 14:35 UTC (permalink / raw)
To: Avi Kivity
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
On Tue, Nov 4, 2008 at 12:30 PM, Avi Kivity <avi@redhat.com> wrote:
> Glauber Costa wrote:
>>>
>>> This can't be the best way of determining the env pointer from a cpu
>>> number.
>>>
>>
>> By far it is not. But at least it does not depend on any kvm-specific
>> data, and works
>> for both qemu and kvm. So it's better to be this way.
>>
>> If we change this scheme to a better one, like a hash table, then
>> it'll improve both qemu and kvm.
>>
>>
>
> There are simpler data structures. An example is an array. Since we have
> an upper limit on the number of cpus (->max_cpus) there are no issues with
> scaling (unlike with the algorithm above).
Yes, but I believe the whole point of jes patches is to remove those limits.
--
Glauber Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 14:35 ` Glauber Costa
@ 2008-11-04 14:40 ` Avi Kivity
2008-11-04 14:43 ` Glauber Costa
2008-11-10 13:30 ` Jes Sorensen
0 siblings, 2 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-04 14:40 UTC (permalink / raw)
To: Glauber Costa
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Glauber Costa wrote:
>> There are simpler data structures. An example is an array. Since we have
>> an upper limit on the number of cpus (->max_cpus) there are no issues with
>> scaling (unlike with the algorithm above).
>>
>
> Yes, but I believe the whole point of jes patches is to remove those limits.
>
So we could linear search huge lists? Or code hashes where arrays would
do? No, sir.
An array of pointers statically sized at MAX_CPUS is fine. If you
insist, you can realloc() it on demand.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 14:40 ` Avi Kivity
@ 2008-11-04 14:43 ` Glauber Costa
2008-11-04 14:47 ` Avi Kivity
2008-11-10 13:30 ` Jes Sorensen
1 sibling, 1 reply; 26+ messages in thread
From: Glauber Costa @ 2008-11-04 14:43 UTC (permalink / raw)
To: Avi Kivity
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
On Tue, Nov 4, 2008 at 12:40 PM, Avi Kivity <avi@redhat.com> wrote:
> Glauber Costa wrote:
>>>
>>> There are simpler data structures. An example is an array. Since we
>>> have
>>> an upper limit on the number of cpus (->max_cpus) there are no issues
>>> with
>>> scaling (unlike with the algorithm above).
>>>
>>
>> Yes, but I believe the whole point of jes patches is to remove those
>> limits.
>>
>
> So we could linear search huge lists? Or code hashes where arrays would do?
> No, sir.
>
> An array of pointers statically sized at MAX_CPUS is fine. If you insist,
> you can realloc() it on demand.
Yes, I agree. having MAX_CPUS is fine, and if we need to, we can increase that.
but again: then the direction of the flow is better be have qemu to
use a env array,
and then we can use the same method for kvm.
> --
> error compiling committee.c: too many arguments to function
>
>
--
Glauber Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 14:43 ` Glauber Costa
@ 2008-11-04 14:47 ` Avi Kivity
0 siblings, 0 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-04 14:47 UTC (permalink / raw)
To: Glauber Costa
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Glauber Costa wrote:
>>> Yes, but I believe the whole point of jes patches is to remove those
>>> limits.
>>>
>>>
>> So we could linear search huge lists? Or code hashes where arrays would do?
>> No, sir.
>>
>> An array of pointers statically sized at MAX_CPUS is fine. If you insist,
>> you can realloc() it on demand.
>>
>
> Yes, I agree. having MAX_CPUS is fine, and if we need to, we can increase that.
> but again: the better direction is to have qemu
> use an env array,
> and then we can use the same method for kvm.
>
I only object to the linear search; not to the folding of vcpu_info into
CPUState.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 14:40 ` Avi Kivity
2008-11-04 14:43 ` Glauber Costa
@ 2008-11-10 13:30 ` Jes Sorensen
2008-11-10 13:40 ` Avi Kivity
1 sibling, 1 reply; 26+ messages in thread
From: Jes Sorensen @ 2008-11-10 13:30 UTC (permalink / raw)
To: Avi Kivity
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Avi Kivity wrote:
>> Yes, but I believe the whole point of jes patches is to remove those
>> limits.
>>
>
> So we could linear search huge lists? Or code hashes where arrays would
> do? No, sir.
>
> An array of pointers statically sized at MAX_CPUS is fine. If you
> insist, you can realloc() it on demand.
Sorry, but this is ridiculous. Please try and take a look at where the
search is performed. It's *solely* in the ACPI hot plug code.
realloc() is not an option, unless of course you are in favor of putting
a big lock around all access to such an array.
My patch gets rid of the pointless MAX_CPUS sized array, which doesn't
buy us anything. In fact, most of the changes in my patch makes the
code simpler, because it removes a stack of silly cases where qemu uses
env->cpu_index to get into the array, just to hide CPUState from libkvm,
just to have the callback in QEMU go from int vcpu back to CPUState.
Lets just do it right and get rid of this silliness.
Jes
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 13:30 ` Jes Sorensen
@ 2008-11-10 13:40 ` Avi Kivity
2008-11-10 15:22 ` Anthony Liguori
2008-11-10 15:47 ` Jes Sorensen
0 siblings, 2 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-10 13:40 UTC (permalink / raw)
To: Jes Sorensen
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Jes Sorensen wrote:
>
> Sorry, but this is ridiculous. Please try and take a look at where the
> search is performed. It's *solely* in the ACPI hot plug code.
>
There are other places we want a cpu from a cpu_index. Like the apic
code (admittedly that's only using the userspace apic, which is
non-default, but still).
> realloc() is not an option, unless of course you are in favor of putting
> a big lock around all access to such an array.
>
Right now qemu is single-threaded anyway. The only parallelism is when
executing guest code.
Of course we'll want to change that.
> My patch gets rid of the pointless MAX_CPUS sized array, which doesn't
> buy us anything. In fact, most of the changes in my patch makes the
> code simpler, because it removes a stack of silly cases where qemu uses
> env->cpu_index to get into the array, just to hide CPUState from libkvm,
> just to have the callback in QEMU go from int vcpu back to CPUState.
>
> Lets just do it right and get rid of this silliness.
I agree that vcpu_info should die. But we should still have a fast way
of getting a cpu from a cpu_index. I don't see a problem with a static
array of pointers, make it 16K in size if you want. Our real scaling
limits are much lower; they involve the big qemu lock and the single
iothread which will both limit scaling far below any static vcpu array.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 13:40 ` Avi Kivity
@ 2008-11-10 15:22 ` Anthony Liguori
2008-11-10 15:45 ` Avi Kivity
2008-11-10 15:47 ` Jes Sorensen
1 sibling, 1 reply; 26+ messages in thread
From: Anthony Liguori @ 2008-11-10 15:22 UTC (permalink / raw)
To: Avi Kivity
Cc: Jes Sorensen, Glauber Costa, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Avi Kivity wrote:
>
>> My patch gets rid of the pointless MAX_CPUS sized array, which doesn't
>> buy us anything. In fact, most of the changes in my patch makes the
>> code simpler, because it removes a stack of silly cases where qemu uses
>> env->cpu_index to get into the array, just to hide CPUState from libkvm,
>> just to have the callback in QEMU go from int vcpu back to CPUState.
>>
>> Lets just do it right and get rid of this silliness.
>
> I agree that vcpu_info should die. But we should still have a fast
> way of getting a cpu from a cpu_index. I don't see a problem with a
> static array of pointers, make it 16K in size if you want. Our real
> scaling limits are much lower; they involve the big qemu lock and the
> single iothread which will both limit scaling far below any static
> vcpu array.
Just do a linear search of the CPUState list and be done with it. This
smells of premature optimization greatly. I would be amazed if walking
the CPUState list is ever on the fast path or will ever be. Really, if
you need to go from cpu_index => CPUState, it suggests you're doing
something wrong.
Regards,
Anthony Liguori
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 15:22 ` Anthony Liguori
@ 2008-11-10 15:45 ` Avi Kivity
0 siblings, 0 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-10 15:45 UTC (permalink / raw)
To: Anthony Liguori
Cc: Jes Sorensen, Glauber Costa, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Anthony Liguori wrote:
>
> Just do a linear search of the CPUState list and be done with it.
> This smells of premature optimization greatly. I would be amazed if
> walking the CPUState list is ever on the fast path or will ever be.
> Really, if you need to go from cpu_index => CPUState, it suggests
> you're doing something wrong.
>
apic_bus_deliver() in hw/apic.c (it uses a static array of local apics,
but under you proposal it would need to be converted to a list as well).
happens once per interrupt. With external interrupts you might
precompute it, but IPIs definitely need fast int->ptr conversion.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 13:40 ` Avi Kivity
2008-11-10 15:22 ` Anthony Liguori
@ 2008-11-10 15:47 ` Jes Sorensen
2008-11-10 15:54 ` Avi Kivity
1 sibling, 1 reply; 26+ messages in thread
From: Jes Sorensen @ 2008-11-10 15:47 UTC (permalink / raw)
To: Avi Kivity
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Avi Kivity wrote:
> Jes Sorensen wrote:
>>
>> Sorry, but this is ridiculous. Please try and take a look at where the
>> search is performed. It's *solely* in the ACPI hot plug code.
>
> There are other places we want a cpu from a cpu_index. Like the apic
> code (admittedly that's only using the userspace apic, which is
> non-default, but still).
Hi Avi,
Well this was based on what I ran into building the code. I didn't hit
any issues where the apic code tried to do this.
>> My patch gets rid of the pointless MAX_CPUS sized array, which doesn't
>> buy us anything. In fact, most of the changes in my patch makes the
>> code simpler, because it removes a stack of silly cases where qemu uses
>> env->cpu_index to get into the array, just to hide CPUState from libkvm,
>> just to have the callback in QEMU go from int vcpu back to CPUState.
>>
>> Lets just do it right and get rid of this silliness.
>
> I agree that vcpu_info should die. But we should still have a fast way
> of getting a cpu from a cpu_index. I don't see a problem with a static
> array of pointers, make it 16K in size if you want. Our real scaling
> limits are much lower; they involve the big qemu lock and the single
> iothread which will both limit scaling far below any static vcpu array.
If we have reasons where we actually rely on the cpu number, and that's
not counting end-user pretty-number-print concerns, then I found that
we practically never use the cpu number. If we really use it a lot, I
agree we need a fast way, I just didn't hit it in my builds. What did I
miss?
Jes
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 15:47 ` Jes Sorensen
@ 2008-11-10 15:54 ` Avi Kivity
2008-11-10 15:58 ` Jes Sorensen
0 siblings, 1 reply; 26+ messages in thread
From: Avi Kivity @ 2008-11-10 15:54 UTC (permalink / raw)
To: Jes Sorensen
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Jes Sorensen wrote:
> If we have reasons where we actually rely on the cpu number, and that's
> not counting end-user pretty-number-print concerns, then I found that
> we practically never use the cpu number. If we really use it a lot, I
> agree we need a fast way, I just didn't hit it in my builds. What did I
> miss?
>
The code I mentioned only runs if the -no-kvm-irqchip option is passed.
It's not the highest performing option...
So this isn't used a lot. But cpus definitely use cpu numbers (as apic
ids), so qemu needs to be prepared to handle this. As to scalability,
that will take a lot more work than removing/changing arbitrary
limits. Look at qemu_mutex and weep.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 15:54 ` Avi Kivity
@ 2008-11-10 15:58 ` Jes Sorensen
2008-11-11 9:26 ` Avi Kivity
0 siblings, 1 reply; 26+ messages in thread
From: Jes Sorensen @ 2008-11-10 15:58 UTC (permalink / raw)
To: Avi Kivity
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Avi Kivity wrote:
> The code I mentioned only runs if the -no-kvm-irqchip option is passed.
> It's not the highest performing option...
What I meant was that I was able to compile the code, and there was only
one piece left that needed that function, which is why I moved it and
made it static in the acpi code.
> So this isn't used a lot. But cpus definitely use cpu numbers (as apic
> ids), so qemu needs to be prepared to handle this. As to scalability,
> that will take a lot more work than removing/changing arbitrary
> limits. Look at qemu_mutex and weep.
Well we have to start somewhere. Just because there's limits in other
areas too, doesn't mean we should discard valid improvements elsewhere.
I'll get to some of that at some point, but there's so many cards used
to build this house....
Cheers,
Jes
PS: I'll look at your other suggestions to my patch tomorrow.
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-10 15:58 ` Jes Sorensen
@ 2008-11-11 9:26 ` Avi Kivity
2008-11-12 13:02 ` Jes Sorensen
0 siblings, 1 reply; 26+ messages in thread
From: Avi Kivity @ 2008-11-11 9:26 UTC (permalink / raw)
To: Jes Sorensen
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Jes Sorensen wrote:
> Avi Kivity wrote:
>> The code I mentioned only runs if the -no-kvm-irqchip option is
>> passed. It's not the highest performing option...
>
> What I meant was that I was able to compile the code, and there was only
> one piece left that needed that function, which is why I moved it and
> made it static in the acpi code.
>
That's because there is another static array in acpi.c...
>> So this isn't used a lot. But cpus definitely use cpu numbers (as
>> apic ids), so qemu needs to be prepared to handle this. As to
>> scalability, that will take a lot more work than
>> removing/changing arbitrary limits. Look at qemu_mutex and weep.
>
> Well we have to start somewhere. Just because there's limits in other
> areas too, doesn't mean we should discard valid improvements elsewhere.
> I'll get to some of that at some point, but there's so many cards used
> to build this house....
Your patch builds the 16383rd floor while ignoring the 15th. Qemu
scaling issues are much, much more painful than what you were addressing.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-11 9:26 ` Avi Kivity
@ 2008-11-12 13:02 ` Jes Sorensen
2008-11-12 13:09 ` Glauber Costa
2008-11-12 13:12 ` Avi Kivity
0 siblings, 2 replies; 26+ messages in thread
From: Jes Sorensen @ 2008-11-12 13:02 UTC (permalink / raw)
To: Avi Kivity
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Avi Kivity wrote:
> Jes Sorensen wrote:
>> What I meant was that I was able to compile the code, and there was only
>> one piece left that needed that function, which is why I moved it and
>> made it static in the acpi code.
>>
>
> That's because there is another static array in acpi.c...
Which array are you talking about? ACPI has its own limitations, like
we need ACPI 3.0 to go beyond 256 processors :-(
>> Well we have to start somewhere. Just because there's limits in other
>> areas too, doesn't mean we should discard valid improvements elsewhere.
>> I'll get to some of that at some point, but there's so many cards used
>> to build this house....
>
> Your patch builds the 16383rd floor while ignoring the 15th. Qemu
> scaling issues are much, much more painful than what you were addressing.
Well I am not ignoring the 15th floor, I am just putting in expandable
rails that can go all the way to the 16383rd. Next I'll get to look at
putting in the button to make the elevator stop at the 15th+ floors :)
Cheers,
Jes
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-12 13:02 ` Jes Sorensen
@ 2008-11-12 13:09 ` Glauber Costa
2008-11-12 13:14 ` Avi Kivity
2008-11-12 13:12 ` Avi Kivity
1 sibling, 1 reply; 26+ messages in thread
From: Glauber Costa @ 2008-11-12 13:09 UTC (permalink / raw)
To: Jes Sorensen
Cc: Avi Kivity, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
On Wed, Nov 12, 2008 at 11:02 AM, Jes Sorensen <jes@sgi.com> wrote:
> Avi Kivity wrote:
>>
>> Jes Sorensen wrote:
>>>
>>> What I meant was that I was able to compile the code, and there was only
>>> one piece left that needed that function, which is why I moved it and
>>> made it static in the acpi code.
>>>
>>
>> That's because there is another static array in acpi.c...
>
> Which array are you talking about? ACPI has its own limitations, like
> we need ACPI 3.0 to go beyond 256 processors :-(
>
>>> Well we have to start somewhere. Just because there's limits in other
>>> areas too, doesn't mean we should discard valid improvements elsewhere.
>>> I'll get to some of that at some point, but there's so many cards used
>>> to build this house....
>>
>> Your patch builds the 16383rd floor while ignoring the 15th. Qemu scaling
>> issues are much, much more painful than what you were addressing.
>
> Well I am not ignoring the 15th floor, I am just putting in expandable
> rails that can go all the way to the 16383rd. Next I'll get to look at
> putting in the button to make the elevator stop at the 15th+ floors :)
>
> Cheers,
> Jes
>
I think the main point here is that currently, one of the showstoppers
for scalability
is the lack of fine-grained locking. And we don't really know how the
code will look
after that. It can very well change completely, rendering this work
totally void.
--
Glauber Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-12 13:09 ` Glauber Costa
@ 2008-11-12 13:14 ` Avi Kivity
0 siblings, 0 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-12 13:14 UTC (permalink / raw)
To: Glauber Costa
Cc: Jes Sorensen, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Glauber Costa wrote:
> I think the main point here is that currently, one of the showstoppers
> for scalability
> is the lack of fine-grained locking. And we don't really know how the
> code will look
> after that. It can very well change completely, rendering this work
> totally void.
>
I don't think it will be that bad. Fine-graining locks is an art that
is well known, and it's usually not too painful except to those who have
to divine the locking rules from the code afterwards.
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-12 13:02 ` Jes Sorensen
2008-11-12 13:09 ` Glauber Costa
@ 2008-11-12 13:12 ` Avi Kivity
1 sibling, 0 replies; 26+ messages in thread
From: Avi Kivity @ 2008-11-12 13:12 UTC (permalink / raw)
To: Jes Sorensen
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Jes Sorensen wrote:
> Avi Kivity wrote:
>> Jes Sorensen wrote:
>>> What I meant was that I was able to compile the code, and there was
>>> only
>>> one piece left that needed that function, which is why I moved it and
>>> made it static in the acpi code.
>>>
>>
>> That's because there is another static array in acpi.c...
>
> Which array are you talking about? ACPI has its own limitations, like
> we need ACPI 3.0 to go beyond 256 processors :-(
>
Sorry. apic.c:
static APICState *local_apics[MAX_APICS + 1];
A lucky typo, though, since it brought the acpi issue into the discussion.
>>> Well we have to start somewhere. Just because there's limits in other
>>> areas too, doesn't mean we should discard valid improvements elsewhere.
>>> I'll get to some of that at some point, but there's so many cards used
>>> to build this house....
>>
>> Your patch builds the 16383rd floor while ignoring the 15th. Qemu
>> scaling issues are much, much more painful than what you were
>> addressing.
>
> Well I am not ignoring the 15th floor, I am just putting in expandable
> rails that can go all the way to the 16383rd. Next I'll get to look at
> putting in the button to make the elevator stop at the 15th+ floors :)
Well I want 16384 buttons in the elevator, not one button you need to
press 16383 times to get to your floor :)
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 14:30 ` Avi Kivity
2008-11-04 14:35 ` Glauber Costa
@ 2008-11-10 12:30 ` Jes Sorensen
1 sibling, 0 replies; 26+ messages in thread
From: Jes Sorensen @ 2008-11-10 12:30 UTC (permalink / raw)
To: Avi Kivity
Cc: Glauber Costa, Anthony Liguori, Hollis Blanchard,
kvm@vger.kernel.org, kvm-ia64@vger.kernel.org
Avi Kivity wrote:
> Glauber Costa wrote:
>> By far it is not. But at least it does not depend on any kvm-specific
>> data, and works
>> for both qemu and kvm. So it's better to be this way.
>>
>> If we change this scheme to a better one, like a hash table, then
>> it'll improve both qemu and kvm.
>
> There are simpler data structures. An example is an array. Since we
> have an upper limit on the number of cpus (->max_cpus) there are no
> issues with scaling (unlike with the algorithm above).
Well one of the goals with my patch was to get rid of that nasty limit,
so that we can scale. Static sized arrays are bad.
Jes
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] remove vcpu_info array v5
2008-11-04 11:54 ` Avi Kivity
2008-11-04 13:55 ` Glauber Costa
@ 2008-11-10 13:24 ` Jes Sorensen
2008-11-10 13:48 ` Avi Kivity
1 sibling, 1 reply; 26+ messages in thread
From: Jes Sorensen @ 2008-11-10 13:24 UTC (permalink / raw)
To: Avi Kivity
Cc: Anthony Liguori, Hollis Blanchard, kvm@vger.kernel.org,
kvm-ia64@vger.kernel.org, Glauber de Oliveira Costa
Avi Kivity wrote:
> Jes Sorensen wrote:
>> Move contents of struct vcpu_info directly into CPU_COMMON. Rename
>> struct qemu_kvm_work_item to qemu_work_item as it really isn't KVM
>> specific.
>
> Well, it is actually kvm specific, since qemu doesn't have vcpu threads.
Hi Avi,
Anthony suggested I moved things directly in there, after my previous
patch kept it as a struct.
>> -int kvm_run(kvm_context_t kvm, int vcpu)
>> +int kvm_run(kvm_context_t kvm, int vcpu, void *env)
>>
>
> That's a little ugly -- passing two arguments which describe the vcpu.
> How about passing env to kvm_create_vcpu() instead?
I am all in favor of doing this, but in order to do so, we have to
teach libkvm about CPUState - are you ok with that?
>
> struct kvm_stuff { ... } and put that in as a member, so it's easier to
> remember this is a kvm addition.
If we can agree, I really don't care too much. There's already plenty
of stuff in CPU_COMMON that isn't used by specific users, so I didn't
think this was an issue.
>>
>> #if defined(TARGET_I386) || defined(TARGET_X86_64)
>> +#ifdef USE_KVM
>> +static CPUState *qemu_kvm_cpu_env(int index)
>> +{
>> + CPUState *penv;
>> +
>> + penv = first_cpu;
>> +
>> + while (penv) {
>> + if (penv->cpu_index == index)
>> + return penv;
>> + penv = (CPUState *)penv->next_cpu;
>> + }
>> +
>> + return NULL;
>> +}
>> +#endif
>>
>
> This can't be the best way of determining the env pointer from a cpu
> number.
It's not pretty, but this is only used in the ACPI hotplug code now, so
it's less performance critical.
>> +/* work queue */
>> +struct qemu_work_item {
>> + struct qemu_kvm_work_item *next;
>>
>
> Missed rename here.
>
>> + void (*func)(void *data);
>> + void *data;
>> + int done;
>> +};
>> +
>>
>
> Please keep this in kvm specific files.
Why? There really isn't anything specific about a work queue list
like this. Making it an available functionality for QEMU shouldn't
hurt.
>> -#define bool _Bool
>>
>
> why?
It was unused and didn't add any value.
> Please separate into a patch that does the kvm_run int vcpu -> void
> *vcpu conversion, and then the vcpu_info -> env conversion.
As mentioned above, if I am going to go that way, we need to teach
libkvm about 'env'. I am all in favor, but I am not going to do that
unless I can get an ack that it's acceptable to do so.
Cheers,
Jes
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [patch] remove vcpu_info array v5
2008-11-10 13:24 ` Jes Sorensen
@ 2008-11-10 13:48 ` Avi Kivity
2008-11-13 16:46 ` [patch] pass opaque CPUState through libkvm instead of int vcpu Jes Sorensen
0 siblings, 1 reply; 26+ messages in thread
From: Avi Kivity @ 2008-11-10 13:48 UTC (permalink / raw)
To: Jes Sorensen
Cc: Anthony Liguori, Hollis Blanchard, kvm@vger.kernel.org,
kvm-ia64@vger.kernel.org, Glauber de Oliveira Costa
Jes Sorensen wrote:
> Avi Kivity wrote:
>> Jes Sorensen wrote:
>>> Move contents of struct vcpu_info directly into CPU_COMMON. Rename
>>> struct qemu_kvm_work_item to qemu_work_item as it really isn't KVM
>>> specific.
>>
>> Well, it is actually kvm specific, since qemu doesn't have vcpu
>> threads.
>
> Hi Avi,
>
> Anthony suggested I moved things directly in there, after my previous
> patch kept it as a struct.
>
The movement is fine, just keep it named kvm so we know it is kvm specific.
>>> -int kvm_run(kvm_context_t kvm, int vcpu)
>>> +int kvm_run(kvm_context_t kvm, int vcpu, void *env)
>>>
>>
>> That's a little ugly -- passing two arguments which describe the
>> vcpu. How about passing env to kvm_create_vcpu() instead?
>
> I am all in favor of doing this, but in order to do so, we have to
> teach libkvm about CPUState - are you ok with that?
No, libkvm is independent of qemu.
We can make kvm_create_vcpu(kvm_context_t kvm, int vcpu, void *env), and
pass 'env' as the opaque to all the callbacks, instead of the vcpu number.
>
>>
>> struct kvm_stuff { ... } and put that in as a member, so it's easier
>> to remember this is a kvm addition.
>
> If we can agree, I really don't care too much. There's already plenty
> of stuff in CPU_COMMON that isn't used by specific users, so I didn't
> think this was an issue.
>
It's more of a maintenance issue so that I can keep track of what is in
qemu upstream and what is local. Merges are already painful enough.
>>>
>>
>> Please keep this in kvm specific files.
>
> Why? There really isn't anything specific about a work queue list
> like this. Making it an available functionality for QEMU shouldn't
> hurt.
>
It's only relevant if you have per-vcpu threads, which qemu doesn't have.
>>> -#define bool _Bool
>>>
>>
>> why?
>
> It was unused and didn't add any value.
>
So separate patch.
>> Please separate into a patch that does the kvm_run int vcpu -> void
>> *vcpu conversion, and then the vcpu_info -> env conversion.
>
> As mentioned above, if I am going to go that way, we need to teach
> libkvm about 'env'. I am all in favor, but I am not going to do that
> unless I can get an ack that it's acceptable to do so.
How's my suggestion above?
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread* [patch] pass opaque CPUState through libkvm instead of int vcpu
2008-11-10 13:48 ` Avi Kivity
@ 2008-11-13 16:46 ` Jes Sorensen
2008-11-18 12:55 ` Avi Kivity
0 siblings, 1 reply; 26+ messages in thread
From: Jes Sorensen @ 2008-11-13 16:46 UTC (permalink / raw)
To: Avi Kivity
Cc: Anthony Liguori, Hollis Blanchard, kvm@vger.kernel.org,
kvm-ia64@vger.kernel.org, Glauber de Oliveira Costa
[-- Attachment #1: Type: text/plain, Size: 798 bytes --]
Avi Kivity wrote:
> Jes Sorensen wrote:
>>> That's a little ugly -- passing two arguments which describe the
>>> vcpu. How about passing env to kvm_create_vcpu() instead?
>>
>> I am all in favor of doing this, but in order to do so, we have to
>> teach libkvm about CPUState - are you ok with that?
>
> No, libkvm is independent of qemu.
>
> We can make kvm_create_vcpu(kvm_context_t kvm, int vcpu, void *env), and
> pass 'env' as the opaque to all the callbacks, instead of the vcpu number.
Hi,
I pulled out the part of the patch that does the opaque 'env' pointer.
It should be pretty straight forward - the vcpu_info patch will go on
top of this, but I'll work on that tomorrow.
Let me know what you think - this is about as small I can make this
patch :-)
Tested on ia64.
Cheers,
Jes
[-- Attachment #2: 0009-qemu-kvm-opaque-cpustate.patch --]
[-- Type: text/plain, Size: 9724 bytes --]
Change code to pass around opaque pointer to CPUState through libkvm,
avoiding conversion from CPUState to int vcpu and back in the callbacks
into qemu.
Signed-off-by: Jes Sorensen <jes@sgi.com>
---
libkvm/kvm-common.h | 8 ++++----
libkvm/libkvm.c | 28 ++++++++++++++--------------
libkvm/libkvm.h | 10 +++++-----
qemu/qemu-kvm-ia64.c | 4 ++--
qemu/qemu-kvm-powerpc.c | 5 ++---
qemu/qemu-kvm-x86.c | 11 +++++------
qemu/qemu-kvm.c | 21 ++++++++++++---------
qemu/qemu-kvm.h | 4 ++--
8 files changed, 46 insertions(+), 45 deletions(-)
Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
void kvm_show_code(kvm_context_t kvm, int vcpu);
int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
-void post_kvm_run(kvm_context_t kvm, int vcpu);
-int pre_kvm_run(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
+void post_kvm_run(kvm_context_t kvm, void *env);
+int pre_kvm_run(kvm_context_t kvm, void *env);
int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
int try_push_interrupts(kvm_context_t kvm);
#endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
return 0;
}
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->debug(kvm->opaque, vcpu);
+ return kvm->callbacks->debug(kvm->opaque, env);
}
int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
return kvm->callbacks->halt(kvm->opaque, vcpu);
}
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+ return kvm->callbacks->shutdown(kvm->opaque, env);
}
int try_push_interrupts(kvm_context_t kvm)
@@ -837,14 +837,14 @@
return kvm->callbacks->try_push_nmi(kvm->opaque);
}
-void post_kvm_run(kvm_context_t kvm, int vcpu)
+void post_kvm_run(kvm_context_t kvm, void *env)
{
- kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
+ kvm->callbacks->post_kvm_run(kvm->opaque, env);
}
-int pre_kvm_run(kvm_context_t kvm, int vcpu)
+int pre_kvm_run(kvm_context_t kvm, void *env)
{
- return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
+ return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
}
int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
@@ -872,7 +872,7 @@
#endif
}
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
{
int r;
int fd = kvm->vcpu_fd[vcpu];
@@ -886,19 +886,19 @@
if (!kvm->irqchip_in_kernel)
run->request_interrupt_window = try_push_interrupts(kvm);
#endif
- r = pre_kvm_run(kvm, vcpu);
+ r = pre_kvm_run(kvm, env);
if (r)
return r;
r = ioctl(fd, KVM_RUN, 0);
if (r == -1 && errno != EINTR && errno != EAGAIN) {
r = -errno;
- post_kvm_run(kvm, vcpu);
+ post_kvm_run(kvm, env);
fprintf(stderr, "kvm_run: %s\n", strerror(-r));
return r;
}
- post_kvm_run(kvm, vcpu);
+ post_kvm_run(kvm, env);
#if defined(KVM_CAP_COALESCED_MMIO)
if (kvm->coalesced_mmio) {
@@ -948,7 +948,7 @@
r = handle_io(kvm, run, vcpu);
break;
case KVM_EXIT_DEBUG:
- r = handle_debug(kvm, vcpu);
+ r = handle_debug(kvm, env);
break;
case KVM_EXIT_MMIO:
r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
#endif
break;
case KVM_EXIT_SHUTDOWN:
- r = handle_shutdown(kvm, vcpu);
+ r = handle_shutdown(kvm, env);
break;
#if defined(__s390__)
case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
/// generic memory writes to unmapped memory (For MMIO devices)
int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
int len);
- int (*debug)(void *opaque, int vcpu);
+ int (*debug)(void *opaque, void *env);
/*!
* \brief Called when the VCPU issues an 'hlt' instruction.
*
@@ -63,12 +63,12 @@
* on the host CPU.
*/
int (*halt)(void *opaque, int vcpu);
- int (*shutdown)(void *opaque, int vcpu);
+ int (*shutdown)(void *opaque, void *env);
int (*io_window)(void *opaque);
int (*try_push_interrupts)(void *opaque);
int (*try_push_nmi)(void *opaque);
- void (*post_kvm_run)(void *opaque, int vcpu);
- int (*pre_kvm_run)(void *opaque, int vcpu);
+ void (*post_kvm_run)(void *opaque, void *env);
+ int (*pre_kvm_run)(void *opaque, void *env);
int (*tpr_access)(void *opaque, int vcpu, uint64_t rip, int is_write);
#if defined(__powerpc__)
int (*powerpc_dcr_read)(int vcpu, uint32_t dcrn, uint32_t *data);
@@ -181,7 +181,7 @@
* return except for when an error has occured, or when you have sent it
* an EINTR signal.
*/
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
/*!
* \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/qemu-kvm-ia64.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-ia64.c
+++ kvm-userspace.git/qemu/qemu-kvm-ia64.c
@@ -39,11 +39,11 @@
return 1;
}
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
}
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
}
Index: kvm-userspace.git/qemu/qemu-kvm-powerpc.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-powerpc.c
+++ kvm-userspace.git/qemu/qemu-kvm-powerpc.c
@@ -142,14 +142,13 @@
return 1;
}
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
return;
}
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
- CPUState *env = qemu_kvm_cpu_env(vcpu);
cpu_single_env = env;
}
Index: kvm-userspace.git/qemu/qemu-kvm-x86.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-x86.c
+++ kvm-userspace.git/qemu/qemu-kvm-x86.c
@@ -619,17 +619,16 @@
return 1;
}
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
{
- CPUState *env = cpu_single_env;
-
if (!kvm_irqchip_in_kernel(kvm_context))
- kvm_set_cr8(kvm_context, vcpu, cpu_get_apic_tpr(env));
+ kvm_set_cr8(kvm_context, env->cpu_index, cpu_get_apic_tpr(env));
}
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
{
- CPUState *env = qemu_kvm_cpu_env(vcpu);
+ int vcpu = env->cpu_index;
+
cpu_single_env = env;
env->eflags = kvm_get_interrupt_flag(kvm_context, vcpu)
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -181,18 +181,19 @@
return kvm_arch_try_push_nmi(opaque);
}
-static void post_kvm_run(void *opaque, int vcpu)
+static void post_kvm_run(void *opaque, void *data)
{
+ CPUState *env = (CPUState *)data;
pthread_mutex_lock(&qemu_mutex);
- kvm_arch_post_kvm_run(opaque, vcpu);
+ kvm_arch_post_kvm_run(opaque, env);
}
-static int pre_kvm_run(void *opaque, int vcpu)
+static int pre_kvm_run(void *opaque, void *data)
{
- CPUState *env = qemu_kvm_cpu_env(vcpu);
+ CPUState *env = (CPUState *)data;
- kvm_arch_pre_kvm_run(opaque, vcpu);
+ kvm_arch_pre_kvm_run(opaque, env);
if (env->interrupt_request & CPU_INTERRUPT_EXIT)
return 1;
@@ -230,7 +231,7 @@
{
int r;
- r = kvm_run(kvm_context, env->cpu_index);
+ r = kvm_run(kvm_context, env->cpu_index, env);
if (r < 0) {
printf("kvm_run returned %d\n", r);
exit(1);
@@ -635,10 +636,12 @@
return 0;
}
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, void *data)
{
+ struct CPUState *env = (struct CPUState *)data;
+
kvm_debug_stop_requested = 1;
- vcpu_info[vcpu].stopped = 1;
+ vcpu_info[env->cpu_index].stopped = 1;
return 1;
}
@@ -732,7 +735,7 @@
return kvm_arch_halt(opaque, vcpu);
}
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, void *data)
{
/* stop the current vcpu from going back to guest mode */
vcpu_info[cpu_single_env->cpu_index].stopped = 1;
Index: kvm-userspace.git/qemu/qemu-kvm.h
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.h
+++ kvm-userspace.git/qemu/qemu-kvm.h
@@ -62,8 +62,8 @@
void kvm_arch_load_regs(CPUState *env);
int kvm_arch_qemu_init_env(CPUState *cenv);
int kvm_arch_halt(void *opaque, int vcpu);
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu);
-void kvm_arch_post_kvm_run(void *opaque, int vcpu);
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env);
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env);
int kvm_arch_has_work(CPUState *env);
int kvm_arch_try_push_interrupts(void *opaque);
int kvm_arch_try_push_nmi(void *opaque);
^ permalink raw reply [flat|nested] 26+ messages in thread* Re: [patch] pass opaque CPUState through libkvm instead of int vcpu
2008-11-13 16:46 ` [patch] pass opaque CPUState through libkvm instead of int vcpu Jes Sorensen
@ 2008-11-18 12:55 ` Avi Kivity
2008-11-18 12:59 ` Jes Sorensen
0 siblings, 1 reply; 26+ messages in thread
From: Avi Kivity @ 2008-11-18 12:55 UTC (permalink / raw)
To: Jes Sorensen
Cc: Anthony Liguori, Hollis Blanchard, kvm@vger.kernel.org,
kvm-ia64@vger.kernel.org, Glauber de Oliveira Costa
Jes Sorensen wrote:
> I pulled out the part of the patch that does the opaque 'env' pointer.
> It should be pretty straight forward - the vcpu_info patch will go on
> top of this, but I'll work on that tomorrow.
>
> Let me know what you think - this is about as small I can make this
> patch :-)
It's good; applied, thanks.
There's a slight redundancy in that some callbacks are passed two
opaques (per-cpu and per-vm), but it won't kill anyone.
(btw, this could have been done much more simply: declare a thread local
variable to hold the current env)
--
error compiling committee.c: too many arguments to function
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [patch] pass opaque CPUState through libkvm instead of int vcpu
2008-11-18 12:55 ` Avi Kivity
@ 2008-11-18 12:59 ` Jes Sorensen
0 siblings, 0 replies; 26+ messages in thread
From: Jes Sorensen @ 2008-11-18 12:59 UTC (permalink / raw)
To: Avi Kivity
Cc: Anthony Liguori, Hollis Blanchard, kvm@vger.kernel.org,
kvm-ia64@vger.kernel.org, Glauber de Oliveira Costa
Avi Kivity wrote:
> Jes Sorensen wrote:
> It's good; applied, thanks.
>
> There's a slight redundancy in that some callbacks are passed two
> opaques (per-cpu and per-vm), but it won't kill anyone.
>
> (btw, this could have been done much more simply: declare a thread local
> variable to hold the current env)
The thread local current env is coming in the vcpu_info merge patch :-)
Cheers,
Jes
^ permalink raw reply [flat|nested] 26+ messages in thread
end of thread, other threads:[~2008-11-18 12:59 UTC | newest]
Thread overview: 26+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-10-30 14:09 [patch] remove vcpu_info array v5 Jes Sorensen
2008-11-04 11:54 ` Avi Kivity
2008-11-04 13:55 ` Glauber Costa
2008-11-04 14:30 ` Avi Kivity
2008-11-04 14:35 ` Glauber Costa
2008-11-04 14:40 ` Avi Kivity
2008-11-04 14:43 ` Glauber Costa
2008-11-04 14:47 ` Avi Kivity
2008-11-10 13:30 ` Jes Sorensen
2008-11-10 13:40 ` Avi Kivity
2008-11-10 15:22 ` Anthony Liguori
2008-11-10 15:45 ` Avi Kivity
2008-11-10 15:47 ` Jes Sorensen
2008-11-10 15:54 ` Avi Kivity
2008-11-10 15:58 ` Jes Sorensen
2008-11-11 9:26 ` Avi Kivity
2008-11-12 13:02 ` Jes Sorensen
2008-11-12 13:09 ` Glauber Costa
2008-11-12 13:14 ` Avi Kivity
2008-11-12 13:12 ` Avi Kivity
2008-11-10 12:30 ` Jes Sorensen
2008-11-10 13:24 ` Jes Sorensen
2008-11-10 13:48 ` Avi Kivity
2008-11-13 16:46 ` [patch] pass opaque CPUState through libkvm instead of int vcpu Jes Sorensen
2008-11-18 12:55 ` Avi Kivity
2008-11-18 12:59 ` Jes Sorensen
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox