* [patch] fold struct vcpu_info into CPUState
@ 2008-09-29 15:24 Jes Sorensen
  2008-10-05 10:02 ` Avi Kivity
  2008-10-13 22:24 ` Glauber Costa
  0 siblings, 2 replies; 15+ messages in thread
From: Jes Sorensen @ 2008-09-29 15:24 UTC (permalink / raw)
  To: kvm, kvm-ia64

[-- Attachment #1: Type: text/plain, Size: 367 bytes --]

Hi,

Trying to get rid of the static declaration of the vcpu_info array, this
patch folds it into CPUState. I have only been able to test it on ia64,
where it seems to behave as expected; however, ia64 SMP support is still
a bit flaky, so I would appreciate it if someone would test this on x86
and PPC. It builds on x86_64 at least.
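
To make the shape of the change easier to see, here is a rough sketch of
the before/after access pattern (simplified; the real declarations are in
the attached patch):

    /* before: file-scope array, statically sized, indexed by cpu_index */
    struct vcpu_info vcpu_info[256];
    struct vcpu_info *vi = &vcpu_info[env->cpu_index];

    /* after: the same per-vcpu state is embedded in CPUState itself */
    struct vcpu_info *vi = &env->vcpu_info;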

Comments appreciated.

Thanks,
Jes


[-- Attachment #2: 0010-qemu-kvm-vcpu_info.patch --]
[-- Type: text/plain, Size: 11149 bytes --]

Merge vcpu_info into CPUState.

Moves the definition of the vcpu-related structs to a new header,
qemu-kvm-vcpu.h, and declares this struct in the i386/ia64/ppc CPUState
structs if USE_KVM is defined. In addition, converts qemu-kvm.c to pull
vcpu_info out of CPUState.

This eliminates the ugly statically sized array of struct vcpu_info.

Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 qemu/qemu-kvm-vcpu.h   |   34 +++++++++++
 qemu/qemu-kvm.c        |  141 ++++++++++++++++++++++++++-----------------------
 qemu/target-i386/cpu.h |    4 +
 qemu/target-ia64/cpu.h |    5 +
 qemu/target-ppc/cpu.h  |    5 +
 5 files changed, 125 insertions(+), 64 deletions(-)

Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
===================================================================
--- /dev/null
+++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
@@ -0,0 +1,34 @@
+/*
+ * qemu/kvm vcpu definitions
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+#ifndef QEMU_KVM_VCPU_H
+#define QEMU_KVM_VCPU_H
+
+#include <pthread.h>
+
+struct qemu_kvm_work_item {
+    struct qemu_kvm_work_item *next;
+    void (*func)(void *data);
+    void *data;
+    int done;
+};
+
+/*
+ * KVM vcpu struct
+ */
+struct vcpu_info {
+    int sipi_needed;
+    int init;
+    pthread_t thread;
+    int signalled;
+    int stop;
+    int stopped;
+    int created;
+    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
+};
+
+#endif
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -22,13 +22,13 @@
 #include "compatfd.h"
 
 #include "qemu-kvm.h"
+#include "qemu-kvm-vcpu.h"
 #include <libkvm.h>
 #include <pthread.h>
 #include <sys/utsname.h>
 #include <sys/syscall.h>
 #include <sys/mman.h>
 
-#define bool _Bool
 #define false 0
 #define true 1
 
@@ -43,31 +43,12 @@
 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
 
 static int qemu_system_ready;
 
 #define SIG_IPI (SIGRTMIN+4)
 
-struct qemu_kvm_work_item {
-    struct qemu_kvm_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    bool done;
-};
-
-struct vcpu_info {
-    CPUState *env;
-    int sipi_needed;
-    int init;
-    pthread_t thread;
-    int signalled;
-    int stop;
-    int stopped;
-    int created;
-    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
 pthread_t io_thread;
 static int io_thread_fd = -1;
 static int io_thread_sigfd = -1;
@@ -93,7 +74,20 @@
 
 CPUState *qemu_kvm_cpu_env(int index)
 {
-    return vcpu_info[index].env;
+    CPUState *penv;
+
+    if (current_env->cpu_index == index)
+        return current_env;
+
+    penv = first_cpu;
+
+    while (penv) {
+        if (penv->cpu_index == index)
+            return penv;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return NULL;
 }
 
 static void sig_ipi_handler(int n)
@@ -102,10 +96,10 @@
 
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item wi;
 
-    if (vi == vcpu) {
+    if (env == current_env) {
         func(data);
         return;
     }
@@ -130,29 +124,33 @@
     int signal = 0;
 
     if (env) {
-        if (!vcpu)
+        if (!current_env->vcpu_info.created)
             signal = 1;
-        if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+        /*
+         * Testing for vcpu_info.created here is really redundant
+         */
+        if (current_env->vcpu_info.created &&
+            env != current_env && env->vcpu_info.signalled)
             signal = 1;
 
         if (signal) {
-            vcpu_info[env->cpu_index].signalled = 1;
-                if (vcpu_info[env->cpu_index].thread)
-                    pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+            env->vcpu_info.signalled = 1;
+            if (env->vcpu_info.thread)
+                pthread_kill(env->vcpu_info.thread, SIG_IPI);
         }
     }
 }
 
 void kvm_update_after_sipi(CPUState *env)
 {
-    vcpu_info[env->cpu_index].sipi_needed = 1;
+    env->vcpu_info.sipi_needed = 1;
     kvm_update_interrupt_request(env);
 }
 
 void kvm_apic_init(CPUState *env)
 {
     if (env->cpu_index != 0)
-	vcpu_info[env->cpu_index].init = 1;
+	env->vcpu_info.init = 1;
     kvm_update_interrupt_request(env);
 }
 
@@ -225,7 +223,7 @@
 
 static int has_work(CPUState *env)
 {
-    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+    if (!vm_running || (env && env->vcpu_info.stopped))
 	return 0;
     if (!env->halted)
 	return 1;
@@ -234,7 +232,7 @@
 
 static void flush_queued_work(CPUState *env)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item *wi;
 
     if (!vi->queued_work_first)
@@ -251,6 +249,7 @@
 
 static void kvm_main_loop_wait(CPUState *env, int timeout)
 {
+    struct vcpu_info *vi = &env->vcpu_info;
     struct timespec ts;
     int r, e;
     siginfo_t siginfo;
@@ -276,49 +275,55 @@
     cpu_single_env = env;
     flush_queued_work(env);
 
-    if (vcpu_info[env->cpu_index].stop) {
-	vcpu_info[env->cpu_index].stop = 0;
-	vcpu_info[env->cpu_index].stopped = 1;
+    if (vi->stop) {
+	vi->stop = 0;
+	vi->stopped = 1;
 	pthread_cond_signal(&qemu_pause_cond);
     }
 
-    vcpu_info[env->cpu_index].signalled = 0;
+    vi->signalled = 0;
 }
 
 static int all_threads_paused(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->vcpu_info.stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
 
-    for (i = 0; i < smp_cpus; ++i)
-	if (vcpu_info[i].stop)
-	    return 0;
     return 1;
 }
 
 static void pause_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 1;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 1;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
+
     while (!all_threads_paused())
 	qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 0;
-	vcpu_info[i].stopped = 0;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 0;
+        penv->vcpu_info.stopped = 0;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
 }
 
@@ -333,8 +338,8 @@
 static void update_regs_for_sipi(CPUState *env)
 {
     kvm_arch_update_regs_for_sipi(env);
-    vcpu_info[env->cpu_index].sipi_needed = 0;
-    vcpu_info[env->cpu_index].init = 0;
+    env->vcpu_info.sipi_needed = 0;
+    env->vcpu_info.init = 0;
 }
 
 static void update_regs_for_init(CPUState *env)
@@ -361,21 +366,23 @@
 
 void qemu_kvm_system_reset(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     pause_all_threads();
 
     qemu_system_reset();
 
-    for (i = 0; i < smp_cpus; ++i)
-	kvm_arch_cpu_reset(vcpu_info[i].env);
+    while (penv) {
+        kvm_arch_cpu_reset(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
 
     resume_all_threads();
 }
 
 static int kvm_main_loop_cpu(CPUState *env)
 {
-    struct vcpu_info *info = &vcpu_info[env->cpu_index];
+    struct vcpu_info *info = &env->vcpu_info;
 
     setup_kernel_sigmask(env);
 
@@ -414,9 +421,8 @@
     CPUState *env = _env;
     sigset_t signals;
 
-    vcpu = &vcpu_info[env->cpu_index];
-    vcpu->env = env;
-    vcpu->env->thread_id = kvm_get_thread_id();
+    current_env = env;
+    env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
     kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -424,7 +430,7 @@
 
     /* signal VCPU creation */
     pthread_mutex_lock(&qemu_mutex);
-    vcpu->created = 1;
+    current_env->vcpu_info.created = 1;
     pthread_cond_signal(&qemu_vcpu_cond);
 
     /* and wait for machine initialization */
@@ -438,9 +444,9 @@
 
 void kvm_init_new_ap(int cpu, CPUState *env)
 {
-    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+    pthread_create(&env->vcpu_info.thread, NULL, ap_main_loop, env);
 
-    while (vcpu_info[cpu].created == 0)
+    while (env->vcpu_info.created == 0)
 	qemu_cond_wait(&qemu_vcpu_cond);
 }
 
@@ -598,8 +604,12 @@
 
 static int kvm_debug(void *opaque, int vcpu)
 {
+    CPUState *env;
+
+    env = qemu_kvm_cpu_env(vcpu);
+
     kvm_debug_stop_requested = 1;
-    vcpu_info[vcpu].stopped = 1;
+    env->vcpu_info.stopped = 1;
     return 1;
 }
 
@@ -695,8 +705,11 @@
 
 static int kvm_shutdown(void *opaque, int vcpu)
 {
+    CPUState *env;
+
+    env = qemu_kvm_cpu_env(cpu_single_env->cpu_index);
     /* stop the current vcpu from going back to guest mode */
-    vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+    env->vcpu_info.stopped = 1;
 
     qemu_system_reset_request();
     return 1;
Index: kvm-userspace.git/qemu/target-i386/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-i386/cpu.h
+++ kvm-userspace.git/qemu/target-i386/cpu.h
@@ -45,6 +45,7 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
 
 #define R_EAX 0
 #define R_ECX 1
@@ -601,6 +602,9 @@
 #define NR_IRQ_WORDS (256/ BITS_PER_LONG)
     uint32_t kvm_interrupt_bitmap[NR_IRQ_WORDS];
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct APICState *apic_state;
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -41,10 +41,15 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
+
 typedef struct CPUIA64State {
     CPU_COMMON;
     uint32_t hflags;
     int mp_state;
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
 } CPUIA64State;
 
 #define CPUState CPUIA64State
Index: kvm-userspace.git/qemu/target-ppc/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ppc/cpu.h
+++ kvm-userspace.git/qemu/target-ppc/cpu.h
@@ -22,6 +22,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include "qemu-kvm-vcpu.h"
 
 //#define PPC_EMULATE_32BITS_HYPV
 
@@ -578,6 +579,10 @@
 
     CPU_COMMON
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
+
     int access_type; /* when a memory exception occurs, the access
                         type is stored here */
 


* Re: [patch] fold struct vcpu_info into CPUState
  2008-09-29 15:24 [patch] fold struct vcpu_info into CPUState Jes Sorensen
@ 2008-10-05 10:02 ` Avi Kivity
  2008-10-05 20:48   ` Glauber Costa
  2008-10-13 22:24 ` Glauber Costa
  1 sibling, 1 reply; 15+ messages in thread
From: Avi Kivity @ 2008-10-05 10:02 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: kvm, kvm-ia64, Glauber Costa

Jes Sorensen wrote:
> Hi,
>
> Trying to get rid of the static declaration of the vcpu_info array, this
> patch folds it into CPUState. I have only been able to test it on ia64,
> where it seems to behave as expected; however, ia64 SMP support is still
> a bit flaky, so I would appreciate it if someone would test this on x86
> and PPC. It builds on x86_64 at least.
>
> Comments appreciated.

Looks reasonable to me.  Glommer, please review this as well.  Does this 
help or interfere with your efforts?

-- 
error compiling committee.c: too many arguments to function



* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-05 10:02 ` Avi Kivity
@ 2008-10-05 20:48   ` Glauber Costa
  0 siblings, 0 replies; 15+ messages in thread
From: Glauber Costa @ 2008-10-05 20:48 UTC (permalink / raw)
  To: Avi Kivity; +Cc: Jes Sorensen, kvm, kvm-ia64, Glauber Costa

On Sun, Oct 5, 2008 at 12:02 PM, Avi Kivity <avi@redhat.com> wrote:
> Jes Sorensen wrote:
>>
>> Hi,
>>
>> Trying to get rid of the static declaration of the vcpu_info array, this
>> patch folds it into CPUState. I have only been able to test it on ia64,
>> where it seems to behave as expected; however, ia64 SMP support is still
>> a bit flaky, so I would appreciate it if someone would test this on x86
>> and PPC. It builds on x86_64 at least.
>>
>> Comments appreciated.
>
> Looks reasonable to me.  Glommer, please review this as well.  Does this
> help or interfere with your efforts?
I am travelling this week, and will only be able to provide an
extensive review in a few days. However, Jes and I have already
discussed this, and it seems to get along fine with what I am doing.

It makes sense for the accelerator to be able to register an opaque of
its own in the CPU state, so I will be working on a patch for exactly
that shortly.
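
For example (purely illustrative: the accel_opaque name is made up and
no such field exists yet), the per-target CPUState could carry an opaque
pointer instead of embedding struct vcpu_info directly:

    /* hypothetical field in the per-target CPUState definition */
    void *accel_opaque;          /* set and owned by the accelerator */

    /* qemu-kvm would then reach its per-vcpu state via a cast */
    struct vcpu_info *vi = (struct vcpu_info *)env->accel_opaque;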

>
> --
> error compiling committee.c: too many arguments to function
>



-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."


* Re: [patch] fold struct vcpu_info into CPUState
  2008-09-29 15:24 [patch] fold struct vcpu_info into CPUState Jes Sorensen
  2008-10-05 10:02 ` Avi Kivity
@ 2008-10-13 22:24 ` Glauber Costa
  2008-10-17 15:28   ` Jes Sorensen
  1 sibling, 1 reply; 15+ messages in thread
From: Glauber Costa @ 2008-10-13 22:24 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: kvm, kvm-ia64

> Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
> ===================================================================
> --- /dev/null
> +++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
Can't it be just qemu-kvm.h?

> Index: kvm-userspace.git/qemu/qemu-kvm.c
> ===================================================================
> --- kvm-userspace.git.orig/qemu/qemu-kvm.c
> +++ kvm-userspace.git/qemu/qemu-kvm.c
> @@ -22,13 +22,13 @@
>  #include "compatfd.h"
>
> +__thread struct CPUState *current_env;
>
>  static int qemu_system_ready;
>
>  #define SIG_IPI (SIGRTMIN+4)
>

>  pthread_t io_thread;
>  static int io_thread_fd = -1;
>  static int io_thread_sigfd = -1;
> @@ -93,7 +74,20 @@
>
>  CPUState *qemu_kvm_cpu_env(int index)
>  {
> -    return vcpu_info[index].env;
> +    CPUState *penv;
> +
> +    if (current_env->cpu_index == index)
> +        return current_env;
> +
> +    penv = first_cpu;
> +
> +    while (penv) {
> +        if (penv->cpu_index == index)
> +            return penv;
> +        penv = (CPUState *)penv->next_cpu;
> +    }
> +
> +    return NULL;
>  }

This doesn't seem right. This function exists because we used to have
separate vcpu and env structs that had to be tied together in some
uses. Now there's absolutely nothing in here that is KVM-specific;
this is just a function that returns an env given a cpu number.
You'll lose the current_env optimization, which probably matters a lot
in your case, but I'm afraid you will just have to live with that:
it sucks for qemu too, and when it is fixed, it will be fixed for both
(meaning getting rid of the ugly cpu_single_env).
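
For illustration, such a generic helper would be little more than a walk
of the first_cpu list; the name qemu_get_cpu_env is made up here, no such
function exists in qemu today:

    CPUState *qemu_get_cpu_env(int index)
    {
        CPUState *penv;

        for (penv = first_cpu; penv; penv = (CPUState *)penv->next_cpu) {
            if (penv->cpu_index == index)
                return penv;
        }
        return NULL;
    }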

>     if (env) {
> -        if (!vcpu)
> +        if (!current_env->vcpu_info.created)
>             signal = 1;

!vcpu is probably meant to catch the case in which the vcpu TLS
variable is not yet assigned. By dereferencing current_env here,
you are probably doing an invalid access. So unless you can prove this
is not an issue, you should add another test.
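
One possible form of that extra test, keeping the semantics of the
original !vcpu check (a sketch only, untested):

    /* signal if this thread has no vcpu set up yet */
    if (!current_env || !current_env->vcpu_info.created)
        signal = 1;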

> -        if (vcpu && env != vcpu->env &&
> !vcpu_info[env->cpu_index].signalled)
> +        /*
> +         * Testing for vcpu_info.created here is really redundant
> +         */
> +        if (current_env->vcpu_info.created &&
> +            env != current_env && env->vcpu_info.signalled)

should be !env->vcpu_info.signalled instead?

>  static void flush_queued_work(CPUState *env)
>  {
> -    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
> +    struct vcpu_info *vi = &env->vcpu_info;
>     struct qemu_kvm_work_item *wi;

"vi" is not a good name, since emacs users will likely complain.
vcpu_info is a much better name.


The rest seems pretty straightforward.

-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."


* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-13 22:24 ` Glauber Costa
@ 2008-10-17 15:28   ` Jes Sorensen
  2008-10-17 21:27     ` Glauber Costa
  0 siblings, 1 reply; 15+ messages in thread
From: Jes Sorensen @ 2008-10-17 15:28 UTC (permalink / raw)
  To: Glauber Costa; +Cc: kvm, kvm-ia64

[-- Attachment #1: Type: text/plain, Size: 2381 bytes --]

Glauber Costa wrote:
>> @@ -93,7 +74,20 @@
>>
>>  CPUState *qemu_kvm_cpu_env(int index)
>>  {
[snip]
>>  }
> 
> This doesn't seem right. This function exists because we used to have
> separate vcpu and env structs that had to be tied together in some
> uses. Now there's absolutely nothing in here that is KVM-specific;
> this is just a function that returns an env given a cpu number.
> You'll lose the current_env optimization, which probably matters a lot
> in your case, but I'm afraid you will just have to live with that:
> it sucks for qemu too, and when it is fixed, it will be fixed for both
> (meaning getting rid of the ugly cpu_single_env).

Hi Glauber,

I see this function as a temporary, transitional thing; it wasn't
my intention for it to stay permanently, and it's not really called from
a lot of performance-critical places for now.

>>     if (env) {
>> -        if (!vcpu)
>> +        if (!current_env->vcpu_info.created)
>>             signal = 1;
> 
> !vcpu is probably meant to catch the case in which the vcpu TLS
> variable is not yet assigned. By dereferencing current_env here,
> you are probably doing an invalid access. So unless you can prove this
> is not an issue, you should add another test.

Hmmm, I think the test got lost because I moved this over in multiple
steps. It used to be a thread variable, 'cpu_info', but we have no
guarantee here that current_env is valid. Thanks.

>> -        if (vcpu && env != vcpu->env &&
>> !vcpu_info[env->cpu_index].signalled)
>> +        /*
>> +         * Testing for vcpu_info.created here is really redundant
>> +         */
>> +        if (current_env->vcpu_info.created &&
>> +            env != current_env && env->vcpu_info.signalled)
> 
> should be !env->vcpu_info.signalled instead?

Good catch!

>>  static void flush_queued_work(CPUState *env)
>>  {
>> -    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
>> +    struct vcpu_info *vi = &env->vcpu_info;
>>     struct qemu_kvm_work_item *wi;
> 
> "vi" is not a good name, since emacs users will likely complain.
> vcpu_info is a much better name.

I'm an Emacs user! It's just a temporary variable; I really don't think
this matters. We could name it 'ed' just to be different, if you prefer.

Here's an updated version of that patch.

Cheers,
Jes

PS: I am gone through Wednesday, so that should give you a few days to
     review :)

[-- Attachment #2: 0010-qemu-kvm-vcpu_info-v2.patch --]
[-- Type: text/plain, Size: 11375 bytes --]

Merge vcpu_info into CPUState.

Moves the definition of the vcpu-related structs to a new header,
qemu-kvm-vcpu.h, and declares this struct in the i386/ia64/ppc CPUState
structs if USE_KVM is defined. In addition, converts qemu-kvm.c to pull
vcpu_info out of CPUState.

This eliminates the ugly statically sized array of struct vcpu_info.

Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 qemu/qemu-kvm-vcpu.h   |   34 +++++++++++
 qemu/qemu-kvm.c        |  143 ++++++++++++++++++++++++++-----------------------
 qemu/target-i386/cpu.h |    4 +
 qemu/target-ia64/cpu.h |    5 +
 qemu/target-ppc/cpu.h  |    5 +
 5 files changed, 126 insertions(+), 65 deletions(-)

Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
===================================================================
--- /dev/null
+++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
@@ -0,0 +1,34 @@
+/*
+ * qemu/kvm vcpu definitions
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+#ifndef QEMU_KVM_VCPU_H
+#define QEMU_KVM_VCPU_H
+
+#include <pthread.h>
+
+struct qemu_kvm_work_item {
+    struct qemu_kvm_work_item *next;
+    void (*func)(void *data);
+    void *data;
+    int done;
+};
+
+/*
+ * KVM vcpu struct
+ */
+struct vcpu_info {
+    int sipi_needed;
+    int init;
+    pthread_t thread;
+    int signalled;
+    int stop;
+    int stopped;
+    int created;
+    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
+};
+
+#endif
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -22,13 +22,13 @@
 #include "compatfd.h"
 
 #include "qemu-kvm.h"
+#include "qemu-kvm-vcpu.h"
 #include <libkvm.h>
 #include <pthread.h>
 #include <sys/utsname.h>
 #include <sys/syscall.h>
 #include <sys/mman.h>
 
-#define bool _Bool
 #define false 0
 #define true 1
 
@@ -43,31 +43,12 @@
 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
 
 static int qemu_system_ready;
 
 #define SIG_IPI (SIGRTMIN+4)
 
-struct qemu_kvm_work_item {
-    struct qemu_kvm_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    bool done;
-};
-
-struct vcpu_info {
-    CPUState *env;
-    int sipi_needed;
-    int init;
-    pthread_t thread;
-    int signalled;
-    int stop;
-    int stopped;
-    int created;
-    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
 pthread_t io_thread;
 static int io_thread_fd = -1;
 static int io_thread_sigfd = -1;
@@ -93,7 +74,20 @@
 
 CPUState *qemu_kvm_cpu_env(int index)
 {
-    return vcpu_info[index].env;
+    CPUState *penv;
+
+    if (current_env->cpu_index == index)
+        return current_env;
+
+    penv = first_cpu;
+
+    while (penv) {
+        if (penv->cpu_index == index)
+            return penv;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return NULL;
 }
 
 static void sig_ipi_handler(int n)
@@ -102,10 +96,10 @@
 
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item wi;
 
-    if (vi == vcpu) {
+    if (env == current_env) {
         func(data);
         return;
     }
@@ -127,7 +121,7 @@
 
 static void inject_interrupt(void *data)
 {
-    cpu_interrupt(vcpu->env, (int)data);
+    cpu_interrupt(current_env, (int)data);
 }
 
 void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +134,33 @@
     int signal = 0;
 
     if (env) {
-        if (!vcpu)
+        if (current_env && !current_env->vcpu_info.created)
             signal = 1;
-        if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+        /*
+         * Testing for vcpu_info.created here is really redundant
+         */
+        if (current_env->vcpu_info.created &&
+            env != current_env && !env->vcpu_info.signalled)
             signal = 1;
 
         if (signal) {
-            vcpu_info[env->cpu_index].signalled = 1;
-                if (vcpu_info[env->cpu_index].thread)
-                    pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+            env->vcpu_info.signalled = 1;
+            if (env->vcpu_info.thread)
+                pthread_kill(env->vcpu_info.thread, SIG_IPI);
         }
     }
 }
 
 void kvm_update_after_sipi(CPUState *env)
 {
-    vcpu_info[env->cpu_index].sipi_needed = 1;
+    env->vcpu_info.sipi_needed = 1;
     kvm_update_interrupt_request(env);
 }
 
 void kvm_apic_init(CPUState *env)
 {
     if (env->cpu_index != 0)
-	vcpu_info[env->cpu_index].init = 1;
+	env->vcpu_info.init = 1;
     kvm_update_interrupt_request(env);
 }
 
@@ -240,7 +238,7 @@
 
 static int has_work(CPUState *env)
 {
-    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+    if (!vm_running || (env && env->vcpu_info.stopped))
 	return 0;
     if (!env->halted)
 	return 1;
@@ -249,7 +247,7 @@
 
 static void flush_queued_work(CPUState *env)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item *wi;
 
     if (!vi->queued_work_first)
@@ -266,6 +264,7 @@
 
 static void kvm_main_loop_wait(CPUState *env, int timeout)
 {
+    struct vcpu_info *vi = &env->vcpu_info;
     struct timespec ts;
     int r, e;
     siginfo_t siginfo;
@@ -291,49 +290,55 @@
     cpu_single_env = env;
     flush_queued_work(env);
 
-    if (vcpu_info[env->cpu_index].stop) {
-	vcpu_info[env->cpu_index].stop = 0;
-	vcpu_info[env->cpu_index].stopped = 1;
+    if (vi->stop) {
+	vi->stop = 0;
+	vi->stopped = 1;
 	pthread_cond_signal(&qemu_pause_cond);
     }
 
-    vcpu_info[env->cpu_index].signalled = 0;
+    vi->signalled = 0;
 }
 
 static int all_threads_paused(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->vcpu_info.stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
 
-    for (i = 0; i < smp_cpus; ++i)
-	if (vcpu_info[i].stop)
-	    return 0;
     return 1;
 }
 
 static void pause_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 1;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 1;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
+
     while (!all_threads_paused())
 	qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 0;
-	vcpu_info[i].stopped = 0;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 0;
+        penv->vcpu_info.stopped = 0;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
 }
 
@@ -348,8 +353,8 @@
 static void update_regs_for_sipi(CPUState *env)
 {
     kvm_arch_update_regs_for_sipi(env);
-    vcpu_info[env->cpu_index].sipi_needed = 0;
-    vcpu_info[env->cpu_index].init = 0;
+    env->vcpu_info.sipi_needed = 0;
+    env->vcpu_info.init = 0;
 }
 
 static void update_regs_for_init(CPUState *env)
@@ -376,21 +381,23 @@
 
 void qemu_kvm_system_reset(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     pause_all_threads();
 
     qemu_system_reset();
 
-    for (i = 0; i < smp_cpus; ++i)
-	kvm_arch_cpu_reset(vcpu_info[i].env);
+    while (penv) {
+        kvm_arch_cpu_reset(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
 
     resume_all_threads();
 }
 
 static int kvm_main_loop_cpu(CPUState *env)
 {
-    struct vcpu_info *info = &vcpu_info[env->cpu_index];
+    struct vcpu_info *info = &env->vcpu_info;
 
     setup_kernel_sigmask(env);
 
@@ -429,9 +436,8 @@
     CPUState *env = _env;
     sigset_t signals;
 
-    vcpu = &vcpu_info[env->cpu_index];
-    vcpu->env = env;
-    vcpu->env->thread_id = kvm_get_thread_id();
+    current_env = env;
+    env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
     kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -439,7 +445,7 @@
 
     /* signal VCPU creation */
     pthread_mutex_lock(&qemu_mutex);
-    vcpu->created = 1;
+    current_env->vcpu_info.created = 1;
     pthread_cond_signal(&qemu_vcpu_cond);
 
     /* and wait for machine initialization */
@@ -453,9 +459,9 @@
 
 void kvm_init_new_ap(int cpu, CPUState *env)
 {
-    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+    pthread_create(&env->vcpu_info.thread, NULL, ap_main_loop, env);
 
-    while (vcpu_info[cpu].created == 0)
+    while (env->vcpu_info.created == 0)
 	qemu_cond_wait(&qemu_vcpu_cond);
 }
 
@@ -613,8 +619,12 @@
 
 static int kvm_debug(void *opaque, int vcpu)
 {
+    CPUState *env;
+
+    env = qemu_kvm_cpu_env(vcpu);
+
     kvm_debug_stop_requested = 1;
-    vcpu_info[vcpu].stopped = 1;
+    env->vcpu_info.stopped = 1;
     return 1;
 }
 
@@ -710,8 +720,11 @@
 
 static int kvm_shutdown(void *opaque, int vcpu)
 {
+    CPUState *env;
+
+    env = qemu_kvm_cpu_env(cpu_single_env->cpu_index);
     /* stop the current vcpu from going back to guest mode */
-    vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+    env->vcpu_info.stopped = 1;
 
     qemu_system_reset_request();
     return 1;
Index: kvm-userspace.git/qemu/target-i386/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-i386/cpu.h
+++ kvm-userspace.git/qemu/target-i386/cpu.h
@@ -45,6 +45,7 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
 
 #define R_EAX 0
 #define R_ECX 1
@@ -622,6 +623,9 @@
 #define NR_IRQ_WORDS (256/ BITS_PER_LONG)
     uint32_t kvm_interrupt_bitmap[NR_IRQ_WORDS];
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct APICState *apic_state;
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,10 +40,15 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
+
 typedef struct CPUIA64State {
     CPU_COMMON;
     uint32_t hflags;
     int mp_state;
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
 } CPUIA64State;
 
 #define CPUState CPUIA64State
Index: kvm-userspace.git/qemu/target-ppc/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ppc/cpu.h
+++ kvm-userspace.git/qemu/target-ppc/cpu.h
@@ -22,6 +22,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include "qemu-kvm-vcpu.h"
 
 //#define PPC_EMULATE_32BITS_HYPV
 
@@ -578,6 +579,10 @@
 
     CPU_COMMON
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
+
     int access_type; /* when a memory exception occurs, the access
                         type is stored here */
 


* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-17 15:28   ` Jes Sorensen
@ 2008-10-17 21:27     ` Glauber Costa
  2008-10-24 15:57       ` Jes Sorensen
  0 siblings, 1 reply; 15+ messages in thread
From: Glauber Costa @ 2008-10-17 21:27 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: kvm, kvm-ia64

On Fri, Oct 17, 2008 at 1:28 PM, Jes Sorensen <jes@sgi.com> wrote:
> Glauber Costa wrote:
>>>
>>> @@ -93,7 +74,20 @@
>>>
>>>  CPUState *qemu_kvm_cpu_env(int index)
>>>  {
>
> [snip]
>>>
>>>  }
>>
>> This doesn't seem right. This function exists because we used to have
>> separate vcpu and env structs that had to be tied together in some
>> uses. Now there's absolutely nothing in here that is KVM-specific;
>> this is just a function that returns an env given a cpu number.
>> You'll lose the current_env optimization, which probably matters a lot
>> in your case, but I'm afraid you will just have to live with that:
>> it sucks for qemu too, and when it is fixed, it will be fixed for both
>> (meaning getting rid of the ugly cpu_single_env).
>
> Hi Glauber,
>
> I see this function as a temporary, transitional thing; it wasn't
> my intention for it to stay permanently, and it's not really called from
> a lot of performance-critical places for now.

One more reason to provide a general-purpose qemu function instead of
a kvm-specific one.

As for the patch itself, I'll re-review it later. Stay tuned.

> [snip]



-- 
Glauber  Costa.
"Free as in Freedom"
http://glommer.net

"The less confident you are, the more serious you have to act."


* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-17 21:27     ` Glauber Costa
@ 2008-10-24 15:57       ` Jes Sorensen
  2008-10-24 19:10         ` Hollis Blanchard
  0 siblings, 1 reply; 15+ messages in thread
From: Jes Sorensen @ 2008-10-24 15:57 UTC (permalink / raw)
  To: Glauber Costa; +Cc: kvm, kvm-ia64, Hollis Blanchard

[-- Attachment #1: Type: text/plain, Size: 383 bytes --]

Hi,

Here's an updated version of the patch, which builds against Avi's
current tree. Please note this is a bit of a work-in-progress version,
as I am still trying to eliminate the need for the linear vcpu -> env
conversion, but I wanted to send out something before heading off for
the weekend.

Hollis, if you have time to check this out for PPC, I would much
appreciate it.

Cheers,
Jes


[-- Attachment #2: 0010-qemu-kvm-vcpu_info-v3.patch --]
[-- Type: text/plain, Size: 15225 bytes --]

Merge vcpu_info into CPUState.

Moves the definition of the vcpu-related structs to a new header,
qemu-kvm-vcpu.h, and declares this struct in the i386/ia64/ppc CPUState
structs if USE_KVM is defined. In addition, converts qemu-kvm.c to pull
vcpu_info out of CPUState.

This eliminates the ugly statically sized array of struct vcpu_info.

Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 libkvm/kvm-common.h    |    4 -
 libkvm/libkvm.c        |   14 ++--
 libkvm/libkvm.h        |    6 +-
 qemu/qemu-kvm-vcpu.h   |   34 +++++++++++
 qemu/qemu-kvm.c        |  146 +++++++++++++++++++++++++------------------------
 qemu/target-i386/cpu.h |    4 +
 qemu/target-ia64/cpu.h |    5 +
 qemu/target-ppc/cpu.h  |    5 +
 8 files changed, 136 insertions(+), 82 deletions(-)

Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
 void kvm_show_code(kvm_context_t kvm, int vcpu);
 
 int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
 void post_kvm_run(kvm_context_t kvm, int vcpu);
 int pre_kvm_run(kvm_context_t kvm, int vcpu);
 int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
 int try_push_interrupts(kvm_context_t kvm);
 
 #endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
 	return 0;
 }
 
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->debug(kvm->opaque, vcpu);
+	return kvm->callbacks->debug(kvm->opaque, env);
 }
 
 int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
 	return kvm->callbacks->halt(kvm->opaque, vcpu);
 }
 
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+	return kvm->callbacks->shutdown(kvm->opaque, env);
 }
 
 int try_push_interrupts(kvm_context_t kvm)
@@ -872,7 +872,7 @@
 #endif
 }
 
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
 {
 	int r;
 	int fd = kvm->vcpu_fd[vcpu];
@@ -948,7 +948,7 @@
 			r = handle_io(kvm, run, vcpu);
 			break;
 		case KVM_EXIT_DEBUG:
-			r = handle_debug(kvm, vcpu);
+			r = handle_debug(kvm, env);
 			break;
 		case KVM_EXIT_MMIO:
 			r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
 #endif
 			break;
 		case KVM_EXIT_SHUTDOWN:
-			r = handle_shutdown(kvm, vcpu);
+			r = handle_shutdown(kvm, env);
 			break;
 #if defined(__s390__)
 		case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
 	/// generic memory writes to unmapped memory (For MMIO devices)
     int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
 					int len);
-    int (*debug)(void *opaque, int vcpu);
+    int (*debug)(void *opaque, void *env);
 	/*!
 	 * \brief Called when the VCPU issues an 'hlt' instruction.
 	 *
@@ -63,7 +63,7 @@
 	 * on the host CPU.
 	 */
     int (*halt)(void *opaque, int vcpu);
-    int (*shutdown)(void *opaque, int vcpu);
+    int (*shutdown)(void *opaque, void *env);
     int (*io_window)(void *opaque);
     int (*try_push_interrupts)(void *opaque);
     int (*try_push_nmi)(void *opaque);
@@ -181,7 +181,7 @@
  * return except for when an error has occured, or when you have sent it
  * an EINTR signal.
  */
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
===================================================================
--- /dev/null
+++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
@@ -0,0 +1,34 @@
+/*
+ * qemu/kvm vcpu definitions
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+#ifndef QEMU_KVM_VCPU_H
+#define QEMU_KVM_VCPU_H
+
+#include <pthread.h>
+
+struct qemu_kvm_work_item {
+    struct qemu_kvm_work_item *next;
+    void (*func)(void *data);
+    void *data;
+    int done;
+};
+
+/*
+ * KVM vcpu struct
+ */
+struct vcpu_info {
+    int sipi_needed;
+    int init;
+    pthread_t thread;
+    int signalled;
+    int stop;
+    int stopped;
+    int created;
+    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
+};
+
+#endif
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -22,13 +22,13 @@
 #include "compatfd.h"
 
 #include "qemu-kvm.h"
+#include "qemu-kvm-vcpu.h"
 #include <libkvm.h>
 #include <pthread.h>
 #include <sys/utsname.h>
 #include <sys/syscall.h>
 #include <sys/mman.h>
 
-#define bool _Bool
 #define false 0
 #define true 1
 
@@ -43,31 +43,12 @@
 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
 
 static int qemu_system_ready;
 
 #define SIG_IPI (SIGRTMIN+4)
 
-struct qemu_kvm_work_item {
-    struct qemu_kvm_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    bool done;
-};
-
-struct vcpu_info {
-    CPUState *env;
-    int sipi_needed;
-    int init;
-    pthread_t thread;
-    int signalled;
-    int stop;
-    int stopped;
-    int created;
-    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
 pthread_t io_thread;
 static int io_thread_fd = -1;
 static int io_thread_sigfd = -1;
@@ -93,7 +74,20 @@
 
 CPUState *qemu_kvm_cpu_env(int index)
 {
-    return vcpu_info[index].env;
+    CPUState *penv;
+
+    if (current_env->cpu_index == index)
+        return current_env;
+
+    penv = first_cpu;
+
+    while (penv) {
+        if (penv->cpu_index == index)
+            return penv;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return NULL;
 }
 
 static void sig_ipi_handler(int n)
@@ -102,10 +96,10 @@
 
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item wi;
 
-    if (vi == vcpu) {
+    if (env == current_env) {
         func(data);
         return;
     }
@@ -127,7 +121,7 @@
 
 static void inject_interrupt(void *data)
 {
-    cpu_interrupt(vcpu->env, (int)data);
+    cpu_interrupt(current_env, (int)data);
 }
 
 void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +134,33 @@
     int signal = 0;
 
     if (env) {
-        if (!vcpu)
+        if (current_env && !current_env->vcpu_info.created)
             signal = 1;
-        if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+        /*
+         * Testing for vcpu_info.created here is really redundant
+         */
+        if (current_env->vcpu_info.created &&
+            env != current_env && !env->vcpu_info.signalled)
             signal = 1;
 
         if (signal) {
-            vcpu_info[env->cpu_index].signalled = 1;
-                if (vcpu_info[env->cpu_index].thread)
-                    pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+            env->vcpu_info.signalled = 1;
+            if (env->vcpu_info.thread)
+                pthread_kill(env->vcpu_info.thread, SIG_IPI);
         }
     }
 }
 
 void kvm_update_after_sipi(CPUState *env)
 {
-    vcpu_info[env->cpu_index].sipi_needed = 1;
+    env->vcpu_info.sipi_needed = 1;
     kvm_update_interrupt_request(env);
 }
 
 void kvm_apic_init(CPUState *env)
 {
     if (env->cpu_index != 0)
-	vcpu_info[env->cpu_index].init = 1;
+	env->vcpu_info.init = 1;
     kvm_update_interrupt_request(env);
 }
 
@@ -227,7 +225,7 @@
 {
     int r;
 
-    r = kvm_run(kvm_context, env->cpu_index);
+    r = kvm_run(kvm_context, env->cpu_index, env);
     if (r < 0) {
         printf("kvm_run returned %d\n", r);
         exit(1);
@@ -240,7 +238,7 @@
 
 static int has_work(CPUState *env)
 {
-    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+    if (!vm_running || (env && env->vcpu_info.stopped))
 	return 0;
     if (!env->halted)
 	return 1;
@@ -249,7 +247,7 @@
 
 static void flush_queued_work(CPUState *env)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item *wi;
 
     if (!vi->queued_work_first)
@@ -266,6 +264,7 @@
 
 static void kvm_main_loop_wait(CPUState *env, int timeout)
 {
+    struct vcpu_info *vi = &env->vcpu_info;
     struct timespec ts;
     int r, e;
     siginfo_t siginfo;
@@ -291,49 +290,55 @@
     cpu_single_env = env;
     flush_queued_work(env);
 
-    if (vcpu_info[env->cpu_index].stop) {
-	vcpu_info[env->cpu_index].stop = 0;
-	vcpu_info[env->cpu_index].stopped = 1;
+    if (vi->stop) {
+	vi->stop = 0;
+	vi->stopped = 1;
 	pthread_cond_signal(&qemu_pause_cond);
     }
 
-    vcpu_info[env->cpu_index].signalled = 0;
+    vi->signalled = 0;
 }
 
 static int all_threads_paused(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->vcpu_info.stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
 
-    for (i = 0; i < smp_cpus; ++i)
-	if (vcpu_info[i].stop)
-	    return 0;
     return 1;
 }
 
 static void pause_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 1;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 1;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
+
     while (!all_threads_paused())
 	qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 0;
-	vcpu_info[i].stopped = 0;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 0;
+        penv->vcpu_info.stopped = 0;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
 }
 
@@ -348,7 +353,7 @@
 static void update_regs_for_sipi(CPUState *env)
 {
     kvm_arch_update_regs_for_sipi(env);
-    vcpu_info[env->cpu_index].sipi_needed = 0;
+    env->vcpu_info.sipi_needed = 0;
 }
 
 static void update_regs_for_init(CPUState *env)
@@ -361,11 +366,11 @@
 
 #ifdef TARGET_I386
     /* restore SIPI vector */
-    if(vcpu_info[env->cpu_index].sipi_needed)
+    if(env->vcpu_info.sipi_needed)
         env->segs[R_CS] = cs;
-
-    vcpu_info[env->cpu_index].init = 0;
 #endif
+
+    env->vcpu_info.init = 0;
     kvm_arch_load_regs(env);
 }
 
@@ -387,21 +392,23 @@
 
 void qemu_kvm_system_reset(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     pause_all_threads();
 
     qemu_system_reset();
 
-    for (i = 0; i < smp_cpus; ++i)
-	kvm_arch_cpu_reset(vcpu_info[i].env);
+    while (penv) {
+        kvm_arch_cpu_reset(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
 
     resume_all_threads();
 }
 
 static int kvm_main_loop_cpu(CPUState *env)
 {
-    struct vcpu_info *info = &vcpu_info[env->cpu_index];
+    struct vcpu_info *info = &env->vcpu_info;
 
     setup_kernel_sigmask(env);
 
@@ -442,9 +449,8 @@
     CPUState *env = _env;
     sigset_t signals;
 
-    vcpu = &vcpu_info[env->cpu_index];
-    vcpu->env = env;
-    vcpu->env->thread_id = kvm_get_thread_id();
+    current_env = env;
+    env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
     kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -452,7 +458,7 @@
 
     /* signal VCPU creation */
     pthread_mutex_lock(&qemu_mutex);
-    vcpu->created = 1;
+    current_env->vcpu_info.created = 1;
     pthread_cond_signal(&qemu_vcpu_cond);
 
     /* and wait for machine initialization */
@@ -466,9 +472,9 @@
 
 void kvm_init_new_ap(int cpu, CPUState *env)
 {
-    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+    pthread_create(&env->vcpu_info.thread, NULL, ap_main_loop, env);
 
-    while (vcpu_info[cpu].created == 0)
+    while (env->vcpu_info.created == 0)
 	qemu_cond_wait(&qemu_vcpu_cond);
 }
 
@@ -624,10 +630,10 @@
     return 0;
 }
 
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, struct CPUState *env)
 {
     kvm_debug_stop_requested = 1;
-    vcpu_info[vcpu].stopped = 1;
+    env->vcpu_info.stopped = 1;
     return 1;
 }
 
@@ -721,10 +727,10 @@
     return kvm_arch_halt(opaque, vcpu);
 }
 
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, struct CPUState *env)
 {
     /* stop the current vcpu from going back to guest mode */
-    vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+    env->vcpu_info.stopped = 1;
 
     qemu_system_reset_request();
     return 1;
Index: kvm-userspace.git/qemu/target-i386/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-i386/cpu.h
+++ kvm-userspace.git/qemu/target-i386/cpu.h
@@ -45,6 +45,7 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
 
 #define R_EAX 0
 #define R_ECX 1
@@ -622,6 +623,9 @@
 #define NR_IRQ_WORDS (256/ BITS_PER_LONG)
     uint32_t kvm_interrupt_bitmap[NR_IRQ_WORDS];
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct APICState *apic_state;
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,10 +40,15 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
+
 typedef struct CPUIA64State {
     CPU_COMMON;
     uint32_t hflags;
     int mp_state;
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
 } CPUIA64State;
 
 #define CPUState CPUIA64State
Index: kvm-userspace.git/qemu/target-ppc/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ppc/cpu.h
+++ kvm-userspace.git/qemu/target-ppc/cpu.h
@@ -22,6 +22,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include "qemu-kvm-vcpu.h"
 
 //#define PPC_EMULATE_32BITS_HYPV
 
@@ -578,6 +579,10 @@
 
     CPU_COMMON
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
+
     int access_type; /* when a memory exception occurs, the access
                         type is stored here */
 


* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-24 15:57       ` Jes Sorensen
@ 2008-10-24 19:10         ` Hollis Blanchard
  2008-10-27  9:48           ` Jes Sorensen
  2008-10-27 16:06           ` [patch] " Jes Sorensen
  0 siblings, 2 replies; 15+ messages in thread
From: Hollis Blanchard @ 2008-10-24 19:10 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: Glauber Costa, kvm, kvm-ia64

On Fri, 2008-10-24 at 17:57 +0200, Jes Sorensen wrote:
> Hi,
> 
> Here's an updated version of the patch, which builds against Avi's
> current tree. Please note this is a bit of a work in progress version
> as I am trying to eliminate the need for the vcpu -> env linear
> conversion, but I wanted to send out something before heading off for
> the weekend.
> 
> Hollis if you have time to check this out for PPC, I would much
> appreciate it.

Unfortunately qemu segfaults halfway through guest kernel boot:

        ...
        virtio-pci 0000:00:01.0: enabling device (0000 -> 0001)
         vda:Segmentation fault

This is 100% repeatable, and it doesn't happen without your patch
applied.

0x10116f1c in kvm_update_interrupt_request (env=0x103d4718)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/qemu-kvm.c:142
142             if (current_env->vcpu_info.created &&
(gdb) bt
#0  0x10116f1c in kvm_update_interrupt_request (env=0x103d4718)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/qemu-kvm.c:142
#1  0x100b4a74 in cpu_interrupt (env=0x103d4718, mask=0x2)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/exec.c:1507
#2  0x10033d04 in ppc_set_irq (env=0x0, n_IRQ=0x2, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/ppc.c:41
#3  0x10033e4c in ppc40x_set_irq (opaque=0x103d4718, pin=0x4, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/ppc.c:400
#4  0x44004084 in ?? ()
#5  0x1007cc40 in qemu_set_irq (irq=0x103d4718, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/irq.c:38
#6  0x1006e408 in ppcuic_trigger_irq (uic=0x103f2c80)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/irq.h:19
#7  0x1007cc40 in qemu_set_irq (irq=0x103d4718, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/irq.c:38
#8  0x1006edf4 in bamboo_pci_set_irq (pic=0x103d4718, irq_num=0x2, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/ppc4xx_devs.c:837
#9  0x100170d4 in pci_set_irq (opaque=0x103d4718, irq_num=0x2, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/pci.c:560
#10 0x1007cc40 in qemu_set_irq (irq=0x103d4718, level=0x1)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/irq.c:38
#11 0x1003194c in virtio_update_irq (vdev=0x103d4718)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/virtio.c:205
#12 0x10032ef4 in virtio_blk_rw_complete (opaque=0x10438008, ret=0x0)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/hw/virtio-blk.c:119
#13 0x100ad680 in qcow_aio_read_cb (opaque=0x10432e58, ret=0x0)
    at block-qcow2.c:1205
#14 0x1001c450 in posix_aio_read (opaque=0x103d4718) at block-raw-posix.c:550
#15 0x1000e5d8 in main_loop_wait (timeout=0xbfe5a978)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/vl.c:8352
#16 0x24000082 in ?? ()
#17 0x10117a00 in kvm_main_loop ()
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/qemu-kvm.c:614
#18 0x10011aa0 in main (argc=0x101b0000, argv=0x10380958)
    at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/vl.c:8416
(gdb) p current_env
$1 = (struct CPUPPCState *) 0x0
(gdb)

It looks like qemu is actually switching threads when this happens...
I'm not sure what the current state is of qemu/KVM threads... I think we
have 1 thread per vcpu, plus 1 IO thread? If that's right, maybe
current_env isn't being initialized in the IO thread; I only see that
happening inside ap_main_loop(), which is the vcpu thread.
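
A minimal standalone illustration of that failure mode (not qemu code, and
not from any posted patch; the names just mirror qemu-kvm.c): a __thread
variable that is assigned only in the vcpu thread is still NULL in every
other thread, which is exactly what the backtrace above shows for
current_env.

    #include <pthread.h>
    #include <stdio.h>

    static __thread void *current_env;   /* one instance per thread */

    static void *vcpu_thread(void *arg)
    {
        current_env = arg;               /* only this thread assigns it */
        printf("vcpu thread: current_env = %p\n", current_env);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        int dummy;

        pthread_create(&t, NULL, vcpu_thread, &dummy);
        pthread_join(t, NULL);
        /* the "IO thread" (main here) never assigned it, so it is NULL */
        printf("main thread: current_env = %p\n", current_env);
        return 0;
    }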

-- 
Hollis Blanchard
IBM Linux Technology Center





* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-24 19:10         ` Hollis Blanchard
@ 2008-10-27  9:48           ` Jes Sorensen
  2008-10-27 16:02             ` Hollis Blanchard
  2008-10-27 16:06           ` [patch] " Jes Sorensen
  1 sibling, 1 reply; 15+ messages in thread
From: Jes Sorensen @ 2008-10-27  9:48 UTC (permalink / raw)
  To: Hollis Blanchard; +Cc: Glauber Costa, kvm, kvm-ia64

Hollis Blanchard wrote:
> It looks like qemu is actually switching threads when this happens...
> I'm not sure what the current state is of qemu/KVM threads... I think we
> have 1 thread per vcpu, plus 1 IO thread? If that's right, maybe
> current_env isn't being initialized in the IO thread; I only see that
> happening inside ap_main_loop(), which is the vcpu thread.
> 

Hmmm, this is bizarre. If it was the IO thread dying because of this
I would expect the same to happen on ia64. Could you try and add a test
in the code to find out which thread you are when you die, and maybe
check for current_env being valid?
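
As a rough sketch of that kind of test (not from any posted patch, and it
assumes the usual qemu-kvm.c includes), something like this at the top of
kvm_update_interrupt_request() would do:

    if (!current_env) {
        fprintf(stderr, "%s: current_env is NULL in thread %lu\n",
                __func__, (unsigned long)pthread_self());
        abort();    /* or just return, once the guilty thread is known */
    }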

Thanks,
Jes


* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-27  9:48           ` Jes Sorensen
@ 2008-10-27 16:02             ` Hollis Blanchard
  2008-10-28 16:25               ` [patch] v4 - " Jes Sorensen
  0 siblings, 1 reply; 15+ messages in thread
From: Hollis Blanchard @ 2008-10-27 16:02 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: Glauber Costa, kvm, kvm-ia64

On Mon, 2008-10-27 at 10:48 +0100, Jes Sorensen wrote:
> Hollis Blanchard wrote:
> > It looks like qemu is actually switching threads when this happens...
> > I'm not sure what the current state is of qemu/KVM threads... I think we
> > have 1 thread per vcpu, plus 1 IO thread? If that's right, maybe
> > current_env isn't being initialized in the IO thread; I only see that
> > happening inside ap_main_loop(), which is the vcpu thread.
> > 
> 
> Hmmm, this is bizarre. If it was the IO thread dying because of this
> I would expect the same to happen on ia64. Could you try and add a test
> in the code to find out which thread you are when you die, and maybe
> check for current_env being valid?

I tested in gdb, and I can confirm that a) current_env was initialized
in one thread, and then used in another, and b) current_env was NULL
(invalid) when it was used.

Are you using virtio on ia64?

-- 
Hollis Blanchard
IBM Linux Technology Center



* Re: [patch] fold struct vcpu_info into CPUState
  2008-10-24 19:10         ` Hollis Blanchard
  2008-10-27  9:48           ` Jes Sorensen
@ 2008-10-27 16:06           ` Jes Sorensen
  1 sibling, 0 replies; 15+ messages in thread
From: Jes Sorensen @ 2008-10-27 16:06 UTC (permalink / raw)
  To: Hollis Blanchard; +Cc: Glauber Costa, kvm, kvm-ia64

[-- Attachment #1: Type: text/plain, Size: 478 bytes --]

Hollis Blanchard wrote:
> This is 100% repeatable, and it doesn't happen without your patch
> applied.
> 
> 0x10116f1c in kvm_update_interrupt_request (env=0x103d4718)
>     at /home/hollisb/source/kvm-userspace-ppc.hg/qemu/qemu-kvm.c:142
> 142             if (current_env->vcpu_info.created &&
> (gdb) bt

Hollis,

Ok, I looked at the logic in this function again and I think I got some
of it wrong when I did the conversion.

Could you try out this one instead?

Thanks,
Jes


[-- Attachment #2: 0010-qemu-kvm-vcpu_info-v3.1.patch --]
[-- Type: text/plain, Size: 15240 bytes --]

Merge vcpu_info into CPUState.

Moves definition of vcpu related structs to new header qemu-kvm-vcpu.h
and declares this struct in i386/ia64/ppc CPUState structs if USE_KVM
is defined. In addition, convert qemu-kvm.c to pull vcpu_info out of
CPUState.

This eliminates the ugly, statically sized array of struct vcpu_info.

Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 libkvm/kvm-common.h    |    4 -
 libkvm/libkvm.c        |   14 ++--
 libkvm/libkvm.h        |    6 +-
 qemu/qemu-kvm-vcpu.h   |   34 +++++++++++
 qemu/qemu-kvm.c        |  146 +++++++++++++++++++++++++------------------------
 qemu/target-i386/cpu.h |    4 +
 qemu/target-ia64/cpu.h |    5 +
 qemu/target-ppc/cpu.h  |    5 +
 8 files changed, 136 insertions(+), 82 deletions(-)

Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
 void kvm_show_code(kvm_context_t kvm, int vcpu);
 
 int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
 void post_kvm_run(kvm_context_t kvm, int vcpu);
 int pre_kvm_run(kvm_context_t kvm, int vcpu);
 int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
 int try_push_interrupts(kvm_context_t kvm);
 
 #endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
 	return 0;
 }
 
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->debug(kvm->opaque, vcpu);
+	return kvm->callbacks->debug(kvm->opaque, env);
 }
 
 int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
 	return kvm->callbacks->halt(kvm->opaque, vcpu);
 }
 
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+	return kvm->callbacks->shutdown(kvm->opaque, env);
 }
 
 int try_push_interrupts(kvm_context_t kvm)
@@ -872,7 +872,7 @@
 #endif
 }
 
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
 {
 	int r;
 	int fd = kvm->vcpu_fd[vcpu];
@@ -948,7 +948,7 @@
 			r = handle_io(kvm, run, vcpu);
 			break;
 		case KVM_EXIT_DEBUG:
-			r = handle_debug(kvm, vcpu);
+			r = handle_debug(kvm, env);
 			break;
 		case KVM_EXIT_MMIO:
 			r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
 #endif
 			break;
 		case KVM_EXIT_SHUTDOWN:
-			r = handle_shutdown(kvm, vcpu);
+			r = handle_shutdown(kvm, env);
 			break;
 #if defined(__s390__)
 		case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
 	/// generic memory writes to unmapped memory (For MMIO devices)
     int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
 					int len);
-    int (*debug)(void *opaque, int vcpu);
+    int (*debug)(void *opaque, void *env);
 	/*!
 	 * \brief Called when the VCPU issues an 'hlt' instruction.
 	 *
@@ -63,7 +63,7 @@
 	 * on the host CPU.
 	 */
     int (*halt)(void *opaque, int vcpu);
-    int (*shutdown)(void *opaque, int vcpu);
+    int (*shutdown)(void *opaque, void *env);
     int (*io_window)(void *opaque);
     int (*try_push_interrupts)(void *opaque);
     int (*try_push_nmi)(void *opaque);
@@ -181,7 +181,7 @@
  * return except for when an error has occured, or when you have sent it
  * an EINTR signal.
  */
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
===================================================================
--- /dev/null
+++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
@@ -0,0 +1,34 @@
+/*
+ * qemu/kvm vcpu definitions
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+#ifndef QEMU_KVM_VCPU_H
+#define QEMU_KVM_VCPU_H
+
+#include <pthread.h>
+
+struct qemu_kvm_work_item {
+    struct qemu_kvm_work_item *next;
+    void (*func)(void *data);
+    void *data;
+    int done;
+};
+
+/*
+ * KVM vcpu struct
+ */
+struct vcpu_info {
+    int sipi_needed;
+    int init;
+    pthread_t thread;
+    int signalled;
+    int stop;
+    int stopped;
+    int created;
+    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
+};
+
+#endif
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -22,13 +22,13 @@
 #include "compatfd.h"
 
 #include "qemu-kvm.h"
+#include "qemu-kvm-vcpu.h"
 #include <libkvm.h>
 #include <pthread.h>
 #include <sys/utsname.h>
 #include <sys/syscall.h>
 #include <sys/mman.h>
 
-#define bool _Bool
 #define false 0
 #define true 1
 
@@ -43,31 +43,12 @@
 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
 
 static int qemu_system_ready;
 
 #define SIG_IPI (SIGRTMIN+4)
 
-struct qemu_kvm_work_item {
-    struct qemu_kvm_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    bool done;
-};
-
-struct vcpu_info {
-    CPUState *env;
-    int sipi_needed;
-    int init;
-    pthread_t thread;
-    int signalled;
-    int stop;
-    int stopped;
-    int created;
-    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
 pthread_t io_thread;
 static int io_thread_fd = -1;
 static int io_thread_sigfd = -1;
@@ -93,7 +74,20 @@
 
 CPUState *qemu_kvm_cpu_env(int index)
 {
-    return vcpu_info[index].env;
+    CPUState *penv;
+
+    if (current_env->cpu_index == index)
+        return current_env;
+
+    penv = first_cpu;
+
+    while (penv) {
+        if (penv->cpu_index == index)
+            return penv;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return NULL;
 }
 
 static void sig_ipi_handler(int n)
@@ -102,10 +96,10 @@
 
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item wi;
 
-    if (vi == vcpu) {
+    if (env == current_env) {
         func(data);
         return;
     }
@@ -127,7 +121,7 @@
 
 static void inject_interrupt(void *data)
 {
-    cpu_interrupt(vcpu->env, (int)data);
+    cpu_interrupt(current_env, (int)data);
 }
 
 void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +134,33 @@
     int signal = 0;
 
     if (env) {
-        if (!vcpu)
+        if (current_env && !current_env->vcpu_info.created)
             signal = 1;
-        if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+        /*
+         * Testing for vcpu_info.created here is really redundant
+         */
+        if (current_env && current_env->vcpu_info.created &&
+            env != current_env && !env->vcpu_info.signalled)
             signal = 1;
 
         if (signal) {
-            vcpu_info[env->cpu_index].signalled = 1;
-                if (vcpu_info[env->cpu_index].thread)
-                    pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+            env->vcpu_info.signalled = 1;
+            if (env->vcpu_info.thread)
+                pthread_kill(env->vcpu_info.thread, SIG_IPI);
         }
     }
 }
 
 void kvm_update_after_sipi(CPUState *env)
 {
-    vcpu_info[env->cpu_index].sipi_needed = 1;
+    env->vcpu_info.sipi_needed = 1;
     kvm_update_interrupt_request(env);
 }
 
 void kvm_apic_init(CPUState *env)
 {
     if (env->cpu_index != 0)
-	vcpu_info[env->cpu_index].init = 1;
+	env->vcpu_info.init = 1;
     kvm_update_interrupt_request(env);
 }
 
@@ -227,7 +225,7 @@
 {
     int r;
 
-    r = kvm_run(kvm_context, env->cpu_index);
+    r = kvm_run(kvm_context, env->cpu_index, env);
     if (r < 0) {
         printf("kvm_run returned %d\n", r);
         exit(1);
@@ -240,7 +238,7 @@
 
 static int has_work(CPUState *env)
 {
-    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+    if (!vm_running || (env && env->vcpu_info.stopped))
 	return 0;
     if (!env->halted)
 	return 1;
@@ -249,7 +247,7 @@
 
 static void flush_queued_work(CPUState *env)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item *wi;
 
     if (!vi->queued_work_first)
@@ -266,6 +264,7 @@
 
 static void kvm_main_loop_wait(CPUState *env, int timeout)
 {
+    struct vcpu_info *vi = &env->vcpu_info;
     struct timespec ts;
     int r, e;
     siginfo_t siginfo;
@@ -291,49 +290,55 @@
     cpu_single_env = env;
     flush_queued_work(env);
 
-    if (vcpu_info[env->cpu_index].stop) {
-	vcpu_info[env->cpu_index].stop = 0;
-	vcpu_info[env->cpu_index].stopped = 1;
+    if (vi->stop) {
+	vi->stop = 0;
+	vi->stopped = 1;
 	pthread_cond_signal(&qemu_pause_cond);
     }
 
-    vcpu_info[env->cpu_index].signalled = 0;
+    vi->signalled = 0;
 }
 
 static int all_threads_paused(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->vcpu_info.stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
 
-    for (i = 0; i < smp_cpus; ++i)
-	if (vcpu_info[i].stop)
-	    return 0;
     return 1;
 }
 
 static void pause_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 1;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 1;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
+
     while (!all_threads_paused())
 	qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 0;
-	vcpu_info[i].stopped = 0;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 0;
+        penv->vcpu_info.stopped = 0;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
 }
 
@@ -348,7 +353,7 @@
 static void update_regs_for_sipi(CPUState *env)
 {
     kvm_arch_update_regs_for_sipi(env);
-    vcpu_info[env->cpu_index].sipi_needed = 0;
+    env->vcpu_info.sipi_needed = 0;
 }
 
 static void update_regs_for_init(CPUState *env)
@@ -361,11 +366,11 @@
 
 #ifdef TARGET_I386
     /* restore SIPI vector */
-    if(vcpu_info[env->cpu_index].sipi_needed)
+    if(env->vcpu_info.sipi_needed)
         env->segs[R_CS] = cs;
-
-    vcpu_info[env->cpu_index].init = 0;
 #endif
+
+    env->vcpu_info.init = 0;
     kvm_arch_load_regs(env);
 }
 
@@ -387,21 +392,23 @@
 
 void qemu_kvm_system_reset(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     pause_all_threads();
 
     qemu_system_reset();
 
-    for (i = 0; i < smp_cpus; ++i)
-	kvm_arch_cpu_reset(vcpu_info[i].env);
+    while (penv) {
+        kvm_arch_cpu_reset(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
 
     resume_all_threads();
 }
 
 static int kvm_main_loop_cpu(CPUState *env)
 {
-    struct vcpu_info *info = &vcpu_info[env->cpu_index];
+    struct vcpu_info *info = &env->vcpu_info;
 
     setup_kernel_sigmask(env);
 
@@ -442,9 +449,8 @@
     CPUState *env = _env;
     sigset_t signals;
 
-    vcpu = &vcpu_info[env->cpu_index];
-    vcpu->env = env;
-    vcpu->env->thread_id = kvm_get_thread_id();
+    current_env = env;
+    env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
     kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -452,7 +458,7 @@
 
     /* signal VCPU creation */
     pthread_mutex_lock(&qemu_mutex);
-    vcpu->created = 1;
+    current_env->vcpu_info.created = 1;
     pthread_cond_signal(&qemu_vcpu_cond);
 
     /* and wait for machine initialization */
@@ -466,9 +472,9 @@
 
 void kvm_init_new_ap(int cpu, CPUState *env)
 {
-    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+    pthread_create(&env->vcpu_info.thread, NULL, ap_main_loop, env);
 
-    while (vcpu_info[cpu].created == 0)
+    while (env->vcpu_info.created == 0)
 	qemu_cond_wait(&qemu_vcpu_cond);
 }
 
@@ -624,10 +630,10 @@
     return 0;
 }
 
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, struct CPUState *env)
 {
     kvm_debug_stop_requested = 1;
-    vcpu_info[vcpu].stopped = 1;
+    env->vcpu_info.stopped = 1;
     return 1;
 }
 
@@ -721,10 +727,10 @@
     return kvm_arch_halt(opaque, vcpu);
 }
 
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, struct CPUState *env)
 {
     /* stop the current vcpu from going back to guest mode */
-    vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+    env->vcpu_info.stopped = 1;
 
     qemu_system_reset_request();
     return 1;
Index: kvm-userspace.git/qemu/target-i386/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-i386/cpu.h
+++ kvm-userspace.git/qemu/target-i386/cpu.h
@@ -45,6 +45,7 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
 
 #define R_EAX 0
 #define R_ECX 1
@@ -622,6 +623,9 @@
 #define NR_IRQ_WORDS (256/ BITS_PER_LONG)
     uint32_t kvm_interrupt_bitmap[NR_IRQ_WORDS];
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct APICState *apic_state;
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,10 +40,15 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
+
 typedef struct CPUIA64State {
     CPU_COMMON;
     uint32_t hflags;
     int mp_state;
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
 } CPUIA64State;
 
 #define CPUState CPUIA64State
Index: kvm-userspace.git/qemu/target-ppc/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ppc/cpu.h
+++ kvm-userspace.git/qemu/target-ppc/cpu.h
@@ -22,6 +22,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include "qemu-kvm-vcpu.h"
 
 //#define PPC_EMULATE_32BITS_HYPV
 
@@ -578,6 +579,10 @@
 
     CPU_COMMON
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
+
     int access_type; /* when a memory exception occurs, the access
                         type is stored here */
 


* [patch] v4 - fold struct vcpu_info into CPUState
  2008-10-27 16:02             ` Hollis Blanchard
@ 2008-10-28 16:25               ` Jes Sorensen
  2008-10-29 13:01                 ` Anthony Liguori
  0 siblings, 1 reply; 15+ messages in thread
From: Jes Sorensen @ 2008-10-28 16:25 UTC (permalink / raw)
  To: Hollis Blanchard; +Cc: Glauber Costa, kvm, kvm-ia64

[-- Attachment #1: Type: text/plain, Size: 522 bytes --]

Hi,

Here's an updated version of the patch. It should fix the problems
Hollis ran into, and also compile on x86_64 again :-)

I managed to get rid of all the runtime use of qemu_kvm_cpu_env(),
except for the hotplug code. But I think it's reasonable to do the
walk of the linked list in that case. However, the more I have looked
at this, the more obvious to me it becomes that it is right<tm> to
expose struct CPUState to libkvm, and avoid passing around int vcpu.
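
The shape of that interface change, in short (just a sketch -- the real
hunks are in the attached patch; the prototype is the one from libkvm.h):

    /* before: callbacks get a vcpu index and have to look the CPUState up */
    int (*shutdown)(void *opaque, int vcpu);

    /* after: callbacks get the CPUState itself, passed as an opaque
     * pointer so that libkvm does not need to know the qemu type */
    int (*shutdown)(void *opaque, void *env);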

Comments and test reports very welcome!

Cheers,
Jes


[-- Attachment #2: 0010-qemu-kvm-vcpu_info-v4.patch --]
[-- Type: text/plain, Size: 20968 bytes --]

Merge vcpu_info into CPUState.

Moves definition of vcpu related structs to new header qemu-kvm-vcpu.h
and declares this struct in i386/ia64/ppc CPUState structs if USE_KVM
is defined. In addition, convert qemu-kvm.c to pull vcpu_info out of
CPUState.

This eliminates the ugly, statically sized array of struct vcpu_info.

Signed-off-by: Jes Sorensen <jes@sgi.com>

---
 libkvm/kvm-common.h     |    8 +-
 libkvm/libkvm.c         |   28 ++++----
 libkvm/libkvm.h         |   10 +--
 qemu/hw/acpi.c          |   18 +++++
 qemu/qemu-kvm-ia64.c    |    4 -
 qemu/qemu-kvm-powerpc.c |    5 -
 qemu/qemu-kvm-vcpu.h    |   34 ++++++++++
 qemu/qemu-kvm-x86.c     |   11 +--
 qemu/qemu-kvm.c         |  151 ++++++++++++++++++++++--------------------------
 qemu/qemu-kvm.h         |    6 -
 qemu/target-i386/cpu.h  |    4 +
 qemu/target-ia64/cpu.h  |    5 +
 qemu/target-ppc/cpu.h   |    5 +
 13 files changed, 172 insertions(+), 117 deletions(-)

Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
 void kvm_show_code(kvm_context_t kvm, int vcpu);
 
 int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
-void post_kvm_run(kvm_context_t kvm, int vcpu);
-int pre_kvm_run(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
+void post_kvm_run(kvm_context_t kvm, void *env);
+int pre_kvm_run(kvm_context_t kvm, void *env);
 int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
 int try_push_interrupts(kvm_context_t kvm);
 
 #endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
 	return 0;
 }
 
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->debug(kvm->opaque, vcpu);
+	return kvm->callbacks->debug(kvm->opaque, env);
 }
 
 int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
 	return kvm->callbacks->halt(kvm->opaque, vcpu);
 }
 
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+	return kvm->callbacks->shutdown(kvm->opaque, env);
 }
 
 int try_push_interrupts(kvm_context_t kvm)
@@ -837,14 +837,14 @@
 	return kvm->callbacks->try_push_nmi(kvm->opaque);
 }
 
-void post_kvm_run(kvm_context_t kvm, int vcpu)
+void post_kvm_run(kvm_context_t kvm, void *env)
 {
-	kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
+	kvm->callbacks->post_kvm_run(kvm->opaque, env);
 }
 
-int pre_kvm_run(kvm_context_t kvm, int vcpu)
+int pre_kvm_run(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
+	return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
 }
 
 int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
@@ -872,7 +872,7 @@
 #endif
 }
 
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
 {
 	int r;
 	int fd = kvm->vcpu_fd[vcpu];
@@ -886,19 +886,19 @@
 	if (!kvm->irqchip_in_kernel)
 		run->request_interrupt_window = try_push_interrupts(kvm);
 #endif
-	r = pre_kvm_run(kvm, vcpu);
+	r = pre_kvm_run(kvm, env);
 	if (r)
 	    return r;
 	r = ioctl(fd, KVM_RUN, 0);
 
 	if (r == -1 && errno != EINTR && errno != EAGAIN) {
 		r = -errno;
-		post_kvm_run(kvm, vcpu);
+		post_kvm_run(kvm, env);
 		fprintf(stderr, "kvm_run: %s\n", strerror(-r));
 		return r;
 	}
 
-	post_kvm_run(kvm, vcpu);
+	post_kvm_run(kvm, env);
 
 #if defined(KVM_CAP_COALESCED_MMIO)
 	if (kvm->coalesced_mmio) {
@@ -948,7 +948,7 @@
 			r = handle_io(kvm, run, vcpu);
 			break;
 		case KVM_EXIT_DEBUG:
-			r = handle_debug(kvm, vcpu);
+			r = handle_debug(kvm, env);
 			break;
 		case KVM_EXIT_MMIO:
 			r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
 #endif
 			break;
 		case KVM_EXIT_SHUTDOWN:
-			r = handle_shutdown(kvm, vcpu);
+			r = handle_shutdown(kvm, env);
 			break;
 #if defined(__s390__)
 		case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
 	/// generic memory writes to unmapped memory (For MMIO devices)
     int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
 					int len);
-    int (*debug)(void *opaque, int vcpu);
+    int (*debug)(void *opaque, void *env);
 	/*!
 	 * \brief Called when the VCPU issues an 'hlt' instruction.
 	 *
@@ -63,12 +63,12 @@
 	 * on the host CPU.
 	 */
     int (*halt)(void *opaque, int vcpu);
-    int (*shutdown)(void *opaque, int vcpu);
+    int (*shutdown)(void *opaque, void *env);
     int (*io_window)(void *opaque);
     int (*try_push_interrupts)(void *opaque);
     int (*try_push_nmi)(void *opaque);
-    void (*post_kvm_run)(void *opaque, int vcpu);
-    int (*pre_kvm_run)(void *opaque, int vcpu);
+    void (*post_kvm_run)(void *opaque, void *env);
+    int (*pre_kvm_run)(void *opaque, void *env);
     int (*tpr_access)(void *opaque, int vcpu, uint64_t rip, int is_write);
 #if defined(__powerpc__)
     int (*powerpc_dcr_read)(int vcpu, uint32_t dcrn, uint32_t *data);
@@ -181,7 +181,7 @@
  * return except for when an error has occured, or when you have sent it
  * an EINTR signal.
  */
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/hw/acpi.c
===================================================================
--- kvm-userspace.git.orig/qemu/hw/acpi.c
+++ kvm-userspace.git/qemu/hw/acpi.c
@@ -722,6 +722,24 @@
 }
 
 #if defined(TARGET_I386) || defined(TARGET_X86_64)
+#ifdef USE_KVM
+static CPUState *qemu_kvm_cpu_env(int index)
+{
+    CPUState *penv;
+
+    penv = first_cpu;
+
+    while (penv) {
+        if (penv->cpu_index == index)
+            return penv;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return NULL;
+}
+#endif
+
+
 void qemu_system_cpu_hot_add(int cpu, int state)
 {
     CPUState *env;
Index: kvm-userspace.git/qemu/qemu-kvm-ia64.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-ia64.c
+++ kvm-userspace.git/qemu/qemu-kvm-ia64.c
@@ -39,11 +39,11 @@
     return 1;
 }
 
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
 }
 
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
 }
 
Index: kvm-userspace.git/qemu/qemu-kvm-powerpc.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-powerpc.c
+++ kvm-userspace.git/qemu/qemu-kvm-powerpc.c
@@ -142,14 +142,13 @@
     return 1;
 }
 
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
 	return;
 }
 
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
-    CPUState *env = qemu_kvm_cpu_env(vcpu);
     cpu_single_env = env;
 }
 
Index: kvm-userspace.git/qemu/qemu-kvm-vcpu.h
===================================================================
--- /dev/null
+++ kvm-userspace.git/qemu/qemu-kvm-vcpu.h
@@ -0,0 +1,34 @@
+/*
+ * qemu/kvm vcpu definitions
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ *
+ * Licensed under the terms of the GNU GPL version 2 or higher.
+ */
+#ifndef QEMU_KVM_VCPU_H
+#define QEMU_KVM_VCPU_H
+
+#include <pthread.h>
+
+struct qemu_kvm_work_item {
+    struct qemu_kvm_work_item *next;
+    void (*func)(void *data);
+    void *data;
+    int done;
+};
+
+/*
+ * KVM vcpu struct
+ */
+struct vcpu_info {
+    int sipi_needed;
+    int init;
+    pthread_t thread;
+    int signalled;
+    int stop;
+    int stopped;
+    int created;
+    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
+};
+
+#endif
Index: kvm-userspace.git/qemu/qemu-kvm-x86.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-x86.c
+++ kvm-userspace.git/qemu/qemu-kvm-x86.c
@@ -618,17 +618,16 @@
     return 1;
 }
 
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
-    CPUState *env = cpu_single_env;
-
     if (!kvm_irqchip_in_kernel(kvm_context))
-	kvm_set_cr8(kvm_context, vcpu, cpu_get_apic_tpr(env));
+	kvm_set_cr8(kvm_context, env->cpu_index, cpu_get_apic_tpr(env));
 }
 
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
-    CPUState *env = qemu_kvm_cpu_env(vcpu);
+    int vcpu = env->cpu_index;
+
     cpu_single_env = env;
 
     env->eflags = kvm_get_interrupt_flag(kvm_context, vcpu)
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -22,13 +22,13 @@
 #include "compatfd.h"
 
 #include "qemu-kvm.h"
+#include "qemu-kvm-vcpu.h"
 #include <libkvm.h>
 #include <pthread.h>
 #include <sys/utsname.h>
 #include <sys/syscall.h>
 #include <sys/mman.h>
 
-#define bool _Bool
 #define false 0
 #define true 1
 
@@ -43,31 +43,12 @@
 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
 
 static int qemu_system_ready;
 
 #define SIG_IPI (SIGRTMIN+4)
 
-struct qemu_kvm_work_item {
-    struct qemu_kvm_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    bool done;
-};
-
-struct vcpu_info {
-    CPUState *env;
-    int sipi_needed;
-    int init;
-    pthread_t thread;
-    int signalled;
-    int stop;
-    int stopped;
-    int created;
-    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
 pthread_t io_thread;
 static int io_thread_fd = -1;
 static int io_thread_sigfd = -1;
@@ -91,21 +72,16 @@
     cpu_single_env = env;
 }
 
-CPUState *qemu_kvm_cpu_env(int index)
-{
-    return vcpu_info[index].env;
-}
-
 static void sig_ipi_handler(int n)
 {
 }
 
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item wi;
 
-    if (vi == vcpu) {
+    if (env == current_env) {
         func(data);
         return;
     }
@@ -127,7 +103,7 @@
 
 static void inject_interrupt(void *data)
 {
-    cpu_interrupt(vcpu->env, (int)data);
+    cpu_interrupt(current_env, (int)data);
 }
 
 void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +116,33 @@
     int signal = 0;
 
     if (env) {
-        if (!vcpu)
+        if (current_env && !current_env->vcpu_info.created)
             signal = 1;
-        if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+        /*
+         * Testing for vcpu_info.created here is really redundant
+         */
+        if (current_env && current_env->vcpu_info.created &&
+            env != current_env && !env->vcpu_info.signalled)
             signal = 1;
 
         if (signal) {
-            vcpu_info[env->cpu_index].signalled = 1;
-                if (vcpu_info[env->cpu_index].thread)
-                    pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+            env->vcpu_info.signalled = 1;
+            if (env->vcpu_info.thread)
+                pthread_kill(env->vcpu_info.thread, SIG_IPI);
         }
     }
 }
 
 void kvm_update_after_sipi(CPUState *env)
 {
-    vcpu_info[env->cpu_index].sipi_needed = 1;
+    env->vcpu_info.sipi_needed = 1;
     kvm_update_interrupt_request(env);
 }
 
 void kvm_apic_init(CPUState *env)
 {
     if (env->cpu_index != 0)
-	vcpu_info[env->cpu_index].init = 1;
+	env->vcpu_info.init = 1;
     kvm_update_interrupt_request(env);
 }
 
@@ -178,18 +158,19 @@
     return kvm_arch_try_push_nmi(opaque);
 }
 
-static void post_kvm_run(void *opaque, int vcpu)
+static void post_kvm_run(void *opaque, void *data)
 {
+    CPUState *env = (CPUState *)data;
 
     pthread_mutex_lock(&qemu_mutex);
-    kvm_arch_post_kvm_run(opaque, vcpu);
+    kvm_arch_post_kvm_run(opaque, env);
 }
 
-static int pre_kvm_run(void *opaque, int vcpu)
+static int pre_kvm_run(void *opaque, void *data)
 {
-    CPUState *env = qemu_kvm_cpu_env(vcpu);
+    CPUState *env = (CPUState *)data;
 
-    kvm_arch_pre_kvm_run(opaque, vcpu);
+    kvm_arch_pre_kvm_run(opaque, env);
 
     if (env->interrupt_request & CPU_INTERRUPT_EXIT)
 	return 1;
@@ -227,7 +208,7 @@
 {
     int r;
 
-    r = kvm_run(kvm_context, env->cpu_index);
+    r = kvm_run(kvm_context, env->cpu_index, env);
     if (r < 0) {
         printf("kvm_run returned %d\n", r);
         exit(1);
@@ -240,7 +221,7 @@
 
 static int has_work(CPUState *env)
 {
-    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+    if (!vm_running || (env && env->vcpu_info.stopped))
 	return 0;
     if (!env->halted)
 	return 1;
@@ -249,7 +230,7 @@
 
 static void flush_queued_work(CPUState *env)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
+    struct vcpu_info *vi = &env->vcpu_info;
     struct qemu_kvm_work_item *wi;
 
     if (!vi->queued_work_first)
@@ -266,6 +247,7 @@
 
 static void kvm_main_loop_wait(CPUState *env, int timeout)
 {
+    struct vcpu_info *vi = &env->vcpu_info;
     struct timespec ts;
     int r, e;
     siginfo_t siginfo;
@@ -291,49 +273,55 @@
     cpu_single_env = env;
     flush_queued_work(env);
 
-    if (vcpu_info[env->cpu_index].stop) {
-	vcpu_info[env->cpu_index].stop = 0;
-	vcpu_info[env->cpu_index].stopped = 1;
+    if (vi->stop) {
+	vi->stop = 0;
+	vi->stopped = 1;
 	pthread_cond_signal(&qemu_pause_cond);
     }
 
-    vcpu_info[env->cpu_index].signalled = 0;
+    vi->signalled = 0;
 }
 
 static int all_threads_paused(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->vcpu_info.stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
 
-    for (i = 0; i < smp_cpus; ++i)
-	if (vcpu_info[i].stop)
-	    return 0;
     return 1;
 }
 
 static void pause_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 1;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 1;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
+
     while (!all_threads_paused())
 	qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 0;
-	vcpu_info[i].stopped = 0;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->vcpu_info.stop = 0;
+        penv->vcpu_info.stopped = 0;
+        pthread_kill(penv->vcpu_info.thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
 }
 
@@ -348,7 +336,7 @@
 static void update_regs_for_sipi(CPUState *env)
 {
     kvm_arch_update_regs_for_sipi(env);
-    vcpu_info[env->cpu_index].sipi_needed = 0;
+    env->vcpu_info.sipi_needed = 0;
 }
 
 static void update_regs_for_init(CPUState *env)
@@ -361,11 +349,11 @@
 
 #ifdef TARGET_I386
     /* restore SIPI vector */
-    if(vcpu_info[env->cpu_index].sipi_needed)
+    if(env->vcpu_info.sipi_needed)
         env->segs[R_CS] = cs;
-
-    vcpu_info[env->cpu_index].init = 0;
 #endif
+
+    env->vcpu_info.init = 0;
     kvm_arch_load_regs(env);
 }
 
@@ -387,21 +375,23 @@
 
 void qemu_kvm_system_reset(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     pause_all_threads();
 
     qemu_system_reset();
 
-    for (i = 0; i < smp_cpus; ++i)
-	kvm_arch_cpu_reset(vcpu_info[i].env);
+    while (penv) {
+        kvm_arch_cpu_reset(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
 
     resume_all_threads();
 }
 
 static int kvm_main_loop_cpu(CPUState *env)
 {
-    struct vcpu_info *info = &vcpu_info[env->cpu_index];
+    struct vcpu_info *info = &env->vcpu_info;
 
     setup_kernel_sigmask(env);
 
@@ -442,9 +432,8 @@
     CPUState *env = _env;
     sigset_t signals;
 
-    vcpu = &vcpu_info[env->cpu_index];
-    vcpu->env = env;
-    vcpu->env->thread_id = kvm_get_thread_id();
+    current_env = env;
+    env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
     kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -452,7 +441,7 @@
 
     /* signal VCPU creation */
     pthread_mutex_lock(&qemu_mutex);
-    vcpu->created = 1;
+    current_env->vcpu_info.created = 1;
     pthread_cond_signal(&qemu_vcpu_cond);
 
     /* and wait for machine initialization */
@@ -466,9 +455,9 @@
 
 void kvm_init_new_ap(int cpu, CPUState *env)
 {
-    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+    pthread_create(&env->vcpu_info.thread, NULL, ap_main_loop, env);
 
-    while (vcpu_info[cpu].created == 0)
+    while (env->vcpu_info.created == 0)
 	qemu_cond_wait(&qemu_vcpu_cond);
 }
 
@@ -624,10 +613,12 @@
     return 0;
 }
 
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, void *data)
 {
+    struct CPUState *env = (struct CPUState *)data;
+
     kvm_debug_stop_requested = 1;
-    vcpu_info[vcpu].stopped = 1;
+    env->vcpu_info.stopped = 1;
     return 1;
 }
 
@@ -721,10 +712,12 @@
     return kvm_arch_halt(opaque, vcpu);
 }
 
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, void *data)
 {
+    struct CPUState *env = (struct CPUState *)data;
+
     /* stop the current vcpu from going back to guest mode */
-    vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+    env->vcpu_info.stopped = 1;
 
     qemu_system_reset_request();
     return 1;
Index: kvm-userspace.git/qemu/qemu-kvm.h
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.h
+++ kvm-userspace.git/qemu/qemu-kvm.h
@@ -63,16 +63,14 @@
 void kvm_arch_load_regs(CPUState *env);
 int kvm_arch_qemu_init_env(CPUState *cenv);
 int kvm_arch_halt(void *opaque, int vcpu);
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu);
-void kvm_arch_post_kvm_run(void *opaque, int vcpu);
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env);
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env);
 int kvm_arch_has_work(CPUState *env);
 int kvm_arch_try_push_interrupts(void *opaque);
 int kvm_arch_try_push_nmi(void *opaque);
 void kvm_arch_update_regs_for_sipi(CPUState *env);
 void kvm_arch_cpu_reset(CPUState *env);
 
-CPUState *qemu_kvm_cpu_env(int index);
-
 void qemu_kvm_aio_wait_start(void);
 void qemu_kvm_aio_wait(void);
 void qemu_kvm_aio_wait_end(void);
Index: kvm-userspace.git/qemu/target-i386/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-i386/cpu.h
+++ kvm-userspace.git/qemu/target-i386/cpu.h
@@ -45,6 +45,7 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
 
 #define R_EAX 0
 #define R_ECX 1
@@ -622,6 +623,9 @@
 #define NR_IRQ_WORDS (256/ BITS_PER_LONG)
     uint32_t kvm_interrupt_bitmap[NR_IRQ_WORDS];
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
     /* in order to simplify APIC support, we leave this pointer to the
        user */
     struct APICState *apic_state;
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,10 +40,15 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+#include "qemu-kvm-vcpu.h"
+
 typedef struct CPUIA64State {
     CPU_COMMON;
     uint32_t hflags;
     int mp_state;
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
 } CPUIA64State;
 
 #define CPUState CPUIA64State
Index: kvm-userspace.git/qemu/target-ppc/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ppc/cpu.h
+++ kvm-userspace.git/qemu/target-ppc/cpu.h
@@ -22,6 +22,7 @@
 
 #include "config.h"
 #include <inttypes.h>
+#include "qemu-kvm-vcpu.h"
 
 //#define PPC_EMULATE_32BITS_HYPV
 
@@ -578,6 +579,10 @@
 
     CPU_COMMON
 
+#ifdef USE_KVM
+    struct vcpu_info vcpu_info;
+#endif
+
     int access_type; /* when a memory exception occurs, the access
                         type is stored here */
 


* Re: [patch] v4 - fold struct vcpu_info into CPUState
  2008-10-28 16:25               ` [patch] v4 - " Jes Sorensen
@ 2008-10-29 13:01                 ` Anthony Liguori
  2008-10-29 13:04                   ` Jes Sorensen
  0 siblings, 1 reply; 15+ messages in thread
From: Anthony Liguori @ 2008-10-29 13:01 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: Hollis Blanchard, Glauber Costa, kvm, kvm-ia64

Jes Sorensen wrote:
> Hi,
>
> Here's an updated version of the patch. It should fix the problems
> Hollis ran into, and also compile on x86_64 again :-)
>
> I managed to get rid of all the runtime use of qemu_kvm_cpu_env(),
> except for the hotplug code. But I think it's reasonable to do the
> walk of the linked list in that case. However, the more I have looked
> at this, the more obvious to me it becomes that it is right<tm> to
> expose struct CPUState to libkvm, and avoid passing around int vcpu.
>
> Comments and test reports very welcome!

FWIW, vcpu_info seems to have nothing to do with KVM.  It's entirely 
built around the IO thread.  The IO thread really isn't KVM specific.

I would just stick these fields in CPU_COMMON unconditionally if you're 
going to move them at all.
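
Concretely that would be something along these lines in cpu-defs.h (a
hypothetical sketch, not a posted patch; the kvm_* field names are made up
for illustration):

    #include <pthread.h>

    struct qemu_kvm_work_item;            /* from qemu-kvm-vcpu.h */

    #define CPU_COMMON                                                    \
        int cpu_index;                    /* existing common field */     \
        /* ... other existing common fields ... */                        \
        pthread_t kvm_thread;                                             \
        int kvm_stop, kvm_stopped, kvm_created, kvm_signalled;            \
        struct qemu_kvm_work_item *queued_work_first, *queued_work_last;

    /* each target keeps using CPU_COMMON as before, with no #ifdef USE_KVM */
    typedef struct CPUX86State {
        CPU_COMMON
        /* target specific fields follow */
    } CPUX86State;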

Regards,

Anthony Liguori

> Cheers,
> Jes
>



* Re: [patch] v4 - fold struct vcpu_info into CPUState
  2008-10-29 13:01                 ` Anthony Liguori
@ 2008-10-29 13:04                   ` Jes Sorensen
  2008-10-29 13:09                     ` Anthony Liguori
  0 siblings, 1 reply; 15+ messages in thread
From: Jes Sorensen @ 2008-10-29 13:04 UTC (permalink / raw)
  To: Anthony Liguori; +Cc: Hollis Blanchard, Glauber Costa, kvm, kvm-ia64

Anthony Liguori wrote:
> FWIW, vcpu_info seems to have nothing to do with KVM.  It's entirely 
> built around the IO thread.  The IO thread really isn't KVM specific.
> 
> I would just stick these fields in CPU_COMMON unconditionally if you're 
> going to move them at all.

Hi Anthony,

I am quite happy to do that, my main concern was to get rid of the
static declaration of the vcpu_info array.

Do you prefer we do it all in one go, or rather have this patch, then
another patch afterwards that moves them into CPU_COMMON? I'm good with
both; I just don't want it to get so complicated that it won't get
accepted.

Cheers,
Jes


* Re: [patch] v4 - fold struct vcpu_info into CPUState
  2008-10-29 13:04                   ` Jes Sorensen
@ 2008-10-29 13:09                     ` Anthony Liguori
  0 siblings, 0 replies; 15+ messages in thread
From: Anthony Liguori @ 2008-10-29 13:09 UTC (permalink / raw)
  To: Jes Sorensen; +Cc: Hollis Blanchard, Glauber Costa, kvm, kvm-ia64

Jes Sorensen wrote:
> Anthony Liguori wrote:
>> FWIW, vcpu_info seems to have nothing to do with KVM.  It's entirely 
>> built around the IO thread.  The IO thread really isn't KVM specific.
>>
>> I would just stick these fields in CPU_COMMON unconditionally if 
>> you're going to move them at all.
>
> Hi Anthony,
>
> I am quite happy to do that, my main concern was to get rid of the
> static declaration of the vcpu_info array.
>
> Do you prefer we do it all in one go, or rather have this patch, then
> another patch afterwards that moves them into CPU_COMMON? I'm good with
> both; I just don't want it to get so complicated that it won't get
> accepted.

I'm indifferent; it's Avi's call.

Regards,

Anthony Liguori

> Cheers,
> Jes


Thread overview: 15+ messages
2008-09-29 15:24 [patch] fold struct vcpu_info into CPUState Jes Sorensen
2008-10-05 10:02 ` Avi Kivity
2008-10-05 20:48   ` Glauber Costa
2008-10-13 22:24 ` Glauber Costa
2008-10-17 15:28   ` Jes Sorensen
2008-10-17 21:27     ` Glauber Costa
2008-10-24 15:57       ` Jes Sorensen
2008-10-24 19:10         ` Hollis Blanchard
2008-10-27  9:48           ` Jes Sorensen
2008-10-27 16:02             ` Hollis Blanchard
2008-10-28 16:25               ` [patch] v4 - " Jes Sorensen
2008-10-29 13:01                 ` Anthony Liguori
2008-10-29 13:04                   ` Jes Sorensen
2008-10-29 13:09                     ` Anthony Liguori
2008-10-27 16:06           ` [patch] " Jes Sorensen
