* [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
@ 2007-11-14 5:43 Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FACA-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
0 siblings, 1 reply; 10+ messages in thread
From: Zhang, Xiantao @ 2007-11-14 5:43 UTC (permalink / raw)
To: Avi Kivity
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
carsteno-tA70FqPdS9bQT0dZR+AlfA, Hollis Blanchard
From: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
Using kvm arch support to replace kvm_x86_ops, and make kvm_main.c
arch-independent.
Signed-off-by: Zhang Xiantao <xiantao.zhang-ral2JQCrhuEAvxtiuMwx3w@public.gmane.org>
---
drivers/kvm/kvm.h | 18 +++++++
drivers/kvm/kvm_main.c | 78 +++++++-----------------------
drivers/kvm/x86.c | 127
++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 163 insertions(+), 60 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6498324..6c797ea 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -649,6 +649,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run);
__init void kvm_arch_init(void);
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_decache(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int
id);
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu);
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_arch_hardware_enable(void *garbage);
+void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+
+
static inline void kvm_guest_enter(void)
{
account_system_vtime(current);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 71a3b7a..6a109e8 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -124,13 +124,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm
*kvm, unsigned id)
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
- vcpu->mmu.root_hpa = INVALID_PAGE;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
- if (!irqchip_in_kernel(kvm) || id == 0)
- vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
- else
- vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
init_waitqueue_head(&vcpu->wq);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -140,29 +135,11 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct
kvm *kvm, unsigned id)
}
vcpu->run = page_address(page);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
+ r = kvm_arch_vcpu_init(vcpu);
+ if (r < 0)
goto fail_free_run;
- }
- vcpu->pio_data = page_address(page);
-
- r = kvm_mmu_create(vcpu);
- if (r < 0)
- goto fail_free_pio_data;
-
- if (irqchip_in_kernel(kvm)) {
- r = kvm_create_lapic(vcpu);
- if (r < 0)
- goto fail_mmu_destroy;
- }
-
return 0;
-fail_mmu_destroy:
- kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
- free_page((unsigned long)vcpu->pio_data);
fail_free_run:
free_page((unsigned long)vcpu->run);
fail:
@@ -172,9 +149,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- kvm_free_lapic(vcpu);
- kvm_mmu_destroy(vcpu);
- free_page((unsigned long)vcpu->pio_data);
+ kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
@@ -240,7 +215,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_unload_vcpu_mmu(kvm->vcpus[i]);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
- kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
@@ -901,28 +876,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm
*kvm, int n)
if (!valid_vcpu(n))
return -EINVAL;
- vcpu = kvm_x86_ops->vcpu_create(kvm, n);
- if (IS_ERR(vcpu))
+ vcpu = kvm_arch_vcpu_create(kvm, n);
+ if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
preempt_notifier_init(&vcpu->preempt_notifier,
&kvm_preempt_ops);
- /* We do fxsave: this must be aligned. */
- BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
-
- vcpu_load(vcpu);
- r = kvm_x86_ops->vcpu_reset(vcpu);
- if (r == 0)
- r = kvm_mmu_setup(vcpu);
- vcpu_put(vcpu);
- if (r < 0)
- goto free_vcpu;
-
mutex_lock(&kvm->lock);
if (kvm->vcpus[n]) {
r = -EEXIST;
mutex_unlock(&kvm->lock);
- goto mmu_unload;
+ goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
mutex_unlock(&kvm->lock);
@@ -937,14 +901,8 @@ unlink:
mutex_lock(&kvm->lock);
kvm->vcpus[n] = NULL;
mutex_unlock(&kvm->lock);
-
-mmu_unload:
- vcpu_load(vcpu);
- kvm_mmu_unload(vcpu);
- vcpu_put(vcpu);
-
-free_vcpu:
- kvm_x86_ops->vcpu_free(vcpu);
+vcpu_destroy:
+ kvm_arch_vcpu_destory(vcpu);
return r;
}
@@ -1308,7 +1266,7 @@ static void decache_vcpus_on_cpu(int cpu)
*/
if (mutex_trylock(&vcpu->mutex)) {
if (vcpu->cpu == cpu) {
- kvm_x86_ops->vcpu_decache(vcpu);
+ kvm_arch_vcpu_decache(vcpu);
vcpu->cpu = -1;
}
mutex_unlock(&vcpu->mutex);
@@ -1324,7 +1282,7 @@ static void hardware_enable(void *junk)
if (cpu_isset(cpu, cpus_hardware_enabled))
return;
cpu_set(cpu, cpus_hardware_enabled);
- kvm_x86_ops->hardware_enable(NULL);
+ kvm_arch_hardware_enable(NULL);
}
static void hardware_disable(void *junk)
@@ -1335,7 +1293,7 @@ static void hardware_disable(void *junk)
return;
cpu_clear(cpu, cpus_hardware_enabled);
decache_vcpus_on_cpu(cpu);
- kvm_x86_ops->hardware_disable(NULL);
+ kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned
long val,
@@ -1501,7 +1459,7 @@ static void kvm_sched_in(struct preempt_notifier
*pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1509,7 +1467,7 @@ static void kvm_sched_out(struct preempt_notifier
*pn,
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_put(vcpu);
+ kvm_arch_vcpu_put(vcpu);
}
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
@@ -1534,13 +1492,13 @@ int kvm_init_x86(struct kvm_x86_ops *ops,
unsigned int vcpu_size,
kvm_x86_ops = ops;
- r = kvm_x86_ops->hardware_setup();
+ r = kvm_arch_hardware_setup();
if (r < 0)
goto out;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
-
kvm_x86_ops->check_processor_compatibility,
+ kvm_arch_check_processor_compat,
&r, 0, 1);
if (r < 0)
goto out_free_0;
@@ -1595,7 +1553,7 @@ out_free_2:
out_free_1:
on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
out:
kvm_x86_ops = NULL;
return r;
@@ -1611,7 +1569,7 @@ void kvm_exit_x86(void)
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable, NULL, 0, 1);
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 92c0988..2e0fded 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -2320,3 +2320,130 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+void kvm_arch_vcpu_decache(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->vcpu_decache(vcpu);
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ int r;
+ struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+ if (IS_ERR(vcpu)) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
+ vcpu_load(vcpu);
+ r = kvm_arch_vcpu_reset(vcpu);
+ if (r == 0)
+ r = kvm_mmu_setup(vcpu);
+ vcpu_put(vcpu);
+ if (r < 0)
+ goto free_vcpu;
+
+ return vcpu;
+free_vcpu:
+ kvm_x86_ops->vcpu_free(vcpu);
+fail:
+ return ERR_PTR(r);
+}
+
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+ kvm_x86_ops->hardware_enable(garbage);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+ kvm_x86_ops->hardware_disable(garbage);
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return kvm_x86_ops->hardware_setup();
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+ kvm_x86_ops->hardware_unsetup();
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ kvm_x86_ops->check_processor_compatibility(rtn);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct page *page;
+ struct kvm *kvm;
+ int r;
+
+ BUG_ON(vcpu->kvm == NULL);
+ kvm = vcpu->kvm;
+
+ vcpu->mmu.root_hpa = INVALID_PAGE;
+ if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+ vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+ else
+ vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ vcpu->pio_data = page_address(page);
+
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto fail_free_pio_data;
+
+ if (irqchip_in_kernel(kvm)) {
+ r = kvm_create_lapic(vcpu);
+ if (r < 0)
+ goto fail_mmu_destroy;
+ }
+
+ return 0;
+
+fail_mmu_destroy:
+ kvm_mmu_destroy(vcpu);
+fail_free_pio_data:
+ free_page((unsigned long)vcpu->pio_data);
+fail:
+ return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kvm_free_lapic(vcpu);
+ kvm_mmu_destroy(vcpu);
+ free_page((unsigned long)vcpu->pio_data);
+}
--
1.5.1.2
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FACA-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-11-14 11:53 ` Carsten Otte
[not found] ` <473AE1A9.3010501-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
0 siblings, 1 reply; 10+ messages in thread
From: Carsten Otte @ 2007-11-14 11:53 UTC (permalink / raw)
To: Zhang, Xiantao
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f,
carsteno-tA70FqPdS9bQT0dZR+AlfA, Hollis Blanchard, Avi Kivity
I still do strongly agree with the general idea of this patch, and
most of the split comes out just right now. However, there is one
thing I'd like to pick on:
decache_vcpus_on_cpu should be an arch callback, rather than
kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and do
the arch callback in a loop. The whole thing is a nop for us, thus the
whole thing should be a callback.
I'd really like to see this changed before the patch gets merged.
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead ofkvm_x86_ops
[not found] ` <473AE1A9.3010501-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
@ 2007-11-14 12:35 ` Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FBF9-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-14 12:45 ` Zhang, Xiantao
2007-11-14 13:35 ` [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Avi Kivity
2 siblings, 1 reply; 10+ messages in thread
From: Zhang, Xiantao @ 2007-11-14 12:35 UTC (permalink / raw)
To: carsteno-tA70FqPdS9bQT0dZR+AlfA
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Avi Kivity,
Hollis Blanchard
Carsten Otte wrote:
> I still do strongly agree with the general idea of this patch, and
> most of the split comes out just right now. However, there is one
> thing I'd like to pick on:
>
> decache_vcpus_on_cpu should be an arch callback, and rather than
> kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and do
> the arch callback in a loop. The whole thing is nop for us, thus the
> whole thing should be a callback.
Hi Carsten,
So we have to expose kvm_lock and vm_list. Is that OK?
Xiantao
> I'd really like to see this changed before the patch gets merged.
>
>
------------------------------------------------------------------------
-
> This SF.net email is sponsored by: Splunk Inc.
> Still grepping through log files to find problems? Stop.
> Now Search log events and configuration files using AJAX and a
> browser. Download your FREE copy of Splunk now >>
> http://get.splunk.com/ _______________________________________________
> kvm-devel mailing list
> kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
> https://lists.sourceforge.net/lists/listinfo/kvm-devel
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead ofkvm_x86_ops
[not found] ` <473AE1A9.3010501-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
2007-11-14 12:35 ` [PATCH] 3/5 Using kvm arch support instead ofkvm_x86_ops Zhang, Xiantao
@ 2007-11-14 12:45 ` Zhang, Xiantao
2007-11-14 13:35 ` [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Avi Kivity
2 siblings, 0 replies; 10+ messages in thread
From: Zhang, Xiantao @ 2007-11-14 12:45 UTC (permalink / raw)
To: carsteno-tA70FqPdS9bQT0dZR+AlfA, Avi Kivity
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard
[-- Attachment #1: Type: text/plain, Size: 1598 bytes --]
IA64 doesn't need it either. OK, keep them under arch, but we have to expose kvm_lock and vm_list. :)
Xiantao
-----Original Message-----
From: kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org [mailto:kvm-devel-bounces-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org] On Behalf Of Carsten Otte
Sent: 2007年11月14日 19:53
To: Zhang, Xiantao
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org; carsteno-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org; Hollis Blanchard; Avi Kivity
Subject: Re: [kvm-devel] [PATCH] 3/5 Using kvm arch support instead ofkvm_x86_ops
I still do strongly agree with the general idea of this patch, and
most of the split comes out just right now. However, there is one
thing I'd like to pick on:
decache_vcpus_on_cpu should be an arch callback, rather than
kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and do
the arch callback in a loop. The whole thing is a nop for us, thus the
whole thing should be a callback.
I'd really like to see this changed before the patch gets merged.
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
[-- Attachment #2: 0005-Move-some-x86-specific-part-from-kvm_init-to-kvm_arc.patch --]
[-- Type: application/octet-stream, Size: 3848 bytes --]
From 5c474cfa69f199d0347d2791933b3a9a4f540fcd Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:40:21 +0800
Subject: [PATCH] Move-some-x86-specific-part-from-kvm_init-to-kvm_arch
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 5 +++--
drivers/kvm/kvm_main.c | 26 ++++++--------------------
drivers/kvm/x86.c | 27 ++++++++++++++++++++++++++-
3 files changed, 35 insertions(+), 23 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 5e7be15..96d9c7d 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -495,7 +495,7 @@ void vcpu_put(struct kvm_vcpu *vcpu);
void decache_vcpus_on_cpu(int cpu);
-int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module);
void kvm_exit(void);
@@ -649,7 +649,8 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-__init void kvm_arch_init(void);
+int kvm_arch_init(void *opaque);
+void kvm_arch_exit(void);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 626a8fc..2cbf662 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1435,7 +1435,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
-int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module)
{
int r;
@@ -1447,7 +1447,9 @@ int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
kvm_init_debug();
- kvm_arch_init();
+ r = kvm_arch_init(opaque);
+ if (r)
+ goto out4;
bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -1456,22 +1458,6 @@ int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
goto out;
}
- if (kvm_x86_ops) {
- printk(KERN_ERR "kvm: already loaded the other module\n");
- return -EEXIST;
- }
-
- if (!ops->cpu_has_kvm_support()) {
- printk(KERN_ERR "kvm: no hardware support\n");
- return -EOPNOTSUPP;
- }
- if (ops->disabled_by_bios()) {
- printk(KERN_ERR "kvm: disabled by bios\n");
- return -EOPNOTSUPP;
- }
-
- kvm_x86_ops = ops;
-
r = kvm_arch_hardware_setup();
if (r < 0)
goto out;
@@ -1535,7 +1521,7 @@ out_free_1:
out_free_0:
kvm_arch_hardware_unsetup();
out:
- kvm_x86_ops = NULL;
+ kvm_arch_exit();
kvm_exit_debug();
kvm_mmu_module_exit();
out4:
@@ -1553,7 +1539,7 @@ void kvm_exit(void)
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_hardware_unsetup();
- kvm_x86_ops = NULL;
+ kvm_arch_exit();
kvm_exit_debug();
__free_page(bad_page);
kvm_mmu_module_exit();
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 6f3560e..8d45920 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1646,11 +1646,36 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
-__init void kvm_arch_init(void)
+int kvm_arch_init(void *opaque)
{
+ struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+
kvm_init_msr_list();
+
+ if (kvm_x86_ops) {
+ printk(KERN_ERR "kvm: already loaded the other module\n");
+ return -EEXIST;
+ }
+
+ if (!ops->cpu_has_kvm_support()) {
+ printk(KERN_ERR "kvm: no hardware support\n");
+ return -EOPNOTSUPP;
+ }
+ if (ops->disabled_by_bios()) {
+ printk(KERN_ERR "kvm: disabled by bios\n");
+ return -EOPNOTSUPP;
+ }
+
+ kvm_x86_ops = ops;
+
+ return 0;
}
+void kvm_arch_exit(void)
+{
+ kvm_x86_ops = NULL;
+ }
+
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
--
1.5.1.2
[-- Attachment #3: 0001-Move-some-includes-to-x86.c-from-kvm_main.c-since-t.patch --]
[-- Type: application/octet-stream, Size: 1321 bytes --]
From 0097715864b649b83e25b818094c456fcea2e928 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:08:51 +0800
Subject: [PATCH] Move some includes to x86.c from kvm_main.c, since the related functions have been moved to x86.c
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm_main.c | 2 --
drivers/kvm/x86.c | 2 ++
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index fb939ab..3a72f58 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -17,7 +17,6 @@
#include "kvm.h"
#include "x86.h"
-#include "x86_emulate.h"
#include "irq.h"
#include <linux/kvm.h>
@@ -44,7 +43,6 @@
#include <linux/mman.h>
#include <asm/processor.h>
-#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index aa6c3d8..2fa4f13 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -16,6 +16,7 @@
#include "kvm.h"
#include "x86.h"
+#include "x86_emulate.h"
#include "segment_descriptor.h"
#include "irq.h"
@@ -25,6 +26,7 @@
#include <linux/module.h>
#include <asm/uaccess.h>
+#include <asm/msr.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
--
1.5.1.2
[-- Attachment #4: 0002-move-kvm_x86_ops-to-x86.c.patch --]
[-- Type: application/octet-stream, Size: 1547 bytes --]
From c11651f1e1fb6b64fd865acb997bb8f1ae3e4e71 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:09:30 +0800
Subject: [PATCH] move kvm_x86_ops to x86.c
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm_main.c | 1 -
drivers/kvm/x86.c | 2 ++
drivers/kvm/x86.h | 2 ++
3 files changed, 4 insertions(+), 1 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 3a72f58..71a3b7a 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -55,7 +55,6 @@ static LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
-struct kvm_x86_ops *kvm_x86_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 2fa4f13..92c0988 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -44,6 +44,8 @@
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
+struct kvm_x86_ops *kvm_x86_ops;
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", STAT_OFFSET(pf_fixed) },
{ "pf_guest", STAT_OFFSET(pf_guest) },
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 663b822..ec32c26 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -85,6 +85,8 @@ struct kvm_vcpu {
struct x86_emulate_ctxt emulate_ctxt;
};
+extern struct kvm_x86_ops *kvm_x86_ops;
+
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
--
1.5.1.2
[-- Attachment #5: 0003-Using-kvm-arch-support-to-replace-kvm_x86_ops-and-f.patch --]
[-- Type: application/octet-stream, Size: 11933 bytes --]
From 7f077b4965a153a8e3321fe6f1a438e193088500 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:38:21 +0800
Subject: [PATCH] Using kvm arch support to replace kvm_x86_ops, and further make kvm_main.c arch-independent.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 19 ++++++
drivers/kvm/kvm_main.c | 113 ++++++-----------------------------
drivers/kvm/x86.c | 157 ++++++++++++++++++++++++++++++++++++++++++++++++
drivers/kvm/x86.h | 3 +
4 files changed, 197 insertions(+), 95 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6498324..bca07c6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -492,6 +492,8 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
+void decache_vcpus_on_cpu(int cpu);
+
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module);
@@ -649,6 +651,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
__init void kvm_arch_init(void);
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu);
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_arch_hardware_enable(void *garbage);
+void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+
+
static inline void kvm_guest_enter(void)
{
account_system_vtime(current);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 71a3b7a..1c7689b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -50,8 +50,8 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static DEFINE_SPINLOCK(kvm_lock);
-static LIST_HEAD(vm_list);
+DEFINE_SPINLOCK(kvm_lock);
+LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
@@ -124,13 +124,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
- vcpu->mmu.root_hpa = INVALID_PAGE;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
- if (!irqchip_in_kernel(kvm) || id == 0)
- vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
- else
- vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
init_waitqueue_head(&vcpu->wq);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -140,29 +135,11 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
}
vcpu->run = page_address(page);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
- goto fail_free_run;
- }
- vcpu->pio_data = page_address(page);
-
- r = kvm_mmu_create(vcpu);
+ r = kvm_arch_vcpu_init(vcpu);
if (r < 0)
- goto fail_free_pio_data;
-
- if (irqchip_in_kernel(kvm)) {
- r = kvm_create_lapic(vcpu);
- if (r < 0)
- goto fail_mmu_destroy;
- }
-
+ goto fail_free_run;
return 0;
-fail_mmu_destroy:
- kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
- free_page((unsigned long)vcpu->pio_data);
fail_free_run:
free_page((unsigned long)vcpu->run);
fail:
@@ -172,9 +149,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- kvm_free_lapic(vcpu);
- kvm_mmu_destroy(vcpu);
- free_page((unsigned long)vcpu->pio_data);
+ kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
@@ -240,7 +215,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_unload_vcpu_mmu(kvm->vcpus[i]);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
- kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
@@ -901,28 +876,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
if (!valid_vcpu(n))
return -EINVAL;
- vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+ vcpu = kvm_arch_vcpu_create(kvm, n);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
- /* We do fxsave: this must be aligned. */
- BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
-
- vcpu_load(vcpu);
- r = kvm_x86_ops->vcpu_reset(vcpu);
- if (r == 0)
- r = kvm_mmu_setup(vcpu);
- vcpu_put(vcpu);
- if (r < 0)
- goto free_vcpu;
-
mutex_lock(&kvm->lock);
if (kvm->vcpus[n]) {
r = -EEXIST;
mutex_unlock(&kvm->lock);
- goto mmu_unload;
+ goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
mutex_unlock(&kvm->lock);
@@ -937,14 +901,8 @@ unlink:
mutex_lock(&kvm->lock);
kvm->vcpus[n] = NULL;
mutex_unlock(&kvm->lock);
-
-mmu_unload:
- vcpu_load(vcpu);
- kvm_mmu_unload(vcpu);
- vcpu_put(vcpu);
-
-free_vcpu:
- kvm_x86_ops->vcpu_free(vcpu);
+vcpu_destroy:
+ kvm_arch_vcpu_destory(vcpu);
return r;
}
@@ -1282,41 +1240,6 @@ static struct miscdevice kvm_dev = {
&kvm_chardev_ops,
};
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it.
- */
-static void decache_vcpus_on_cpu(int cpu)
-{
- struct kvm *vm;
- struct kvm_vcpu *vcpu;
- int i;
-
- spin_lock(&kvm_lock);
- list_for_each_entry(vm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = vm->vcpus[i];
- if (!vcpu)
- continue;
- /*
- * If the vcpu is locked, then it is running on some
- * other cpu and therefore it is not cached on the
- * cpu in question.
- *
- * If it's not locked, check the last cpu it executed
- * on.
- */
- if (mutex_trylock(&vcpu->mutex)) {
- if (vcpu->cpu == cpu) {
- kvm_x86_ops->vcpu_decache(vcpu);
- vcpu->cpu = -1;
- }
- mutex_unlock(&vcpu->mutex);
- }
- }
- spin_unlock(&kvm_lock);
-}
-
static void hardware_enable(void *junk)
{
int cpu = raw_smp_processor_id();
@@ -1324,7 +1247,7 @@ static void hardware_enable(void *junk)
if (cpu_isset(cpu, cpus_hardware_enabled))
return;
cpu_set(cpu, cpus_hardware_enabled);
- kvm_x86_ops->hardware_enable(NULL);
+ kvm_arch_hardware_enable(NULL);
}
static void hardware_disable(void *junk)
@@ -1335,7 +1258,7 @@ static void hardware_disable(void *junk)
return;
cpu_clear(cpu, cpus_hardware_enabled);
decache_vcpus_on_cpu(cpu);
- kvm_x86_ops->hardware_disable(NULL);
+ kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -1501,7 +1424,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1509,7 +1432,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_put(vcpu);
+ kvm_arch_vcpu_put(vcpu);
}
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
@@ -1534,13 +1457,13 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
kvm_x86_ops = ops;
- r = kvm_x86_ops->hardware_setup();
+ r = kvm_arch_hardware_setup();
if (r < 0)
goto out;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
- kvm_x86_ops->check_processor_compatibility,
+ kvm_arch_check_processor_compat,
&r, 0, 1);
if (r < 0)
goto out_free_0;
@@ -1595,7 +1518,7 @@ out_free_2:
out_free_1:
on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
out:
kvm_x86_ops = NULL;
return r;
@@ -1611,7 +1534,7 @@ void kvm_exit_x86(void)
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable, NULL, 0, 1);
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 92c0988..6f3560e 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -564,6 +564,41 @@ out:
return r;
}
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+ struct kvm *vm;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ spin_lock(&kvm_lock);
+ list_for_each_entry(vm, &vm_list, vm_list)
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = vm->vcpus[i];
+ if (!vcpu)
+ continue;
+ /*
+ * If the vcpu is locked, then it is running on some
+ * other cpu and therefore it is not cached on the
+ * cpu in question.
+ *
+ * If it's not locked, check the last cpu it executed
+ * on.
+ */
+ if (mutex_trylock(&vcpu->mutex)) {
+ if (vcpu->cpu == cpu) {
+ kvm_x86_ops->vcpu_decache(vcpu);
+ vcpu->cpu = -1;
+ }
+ mutex_unlock(&vcpu->mutex);
+ }
+ }
+ spin_unlock(&kvm_lock);
+}
+
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2320,3 +2355,125 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ int r;
+ struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+ if (IS_ERR(vcpu)) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
+ vcpu_load(vcpu);
+ r = kvm_arch_vcpu_reset(vcpu);
+ if (r == 0)
+ r = kvm_mmu_setup(vcpu);
+ vcpu_put(vcpu);
+ if (r < 0)
+ goto free_vcpu;
+
+ return vcpu;
+free_vcpu:
+ kvm_x86_ops->vcpu_free(vcpu);
+fail:
+ return ERR_PTR(r);
+}
+
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+ kvm_x86_ops->hardware_enable(garbage);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+ kvm_x86_ops->hardware_disable(garbage);
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return kvm_x86_ops->hardware_setup();
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+ kvm_x86_ops->hardware_unsetup();
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ kvm_x86_ops->check_processor_compatibility(rtn);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct page *page;
+ struct kvm *kvm;
+ int r;
+
+ BUG_ON(vcpu->kvm == NULL);
+ kvm = vcpu->kvm;
+
+ vcpu->mmu.root_hpa = INVALID_PAGE;
+ if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+ vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+ else
+ vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ vcpu->pio_data = page_address(page);
+
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto fail_free_pio_data;
+
+ if (irqchip_in_kernel(kvm)) {
+ r = kvm_create_lapic(vcpu);
+ if (r < 0)
+ goto fail_mmu_destroy;
+ }
+
+ return 0;
+
+fail_mmu_destroy:
+ kvm_mmu_destroy(vcpu);
+fail_free_pio_data:
+ free_page((unsigned long)vcpu->pio_data);
+fail:
+ return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kvm_free_lapic(vcpu);
+ kvm_mmu_destroy(vcpu);
+ free_page((unsigned long)vcpu->pio_data);
+}
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index ec32c26..4df0641 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -19,6 +19,9 @@
#include <linux/kvm.h>
#include <linux/kvm_para.h>
+extern spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
struct kvm_vcpu {
KVM_VCPU_COMM;
u64 host_tsc;
--
1.5.1.2
[-- Attachment #6: 0004-Combine-kvm_init-and-kvm_init_x86-and-will-be-calle.patch --]
[-- Type: application/octet-stream, Size: 3946 bytes --]
From 0c1d201effc1b4e595406670d42b68cdcfb9b85d Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:39:31 +0800
Subject: [PATCH] Combine kvm_init and kvm_init_x86, and will be called once arch registers.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 4 +-
drivers/kvm/kvm_main.c | 61 +++++++++++++++++------------------------------
drivers/kvm/svm.c | 4 +-
drivers/kvm/vmx.c | 4 +-
4 files changed, 28 insertions(+), 45 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index bca07c6..5e7be15 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -495,9 +495,9 @@ void vcpu_put(struct kvm_vcpu *vcpu);
void decache_vcpus_on_cpu(int cpu);
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module);
-void kvm_exit_x86(void);
+void kvm_exit(void);
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1c7689b..626a8fc 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1435,12 +1435,27 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module)
{
int r;
int cpu;
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out4;
+
+ kvm_init_debug();
+
+ kvm_arch_init();
+
+ bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+ if (bad_page == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
return -EEXIST;
@@ -1521,11 +1536,14 @@ out_free_0:
kvm_arch_hardware_unsetup();
out:
kvm_x86_ops = NULL;
+ kvm_exit_debug();
+ kvm_mmu_module_exit();
+out4:
return r;
}
-EXPORT_SYMBOL_GPL(kvm_init_x86);
+EXPORT_SYMBOL_GPL(kvm_init);
-void kvm_exit_x86(void)
+void kvm_exit(void)
{
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
@@ -1536,43 +1554,8 @@ void kvm_exit_x86(void)
on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_hardware_unsetup();
kvm_x86_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_exit_x86);
-
-static __init int kvm_init(void)
-{
- int r;
-
- r = kvm_mmu_module_init();
- if (r)
- goto out4;
-
- kvm_init_debug();
-
- kvm_arch_init();
-
- bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
- if (bad_page == NULL) {
- r = -ENOMEM;
- goto out;
- }
-
- return 0;
-
-out:
- kvm_exit_debug();
- kvm_mmu_module_exit();
-out4:
- return r;
-}
-
-static __exit void kvm_exit(void)
-{
kvm_exit_debug();
__free_page(bad_page);
kvm_mmu_module_exit();
}
-
-module_init(kvm_init)
-module_exit(kvm_exit)
+EXPORT_SYMBOL_GPL(kvm_exit);
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index f54b2ea..11a0010 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1722,13 +1722,13 @@ static struct kvm_x86_ops svm_x86_ops = {
static int __init svm_init(void)
{
- return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
+ return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
THIS_MODULE);
}
static void __exit svm_exit(void)
{
- kvm_exit_x86();
+ kvm_exit();
}
module_init(svm_init)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 0f48d23..ecb98c1 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2623,7 +2623,7 @@ static int __init vmx_init(void)
memset(iova, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
- r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out1;
@@ -2644,7 +2644,7 @@ static void __exit vmx_exit(void)
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
- kvm_exit_x86();
+ kvm_exit();
}
module_init(vmx_init)
--
1.5.1.2
[-- Attachment #7: Type: text/plain, Size: 314 bytes --]
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
[-- Attachment #8: Type: text/plain, Size: 186 bytes --]
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FBF9-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-11-14 13:04 ` Carsten Otte
0 siblings, 0 replies; 10+ messages in thread
From: Carsten Otte @ 2007-11-14 13:04 UTC (permalink / raw)
To: Zhang, Xiantao
Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Avi Kivity,
Hollis Blanchard
Zhang, Xiantao wrote:
> So we have to expose kvm_lock, and vm_list out. Is it OK?
I think that should be ok. We're within the very same module.
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <473AE1A9.3010501-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
2007-11-14 12:35 ` [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Zhang, Xiantao
2007-11-14 12:45 ` Zhang, Xiantao
@ 2007-11-14 13:35 ` Avi Kivity
[not found] ` <473AF995.9000006-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2007-11-14 13:35 UTC (permalink / raw)
To: carsteno-tA70FqPdS9bQT0dZR+AlfA
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard,
Zhang, Xiantao
Carsten Otte wrote:
> I still do strongly agree with the general idea of this patch, and
> most of the split comes out just right now. However, there is one
> thing I'd like to pick on:
>
> decache_vcpus_on_cpu should be an arch callback, and rather than
> kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and do
> the arch callback in a loop. The whole thing is nop for us, thus the
> whole thing should be a callback.
>
> I'd really like to see this changed before the patch gets merged.
Xiantao, can you address this?
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <473AF995.9000006-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-11-14 15:20 ` Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FC27-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
0 siblings, 1 reply; 10+ messages in thread
From: Zhang, Xiantao @ 2007-11-14 15:20 UTC (permalink / raw)
To: Avi Kivity, carsteno-tA70FqPdS9bQT0dZR+AlfA
Cc: kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard
[-- Attachment #1: Type: text/plain, Size: 628 bytes --]
Avi Kivity wrote:
> Carsten Otte wrote:
>> I still do strongly agree with the general idea of this patch, and
>> most of the split comes out just right now. However, there is one
>> thing I'd like to pick on:
>>
>> decache_vcpus_on_cpu should be an arch callback, and rather than
>> kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and
>> do the arch callback in a loop. The whole thing is nop for us, thus
>> the whole thing should be a callback.
>>
>> I'd really like to see this changed before the patch gets merged.
>
> Xiantao, can you address this?
Yes, addressed in the attached patch. :)
[-- Attachment #2: 0003-Using-kvm-arch-support-to-replace-kvm_x86_ops-and-f.patch --]
[-- Type: application/octet-stream, Size: 11933 bytes --]
From 7f077b4965a153a8e3321fe6f1a438e193088500 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:38:21 +0800
Subject: [PATCH] Using kvm arch support to replace kvm_x86_ops, and further make kvm_main.c arch-independent.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 19 ++++++
drivers/kvm/kvm_main.c | 113 ++++++-----------------------------
drivers/kvm/x86.c | 157 ++++++++++++++++++++++++++++++++++++++++++++++++
drivers/kvm/x86.h | 3 +
4 files changed, 197 insertions(+), 95 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6498324..bca07c6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -492,6 +492,8 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
+void decache_vcpus_on_cpu(int cpu);
+
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module);
@@ -649,6 +651,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
__init void kvm_arch_init(void);
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu);
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_arch_hardware_enable(void *garbage);
+void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+
+
static inline void kvm_guest_enter(void)
{
account_system_vtime(current);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 71a3b7a..1c7689b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -50,8 +50,8 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static DEFINE_SPINLOCK(kvm_lock);
-static LIST_HEAD(vm_list);
+DEFINE_SPINLOCK(kvm_lock);
+LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
@@ -124,13 +124,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
- vcpu->mmu.root_hpa = INVALID_PAGE;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
- if (!irqchip_in_kernel(kvm) || id == 0)
- vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
- else
- vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
init_waitqueue_head(&vcpu->wq);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -140,29 +135,11 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
}
vcpu->run = page_address(page);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
- goto fail_free_run;
- }
- vcpu->pio_data = page_address(page);
-
- r = kvm_mmu_create(vcpu);
+ r = kvm_arch_vcpu_init(vcpu);
if (r < 0)
- goto fail_free_pio_data;
-
- if (irqchip_in_kernel(kvm)) {
- r = kvm_create_lapic(vcpu);
- if (r < 0)
- goto fail_mmu_destroy;
- }
-
+ goto fail_free_run;
return 0;
-fail_mmu_destroy:
- kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
- free_page((unsigned long)vcpu->pio_data);
fail_free_run:
free_page((unsigned long)vcpu->run);
fail:
@@ -172,9 +149,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- kvm_free_lapic(vcpu);
- kvm_mmu_destroy(vcpu);
- free_page((unsigned long)vcpu->pio_data);
+ kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
@@ -240,7 +215,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_unload_vcpu_mmu(kvm->vcpus[i]);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
- kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
@@ -901,28 +876,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
if (!valid_vcpu(n))
return -EINVAL;
- vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+ vcpu = kvm_arch_vcpu_create(kvm, n);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
- /* We do fxsave: this must be aligned. */
- BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
-
- vcpu_load(vcpu);
- r = kvm_x86_ops->vcpu_reset(vcpu);
- if (r == 0)
- r = kvm_mmu_setup(vcpu);
- vcpu_put(vcpu);
- if (r < 0)
- goto free_vcpu;
-
mutex_lock(&kvm->lock);
if (kvm->vcpus[n]) {
r = -EEXIST;
mutex_unlock(&kvm->lock);
- goto mmu_unload;
+ goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
mutex_unlock(&kvm->lock);
@@ -937,14 +901,8 @@ unlink:
mutex_lock(&kvm->lock);
kvm->vcpus[n] = NULL;
mutex_unlock(&kvm->lock);
-
-mmu_unload:
- vcpu_load(vcpu);
- kvm_mmu_unload(vcpu);
- vcpu_put(vcpu);
-
-free_vcpu:
- kvm_x86_ops->vcpu_free(vcpu);
+vcpu_destroy:
+ kvm_arch_vcpu_destory(vcpu);
return r;
}
@@ -1282,41 +1240,6 @@ static struct miscdevice kvm_dev = {
&kvm_chardev_ops,
};
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it.
- */
-static void decache_vcpus_on_cpu(int cpu)
-{
- struct kvm *vm;
- struct kvm_vcpu *vcpu;
- int i;
-
- spin_lock(&kvm_lock);
- list_for_each_entry(vm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = vm->vcpus[i];
- if (!vcpu)
- continue;
- /*
- * If the vcpu is locked, then it is running on some
- * other cpu and therefore it is not cached on the
- * cpu in question.
- *
- * If it's not locked, check the last cpu it executed
- * on.
- */
- if (mutex_trylock(&vcpu->mutex)) {
- if (vcpu->cpu == cpu) {
- kvm_x86_ops->vcpu_decache(vcpu);
- vcpu->cpu = -1;
- }
- mutex_unlock(&vcpu->mutex);
- }
- }
- spin_unlock(&kvm_lock);
-}
-
static void hardware_enable(void *junk)
{
int cpu = raw_smp_processor_id();
@@ -1324,7 +1247,7 @@ static void hardware_enable(void *junk)
if (cpu_isset(cpu, cpus_hardware_enabled))
return;
cpu_set(cpu, cpus_hardware_enabled);
- kvm_x86_ops->hardware_enable(NULL);
+ kvm_arch_hardware_enable(NULL);
}
static void hardware_disable(void *junk)
@@ -1335,7 +1258,7 @@ static void hardware_disable(void *junk)
return;
cpu_clear(cpu, cpus_hardware_enabled);
decache_vcpus_on_cpu(cpu);
- kvm_x86_ops->hardware_disable(NULL);
+ kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -1501,7 +1424,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1509,7 +1432,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_put(vcpu);
+ kvm_arch_vcpu_put(vcpu);
}
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
@@ -1534,13 +1457,13 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
kvm_x86_ops = ops;
- r = kvm_x86_ops->hardware_setup();
+ r = kvm_arch_hardware_setup();
if (r < 0)
goto out;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
- kvm_x86_ops->check_processor_compatibility,
+ kvm_arch_check_processor_compat,
&r, 0, 1);
if (r < 0)
goto out_free_0;
@@ -1595,7 +1518,7 @@ out_free_2:
out_free_1:
on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
out:
kvm_x86_ops = NULL;
return r;
@@ -1611,7 +1534,7 @@ void kvm_exit_x86(void)
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable, NULL, 0, 1);
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 92c0988..6f3560e 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -564,6 +564,41 @@ out:
return r;
}
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+ struct kvm *vm;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ spin_lock(&kvm_lock);
+ list_for_each_entry(vm, &vm_list, vm_list)
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = vm->vcpus[i];
+ if (!vcpu)
+ continue;
+ /*
+ * If the vcpu is locked, then it is running on some
+ * other cpu and therefore it is not cached on the
+ * cpu in question.
+ *
+ * If it's not locked, check the last cpu it executed
+ * on.
+ */
+ if (mutex_trylock(&vcpu->mutex)) {
+ if (vcpu->cpu == cpu) {
+ kvm_x86_ops->vcpu_decache(vcpu);
+ vcpu->cpu = -1;
+ }
+ mutex_unlock(&vcpu->mutex);
+ }
+ }
+ spin_unlock(&kvm_lock);
+}
+
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2320,3 +2355,125 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ int r;
+ struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+ if (IS_ERR(vcpu)) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
+ vcpu_load(vcpu);
+ r = kvm_arch_vcpu_reset(vcpu);
+ if (r == 0)
+ r = kvm_mmu_setup(vcpu);
+ vcpu_put(vcpu);
+ if (r < 0)
+ goto free_vcpu;
+
+ return vcpu;
+free_vcpu:
+ kvm_x86_ops->vcpu_free(vcpu);
+fail:
+ return ERR_PTR(r);
+}
+
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+ kvm_x86_ops->hardware_enable(garbage);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+ kvm_x86_ops->hardware_disable(garbage);
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return kvm_x86_ops->hardware_setup();
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+ kvm_x86_ops->hardware_unsetup();
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ kvm_x86_ops->check_processor_compatibility(rtn);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct page *page;
+ struct kvm *kvm;
+ int r;
+
+ BUG_ON(vcpu->kvm == NULL);
+ kvm = vcpu->kvm;
+
+ vcpu->mmu.root_hpa = INVALID_PAGE;
+ if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+ vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+ else
+ vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ vcpu->pio_data = page_address(page);
+
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto fail_free_pio_data;
+
+ if (irqchip_in_kernel(kvm)) {
+ r = kvm_create_lapic(vcpu);
+ if (r < 0)
+ goto fail_mmu_destroy;
+ }
+
+ return 0;
+
+fail_mmu_destroy:
+ kvm_mmu_destroy(vcpu);
+fail_free_pio_data:
+ free_page((unsigned long)vcpu->pio_data);
+fail:
+ return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kvm_free_lapic(vcpu);
+ kvm_mmu_destroy(vcpu);
+ free_page((unsigned long)vcpu->pio_data);
+}
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index ec32c26..4df0641 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -19,6 +19,9 @@
#include <linux/kvm.h>
#include <linux/kvm_para.h>
+extern spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
struct kvm_vcpu {
KVM_VCPU_COMM;
u64 host_tsc;
--
1.5.1.2
[-- Attachment #3: Type: text/plain, Size: 314 bytes --]
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
[-- Attachment #4: Type: text/plain, Size: 186 bytes --]
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FC27-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-11-14 15:25 ` Avi Kivity
[not found] ` <473B1368.9000304-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
0 siblings, 1 reply; 10+ messages in thread
From: Avi Kivity @ 2007-11-14 15:25 UTC (permalink / raw)
To: Zhang, Xiantao
Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard
Zhang, Xiantao wrote:
> Avi Kivity wrote:
>
>> Carsten Otte wrote:
>>
>>> I still do strongly agree with the general idea of this patch, and
>>> most of the split comes out just right now. However, there is one
>>> thing I'd like to pick on:
>>>
>>> decache_vcpus_on_cpu should be an arch callback, and rather than
>>> kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and
>>> do the arch callback in a loop. The whole thing is nop for us, thus
>>> the whole thing should be a callback.
>>>
>>> I'd really like to see this changed before the patch gets merged.
>>>
>> Xiantao, can you address this?
>>
>
> Yes, adressed in attached patch. :)
>
Just to be sure: this is a replacement for your previous #3 patch, all
the rest remain unchanged?
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <473B1368.9000304-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
@ 2007-11-14 15:30 ` Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FC29-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
0 siblings, 1 reply; 10+ messages in thread
From: Zhang, Xiantao @ 2007-11-14 15:30 UTC (permalink / raw)
To: Avi Kivity
Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard
[-- Attachment #1: Type: text/plain, Size: 1000 bytes --]
Avi Kivity wrote:
> Zhang, Xiantao wrote:
>> Avi Kivity wrote:
>>
>>> Carsten Otte wrote:
>>>
>>>> I still do strongly agree with the general idea of this patch, and
>>>> most of the split comes out just right now. However, there is one
>>>> thing I'd like to pick on:
>>>>
>>>> decache_vcpus_on_cpu should be an arch callback, and rather than
>>>> kvm_arch_vcpu_decache. There's no reason for s390 to grab locks and
>>>> do the arch callback in a loop. The whole thing is nop for us, thus
>>>> the whole thing should be a callback.
>>>>
>>>> I'd really like to see this changed before the patch gets merged.
>>>>
>>> Xiantao, can you address this?
>>>
>>
>> Yes, adressed in attached patch. :)
>>
>
> Just to be sure: this is a replacement for your previous #3 patch,
> all the rest remain unchanged?
That also works, but the last two patches will produce some warnings about
hunk offsets. It is better to use the attached patches, which have been rebased.
Thanks -Xiantao
[-- Attachment #2: 0005-Move-some-x86-specific-part-from-kvm_init-to-kvm_arc.patch --]
[-- Type: application/octet-stream, Size: 3848 bytes --]
From 5c474cfa69f199d0347d2791933b3a9a4f540fcd Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:40:21 +0800
Subject: [PATCH] Move-some-x86-specific-part-from-kvm_init-to-kvm_arch
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 5 +++--
drivers/kvm/kvm_main.c | 26 ++++++--------------------
drivers/kvm/x86.c | 27 ++++++++++++++++++++++++++-
3 files changed, 35 insertions(+), 23 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 5e7be15..96d9c7d 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -495,7 +495,7 @@ void vcpu_put(struct kvm_vcpu *vcpu);
void decache_vcpus_on_cpu(int cpu);
-int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module);
void kvm_exit(void);
@@ -649,7 +649,8 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
-__init void kvm_arch_init(void);
+int kvm_arch_init(void *opaque);
+void kvm_arch_exit(void);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 626a8fc..2cbf662 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1435,7 +1435,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
-int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(void *opaque, unsigned int vcpu_size,
struct module *module)
{
int r;
@@ -1447,7 +1447,9 @@ int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
kvm_init_debug();
- kvm_arch_init();
+ r = kvm_arch_init(opaque);
+ if (r)
+ goto out4;
bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -1456,22 +1458,6 @@ int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
goto out;
}
- if (kvm_x86_ops) {
- printk(KERN_ERR "kvm: already loaded the other module\n");
- return -EEXIST;
- }
-
- if (!ops->cpu_has_kvm_support()) {
- printk(KERN_ERR "kvm: no hardware support\n");
- return -EOPNOTSUPP;
- }
- if (ops->disabled_by_bios()) {
- printk(KERN_ERR "kvm: disabled by bios\n");
- return -EOPNOTSUPP;
- }
-
- kvm_x86_ops = ops;
-
r = kvm_arch_hardware_setup();
if (r < 0)
goto out;
@@ -1535,7 +1521,7 @@ out_free_1:
out_free_0:
kvm_arch_hardware_unsetup();
out:
- kvm_x86_ops = NULL;
+ kvm_arch_exit();
kvm_exit_debug();
kvm_mmu_module_exit();
out4:
@@ -1553,7 +1539,7 @@ void kvm_exit(void)
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_hardware_unsetup();
- kvm_x86_ops = NULL;
+ kvm_arch_exit();
kvm_exit_debug();
__free_page(bad_page);
kvm_mmu_module_exit();
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 6f3560e..8d45920 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -1646,11 +1646,36 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
-__init void kvm_arch_init(void)
+int kvm_arch_init(void *opaque)
{
+ struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+
kvm_init_msr_list();
+
+ if (kvm_x86_ops) {
+ printk(KERN_ERR "kvm: already loaded the other module\n");
+ return -EEXIST;
+ }
+
+ if (!ops->cpu_has_kvm_support()) {
+ printk(KERN_ERR "kvm: no hardware support\n");
+ return -EOPNOTSUPP;
+ }
+ if (ops->disabled_by_bios()) {
+ printk(KERN_ERR "kvm: disabled by bios\n");
+ return -EOPNOTSUPP;
+ }
+
+ kvm_x86_ops = ops;
+
+ return 0;
}
+void kvm_arch_exit(void)
+{
+ kvm_x86_ops = NULL;
+ }
+
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
--
1.5.1.2
[-- Attachment #3: 0001-Move-some-includes-to-x86.c-from-kvm_main.c-since-t.patch --]
[-- Type: application/octet-stream, Size: 1321 bytes --]
From 0097715864b649b83e25b818094c456fcea2e928 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:08:51 +0800
Subject: [PATCH] Move some includes to x86.c from kvm_main.c, since the related functions have been moved to x86.c
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm_main.c | 2 --
drivers/kvm/x86.c | 2 ++
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index fb939ab..3a72f58 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -17,7 +17,6 @@
#include "kvm.h"
#include "x86.h"
-#include "x86_emulate.h"
#include "irq.h"
#include <linux/kvm.h>
@@ -44,7 +43,6 @@
#include <linux/mman.h>
#include <asm/processor.h>
-#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index aa6c3d8..2fa4f13 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -16,6 +16,7 @@
#include "kvm.h"
#include "x86.h"
+#include "x86_emulate.h"
#include "segment_descriptor.h"
#include "irq.h"
@@ -25,6 +26,7 @@
#include <linux/module.h>
#include <asm/uaccess.h>
+#include <asm/msr.h>
#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS \
--
1.5.1.2
[-- Attachment #4: 0002-move-kvm_x86_ops-to-x86.c.patch --]
[-- Type: application/octet-stream, Size: 1547 bytes --]
From c11651f1e1fb6b64fd865acb997bb8f1ae3e4e71 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:09:30 +0800
Subject: [PATCH] move kvm_x86_ops to x86.c
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm_main.c | 1 -
drivers/kvm/x86.c | 2 ++
drivers/kvm/x86.h | 2 ++
3 files changed, 4 insertions(+), 1 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 3a72f58..71a3b7a 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -55,7 +55,6 @@ static LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
-struct kvm_x86_ops *kvm_x86_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 2fa4f13..92c0988 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -44,6 +44,8 @@
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
+struct kvm_x86_ops *kvm_x86_ops;
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", STAT_OFFSET(pf_fixed) },
{ "pf_guest", STAT_OFFSET(pf_guest) },
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 663b822..ec32c26 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -85,6 +85,8 @@ struct kvm_vcpu {
struct x86_emulate_ctxt emulate_ctxt;
};
+extern struct kvm_x86_ops *kvm_x86_ops;
+
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
--
1.5.1.2
[-- Attachment #5: 0003-Using-kvm-arch-support-to-replace-kvm_x86_ops-and-f.patch --]
[-- Type: application/octet-stream, Size: 11933 bytes --]
From 7f077b4965a153a8e3321fe6f1a438e193088500 Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:38:21 +0800
Subject: [PATCH] Using kvm arch support to replace kvm_x86_ops, and further make kvm_main.c arch-independent.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 19 ++++++
drivers/kvm/kvm_main.c | 113 ++++++-----------------------------
drivers/kvm/x86.c | 157 ++++++++++++++++++++++++++++++++++++++++++++++++
drivers/kvm/x86.h | 3 +
4 files changed, 197 insertions(+), 95 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 6498324..bca07c6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -492,6 +492,8 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
+void decache_vcpus_on_cpu(int cpu);
+
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module);
@@ -649,6 +651,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
__init void kvm_arch_init(void);
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu);
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_arch_hardware_enable(void *garbage);
+void kvm_arch_hardware_disable(void *garbage);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+
+
static inline void kvm_guest_enter(void)
{
account_system_vtime(current);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 71a3b7a..1c7689b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -50,8 +50,8 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static DEFINE_SPINLOCK(kvm_lock);
-static LIST_HEAD(vm_list);
+DEFINE_SPINLOCK(kvm_lock);
+LIST_HEAD(vm_list);
static cpumask_t cpus_hardware_enabled;
@@ -124,13 +124,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
- vcpu->mmu.root_hpa = INVALID_PAGE;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
- if (!irqchip_in_kernel(kvm) || id == 0)
- vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
- else
- vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
init_waitqueue_head(&vcpu->wq);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -140,29 +135,11 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
}
vcpu->run = page_address(page);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
- goto fail_free_run;
- }
- vcpu->pio_data = page_address(page);
-
- r = kvm_mmu_create(vcpu);
+ r = kvm_arch_vcpu_init(vcpu);
if (r < 0)
- goto fail_free_pio_data;
-
- if (irqchip_in_kernel(kvm)) {
- r = kvm_create_lapic(vcpu);
- if (r < 0)
- goto fail_mmu_destroy;
- }
-
+ goto fail_free_run;
return 0;
-fail_mmu_destroy:
- kvm_mmu_destroy(vcpu);
-fail_free_pio_data:
- free_page((unsigned long)vcpu->pio_data);
fail_free_run:
free_page((unsigned long)vcpu->run);
fail:
@@ -172,9 +149,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- kvm_free_lapic(vcpu);
- kvm_mmu_destroy(vcpu);
- free_page((unsigned long)vcpu->pio_data);
+ kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
@@ -240,7 +215,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_unload_vcpu_mmu(kvm->vcpus[i]);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
- kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
@@ -901,28 +876,17 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
if (!valid_vcpu(n))
return -EINVAL;
- vcpu = kvm_x86_ops->vcpu_create(kvm, n);
+ vcpu = kvm_arch_vcpu_create(kvm, n);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
- /* We do fxsave: this must be aligned. */
- BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
-
- vcpu_load(vcpu);
- r = kvm_x86_ops->vcpu_reset(vcpu);
- if (r == 0)
- r = kvm_mmu_setup(vcpu);
- vcpu_put(vcpu);
- if (r < 0)
- goto free_vcpu;
-
mutex_lock(&kvm->lock);
if (kvm->vcpus[n]) {
r = -EEXIST;
mutex_unlock(&kvm->lock);
- goto mmu_unload;
+ goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
mutex_unlock(&kvm->lock);
@@ -937,14 +901,8 @@ unlink:
mutex_lock(&kvm->lock);
kvm->vcpus[n] = NULL;
mutex_unlock(&kvm->lock);
-
-mmu_unload:
- vcpu_load(vcpu);
- kvm_mmu_unload(vcpu);
- vcpu_put(vcpu);
-
-free_vcpu:
- kvm_x86_ops->vcpu_free(vcpu);
+vcpu_destroy:
+ kvm_arch_vcpu_destory(vcpu);
return r;
}
@@ -1282,41 +1240,6 @@ static struct miscdevice kvm_dev = {
&kvm_chardev_ops,
};
-/*
- * Make sure that a cpu that is being hot-unplugged does not have any vcpus
- * cached on it.
- */
-static void decache_vcpus_on_cpu(int cpu)
-{
- struct kvm *vm;
- struct kvm_vcpu *vcpu;
- int i;
-
- spin_lock(&kvm_lock);
- list_for_each_entry(vm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = vm->vcpus[i];
- if (!vcpu)
- continue;
- /*
- * If the vcpu is locked, then it is running on some
- * other cpu and therefore it is not cached on the
- * cpu in question.
- *
- * If it's not locked, check the last cpu it executed
- * on.
- */
- if (mutex_trylock(&vcpu->mutex)) {
- if (vcpu->cpu == cpu) {
- kvm_x86_ops->vcpu_decache(vcpu);
- vcpu->cpu = -1;
- }
- mutex_unlock(&vcpu->mutex);
- }
- }
- spin_unlock(&kvm_lock);
-}
-
static void hardware_enable(void *junk)
{
int cpu = raw_smp_processor_id();
@@ -1324,7 +1247,7 @@ static void hardware_enable(void *junk)
if (cpu_isset(cpu, cpus_hardware_enabled))
return;
cpu_set(cpu, cpus_hardware_enabled);
- kvm_x86_ops->hardware_enable(NULL);
+ kvm_arch_hardware_enable(NULL);
}
static void hardware_disable(void *junk)
@@ -1335,7 +1258,7 @@ static void hardware_disable(void *junk)
return;
cpu_clear(cpu, cpus_hardware_enabled);
decache_vcpus_on_cpu(cpu);
- kvm_x86_ops->hardware_disable(NULL);
+ kvm_arch_hardware_disable(NULL);
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -1501,7 +1424,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_load(vcpu, cpu);
+ kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
@@ -1509,7 +1432,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
- kvm_x86_ops->vcpu_put(vcpu);
+ kvm_arch_vcpu_put(vcpu);
}
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
@@ -1534,13 +1457,13 @@ int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
kvm_x86_ops = ops;
- r = kvm_x86_ops->hardware_setup();
+ r = kvm_arch_hardware_setup();
if (r < 0)
goto out;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
- kvm_x86_ops->check_processor_compatibility,
+ kvm_arch_check_processor_compat,
&r, 0, 1);
if (r < 0)
goto out_free_0;
@@ -1595,7 +1518,7 @@ out_free_2:
out_free_1:
on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
out:
kvm_x86_ops = NULL;
return r;
@@ -1611,7 +1534,7 @@ void kvm_exit_x86(void)
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable, NULL, 0, 1);
- kvm_x86_ops->hardware_unsetup();
+ kvm_arch_hardware_unsetup();
kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);
diff --git a/drivers/kvm/x86.c b/drivers/kvm/x86.c
index 92c0988..6f3560e 100644
--- a/drivers/kvm/x86.c
+++ b/drivers/kvm/x86.c
@@ -564,6 +564,41 @@ out:
return r;
}
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+ struct kvm *vm;
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ spin_lock(&kvm_lock);
+ list_for_each_entry(vm, &vm_list, vm_list)
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ vcpu = vm->vcpus[i];
+ if (!vcpu)
+ continue;
+ /*
+ * If the vcpu is locked, then it is running on some
+ * other cpu and therefore it is not cached on the
+ * cpu in question.
+ *
+ * If it's not locked, check the last cpu it executed
+ * on.
+ */
+ if (mutex_trylock(&vcpu->mutex)) {
+ if (vcpu->cpu == cpu) {
+ kvm_x86_ops->vcpu_decache(vcpu);
+ vcpu->cpu = -1;
+ }
+ mutex_unlock(&vcpu->mutex);
+ }
+ }
+ spin_unlock(&kvm_lock);
+}
+
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2320,3 +2355,125 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+ unsigned int id)
+{
+ int r;
+ struct kvm_vcpu *vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+ if (IS_ERR(vcpu)) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ /* We do fxsave: this must be aligned. */
+ BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
+
+ vcpu_load(vcpu);
+ r = kvm_arch_vcpu_reset(vcpu);
+ if (r == 0)
+ r = kvm_mmu_setup(vcpu);
+ vcpu_put(vcpu);
+ if (r < 0)
+ goto free_vcpu;
+
+ return vcpu;
+free_vcpu:
+ kvm_x86_ops->vcpu_free(vcpu);
+fail:
+ return ERR_PTR(r);
+}
+
+void kvm_arch_vcpu_destory(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ kvm_mmu_unload(vcpu);
+ vcpu_put(vcpu);
+
+ kvm_x86_ops->vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+ kvm_x86_ops->hardware_enable(garbage);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+ kvm_x86_ops->hardware_disable(garbage);
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return kvm_x86_ops->hardware_setup();
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+ kvm_x86_ops->hardware_unsetup();
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ kvm_x86_ops->check_processor_compatibility(rtn);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct page *page;
+ struct kvm *kvm;
+ int r;
+
+ BUG_ON(vcpu->kvm == NULL);
+ kvm = vcpu->kvm;
+
+ vcpu->mmu.root_hpa = INVALID_PAGE;
+ if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+ vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+ else
+ vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ vcpu->pio_data = page_address(page);
+
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto fail_free_pio_data;
+
+ if (irqchip_in_kernel(kvm)) {
+ r = kvm_create_lapic(vcpu);
+ if (r < 0)
+ goto fail_mmu_destroy;
+ }
+
+ return 0;
+
+fail_mmu_destroy:
+ kvm_mmu_destroy(vcpu);
+fail_free_pio_data:
+ free_page((unsigned long)vcpu->pio_data);
+fail:
+ return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kvm_free_lapic(vcpu);
+ kvm_mmu_destroy(vcpu);
+ free_page((unsigned long)vcpu->pio_data);
+}
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index ec32c26..4df0641 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -19,6 +19,9 @@
#include <linux/kvm.h>
#include <linux/kvm_para.h>
+extern spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
struct kvm_vcpu {
KVM_VCPU_COMM;
u64 host_tsc;
--
1.5.1.2
[-- Attachment #6: 0004-Combine-kvm_init-and-kvm_init_x86-and-will-be-calle.patch --]
[-- Type: application/octet-stream, Size: 3946 bytes --]
From 0c1d201effc1b4e595406670d42b68cdcfb9b85d Mon Sep 17 00:00:00 2001
From: Zhang Xiantao <xiantao.zhang@intel.com>
Date: Wed, 14 Nov 2007 20:39:31 +0800
Subject: [PATCH] Combine kvm_init and kvm_init_x86, and will be called once arch registers.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
---
drivers/kvm/kvm.h | 4 +-
drivers/kvm/kvm_main.c | 61 +++++++++++++++++------------------------------
drivers/kvm/svm.c | 4 +-
drivers/kvm/vmx.c | 4 +-
4 files changed, 28 insertions(+), 45 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index bca07c6..5e7be15 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -495,9 +495,9 @@ void vcpu_put(struct kvm_vcpu *vcpu);
void decache_vcpus_on_cpu(int cpu);
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module);
-void kvm_exit_x86(void);
+void kvm_exit(void);
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1c7689b..626a8fc 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1435,12 +1435,27 @@ static void kvm_sched_out(struct preempt_notifier *pn,
kvm_arch_vcpu_put(vcpu);
}
-int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
+int kvm_init(struct kvm_x86_ops *ops, unsigned int vcpu_size,
struct module *module)
{
int r;
int cpu;
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out4;
+
+ kvm_init_debug();
+
+ kvm_arch_init();
+
+ bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+ if (bad_page == NULL) {
+ r = -ENOMEM;
+ goto out;
+ }
+
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
return -EEXIST;
@@ -1521,11 +1536,14 @@ out_free_0:
kvm_arch_hardware_unsetup();
out:
kvm_x86_ops = NULL;
+ kvm_exit_debug();
+ kvm_mmu_module_exit();
+out4:
return r;
}
-EXPORT_SYMBOL_GPL(kvm_init_x86);
+EXPORT_SYMBOL_GPL(kvm_init);
-void kvm_exit_x86(void)
+void kvm_exit(void)
{
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
@@ -1536,43 +1554,8 @@ void kvm_exit_x86(void)
on_each_cpu(hardware_disable, NULL, 0, 1);
kvm_arch_hardware_unsetup();
kvm_x86_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_exit_x86);
-
-static __init int kvm_init(void)
-{
- int r;
-
- r = kvm_mmu_module_init();
- if (r)
- goto out4;
-
- kvm_init_debug();
-
- kvm_arch_init();
-
- bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-
- if (bad_page == NULL) {
- r = -ENOMEM;
- goto out;
- }
-
- return 0;
-
-out:
- kvm_exit_debug();
- kvm_mmu_module_exit();
-out4:
- return r;
-}
-
-static __exit void kvm_exit(void)
-{
kvm_exit_debug();
__free_page(bad_page);
kvm_mmu_module_exit();
}
-
-module_init(kvm_init)
-module_exit(kvm_exit)
+EXPORT_SYMBOL_GPL(kvm_exit);
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index f54b2ea..11a0010 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1722,13 +1722,13 @@ static struct kvm_x86_ops svm_x86_ops = {
static int __init svm_init(void)
{
- return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
+ return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
THIS_MODULE);
}
static void __exit svm_exit(void)
{
- kvm_exit_x86();
+ kvm_exit();
}
module_init(svm_init)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 0f48d23..ecb98c1 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -2623,7 +2623,7 @@ static int __init vmx_init(void)
memset(iova, 0xff, PAGE_SIZE);
kunmap(vmx_io_bitmap_b);
- r = kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
if (r)
goto out1;
@@ -2644,7 +2644,7 @@ static void __exit vmx_exit(void)
__free_page(vmx_io_bitmap_b);
__free_page(vmx_io_bitmap_a);
- kvm_exit_x86();
+ kvm_exit();
}
module_init(vmx_init)
--
1.5.1.2
[-- Attachment #7: Type: text/plain, Size: 314 bytes --]
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
[-- Attachment #8: Type: text/plain, Size: 186 bytes --]
_______________________________________________
kvm-devel mailing list
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f@public.gmane.org
https://lists.sourceforge.net/lists/listinfo/kvm-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FC29-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
@ 2007-11-14 16:05 ` Avi Kivity
0 siblings, 0 replies; 10+ messages in thread
From: Avi Kivity @ 2007-11-14 16:05 UTC (permalink / raw)
To: Zhang, Xiantao
Cc: carsteno-tA70FqPdS9bQT0dZR+AlfA,
kvm-devel-5NWGOfrQmneRv+LV9MX5uipxlwaOVQ5f, Hollis Blanchard
Zhang, Xiantao wrote:
> Also work. But the last two patches will have some warnings about hunk
> offset. Had better use the attached patches which have been rebased.
> Thanks -Xiantao
>
Okay, applied the lot. I supplied signoffs from previous iterations of
this patchset, but please keep them in the future.
--
error compiling committee.c: too many arguments to function
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2007-11-14 16:05 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-11-14 5:43 [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FACA-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-14 11:53 ` Carsten Otte
[not found] ` <473AE1A9.3010501-tA70FqPdS9bQT0dZR+AlfA@public.gmane.org>
2007-11-14 12:35 ` [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FBF9-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-14 13:04 ` Carsten Otte
2007-11-14 12:45 ` Zhang, Xiantao
2007-11-14 13:35 ` [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Avi Kivity
[not found] ` <473AF995.9000006-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-11-14 15:20 ` Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FC27-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-14 15:25 ` Avi Kivity
[not found] ` <473B1368.9000304-atKUWr5tajBWk0Htik3J/w@public.gmane.org>
2007-11-14 15:30 ` [PATCH] 3/5 Using kvm arch support instead of kvm_x86_ops Zhang, Xiantao
[not found] ` <42DFA526FC41B1429CE7279EF83C6BDC94FC29-wq7ZOvIWXbMAbVU2wMM1CrfspsVTdybXVpNB7YpNyf8@public.gmane.org>
2007-11-14 16:05 ` Avi Kivity
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox