* [PATCH RFC v3 0/4] decouple vcpu index from apic id
@ 2009-05-28 9:54 Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Gleb Natapov @ 2009-05-28 9:54 UTC (permalink / raw)
To: kvm
Currently vcpu_id is used as an index into the vcpus array and as the apic id
on x86. This is incorrect since apic ids do not have to be contiguous
(they can also encode cpu hierarchy information) and may have values
bigger than the vcpu array size in the case of x2apic. This series removes
the use of vcpu_id as the vcpus array index. Each architecture may use it
how it sees fit. x86 uses it as the apic id.
v2:
In this version vcpus[] is managed by generic code.
v3:
A new ioctl is added to specify the vcpu_id of the BSP vcpu. To maintain
backward compatibility, vcpu_id == 0 is the BSP by default.
Gleb Natapov (4):
Introduce kvm_vcpu_is_bsp() function.
Use pointer to vcpu instead of vcpu_id in timer code.
Break dependency between vcpu index in vcpus array and vcpu_id.
Use macro to iterate over vcpus.
arch/ia64/kvm/kvm-ia64.c | 35 ++++++++++--------------
arch/ia64/kvm/vcpu.c | 2 +-
arch/powerpc/kvm/powerpc.c | 16 +++++++----
arch/s390/kvm/kvm-s390.c | 33 +++++++++++------------
arch/x86/kvm/i8254.c | 13 +++-----
arch/x86/kvm/i8259.c | 6 ++--
arch/x86/kvm/kvm_timer.h | 2 +-
arch/x86/kvm/lapic.c | 9 +++---
arch/x86/kvm/mmu.c | 6 ++--
arch/x86/kvm/svm.c | 4 +-
arch/x86/kvm/timer.c | 2 +-
arch/x86/kvm/vmx.c | 6 ++--
arch/x86/kvm/x86.c | 29 ++++++++++----------
include/linux/kvm.h | 2 +
include/linux/kvm_host.h | 18 ++++++++++++
virt/kvm/ioapic.c | 4 ++-
virt/kvm/irq_comm.c | 6 +---
virt/kvm/kvm_main.c | 63 +++++++++++++++++++++++--------------------
18 files changed, 138 insertions(+), 118 deletions(-)
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH RFC v3 1/4] Introduce kvm_vcpu_is_bsp() function.
2009-05-28 9:54 [PATCH RFC v3 0/4] decouple vcpu index from apic id Gleb Natapov
@ 2009-05-28 9:54 ` Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 2/4] Use pointer to vcpu instead of vcpu_id in timer code Gleb Natapov
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Gleb Natapov @ 2009-05-28 9:54 UTC (permalink / raw)
To: kvm
Use it instead of the open-coded "vcpu_id zero is BSP" assumption.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 2 +-
arch/ia64/kvm/vcpu.c | 2 +-
arch/x86/kvm/i8254.c | 4 ++--
arch/x86/kvm/i8259.c | 6 +++---
arch/x86/kvm/lapic.c | 7 ++++---
arch/x86/kvm/svm.c | 4 ++--
arch/x86/kvm/vmx.c | 6 +++---
arch/x86/kvm/x86.c | 4 ++--
include/linux/kvm_host.h | 5 +++++
virt/kvm/ioapic.c | 4 +++-
virt/kvm/kvm_main.c | 2 ++
11 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3199221..3924591 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1216,7 +1216,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
if (IS_ERR(vmm_vcpu))
return PTR_ERR(vmm_vcpu);
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
/*Set entry address for first run.*/
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index a2c6c15..7e7391d 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -830,7 +830,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
kvm = (struct kvm *)KVM_VM_BASE;
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 584e3d3..de4785a 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -228,7 +228,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
- if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack)
+ if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
return atomic_read(&pit->pit_state.pit_timer.pending);
return 0;
}
@@ -249,7 +249,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
struct hrtimer *timer;
- if (vcpu->vcpu_id != 0 || !pit)
+ if (!kvm_vcpu_is_bsp(vcpu) || !pit)
return;
timer = &pit->pit_state.pit_timer.timer;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1ccb50c..6befa98 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -57,7 +57,7 @@ static void pic_unlock(struct kvm_pic *s)
}
if (wakeup) {
- vcpu = s->kvm->vcpus[0];
+ vcpu = s->kvm->bsp_vcpu;
if (vcpu)
kvm_vcpu_kick(vcpu);
}
@@ -252,7 +252,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
{
int irq, irqbase, n;
struct kvm *kvm = s->pics_state->irq_request_opaque;
- struct kvm_vcpu *vcpu0 = kvm->vcpus[0];
+ struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
if (s == &s->pics_state->pics[0])
irqbase = 0;
@@ -505,7 +505,7 @@ static void picdev_read(struct kvm_io_device *this,
static void pic_irq_request(void *opaque, int level)
{
struct kvm *kvm = opaque;
- struct kvm_vcpu *vcpu = kvm->vcpus[0];
+ struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
struct kvm_pic *s = pic_irqchip(kvm);
int irq = pic_get_irq(&s->pics[0]);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ae99d83..260d4fa 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -787,7 +787,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
vcpu->arch.apic_base = value;
return;
}
- if (apic->vcpu->vcpu_id)
+
+ if (!kvm_vcpu_is_bsp(apic->vcpu))
value &= ~MSR_IA32_APICBASE_BSP;
vcpu->arch.apic_base = value;
@@ -844,7 +845,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
}
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
- if (vcpu->vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(vcpu))
vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
apic_update_ppr(apic);
@@ -985,7 +986,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
int r = 0;
- if (vcpu->vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(vcpu)) {
if (!apic_hw_enabled(vcpu->arch.apic))
r = 1;
if ((lvt0 & APIC_LVT_MASKED) == 0 &&
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index dd667dd..8eede3f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -605,7 +605,7 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
init_vmcb(svm);
- if (vcpu->vcpu_id != 0) {
+ if (!kvm_vcpu_is_bsp(vcpu)) {
kvm_rip_write(vcpu, 0);
svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
@@ -669,7 +669,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
fx_init(&svm->vcpu);
svm->vcpu.fpu_active = 1;
svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
- if (svm->vcpu.vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
return &svm->vcpu;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25f1239..fda57e9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2325,7 +2325,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
- if (vmx->vcpu.vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(&vmx->vcpu))
msr |= MSR_IA32_APICBASE_BSP;
kvm_set_apic_base(&vmx->vcpu, msr);
@@ -2336,7 +2336,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
* GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
* insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
*/
- if (vmx->vcpu.vcpu_id == 0) {
+ if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
vmcs_writel(GUEST_CS_BASE, 0x000f0000);
} else {
@@ -2365,7 +2365,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_SYSENTER_EIP, 0);
vmcs_writel(GUEST_RFLAGS, 0x02);
- if (vmx->vcpu.vcpu_id == 0)
+ if (kvm_vcpu_is_bsp(&vmx->vcpu))
kvm_rip_write(vcpu, 0xfff0);
else
kvm_rip_write(vcpu, 0);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6d44dd5..2b8f533 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4286,7 +4286,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
/* Older userspace won't unhalt the vcpu on reset. */
- if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
+ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!(vcpu->arch.cr0 & X86_CR0_PE))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -4557,7 +4557,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
kvm = vcpu->kvm;
vcpu->arch.mmu.root_hpa = INVALID_PAGE;
- if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
+ if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 28bd112..e9e0cd8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,6 +130,7 @@ struct kvm {
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
+ struct kvm_vcpu *bsp_vcpu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
struct list_head vm_list;
struct kvm_io_bus mmio_bus;
@@ -549,4 +550,8 @@ static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif /* CONFIG_HAVE_KVM_EVENTFD */
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->bsp_vcpu == vcpu;
+}
#endif
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 1eddae9..a8a8252 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -165,7 +165,9 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
/* Always delivery PIT interrupt to vcpu 0 */
if (irq == 0) {
irqe.dest_mode = 0; /* Physical mode. */
- irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+ /* need to read apic_id from apic regiest since
+ * it can be rewritten */
+ irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
}
#endif
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de042cb..5a55fe0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1737,6 +1737,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
goto vcpu_destroy;
}
kvm->vcpus[n] = vcpu;
+ if (n == 0)
+ kvm->bsp_vcpu = vcpu;
mutex_unlock(&kvm->lock);
/* Now it's all set up, let userspace reach it */
--
1.6.2.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH RFC v3 2/4] Use pointer to vcpu instead of vcpu_id in timer code.
2009-05-28 9:54 [PATCH RFC v3 0/4] decouple vcpu index from apic id Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
@ 2009-05-28 9:54 ` Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 4/4] Use macro to iterate over vcpus Gleb Natapov
3 siblings, 0 replies; 7+ messages in thread
From: Gleb Natapov @ 2009-05-28 9:54 UTC (permalink / raw)
To: kvm
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
arch/x86/kvm/i8254.c | 2 +-
arch/x86/kvm/kvm_timer.h | 2 +-
arch/x86/kvm/lapic.c | 2 +-
arch/x86/kvm/timer.c | 2 +-
4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index de4785a..049cea2 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -291,7 +291,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
pt->timer.function = kvm_timer_fn;
pt->t_ops = &kpit_ops;
pt->kvm = ps->pit->kvm;
- pt->vcpu_id = 0;
+ pt->vcpu = pt->kvm->bsp_vcpu;
atomic_set(&pt->pending, 0);
ps->irq_ack = 1;
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 26bd6ba..55c7524 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -6,7 +6,7 @@ struct kvm_timer {
bool reinject;
struct kvm_timer_ops *t_ops;
struct kvm *kvm;
- int vcpu_id;
+ struct kvm_vcpu *vcpu;
};
struct kvm_timer_ops {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 260d4fa..9d0608c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -946,7 +946,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
apic->lapic_timer.timer.function = kvm_timer_fn;
apic->lapic_timer.t_ops = &lapic_timer_ops;
apic->lapic_timer.kvm = vcpu->kvm;
- apic->lapic_timer.vcpu_id = vcpu->vcpu_id;
+ apic->lapic_timer.vcpu = vcpu;
apic->base_address = APIC_DEFAULT_PHYS_BASE;
vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index 86dbac0..85cc743 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -33,7 +33,7 @@ enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
struct kvm_vcpu *vcpu;
struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
- vcpu = ktimer->kvm->vcpus[ktimer->vcpu_id];
+ vcpu = ktimer->vcpu;
if (!vcpu)
return HRTIMER_NORESTART;
--
1.6.2.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id.
2009-05-28 9:54 [PATCH RFC v3 0/4] decouple vcpu index from apic id Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 2/4] Use pointer to vcpu instead of vcpu_id in timer code Gleb Natapov
@ 2009-05-28 9:54 ` Gleb Natapov
2009-06-02 12:38 ` Marcelo Tosatti
2009-05-28 9:54 ` [PATCH RFC v3 4/4] Use macro to iterate over vcpus Gleb Natapov
3 siblings, 1 reply; 7+ messages in thread
From: Gleb Natapov @ 2009-05-28 9:54 UTC (permalink / raw)
To: kvm
Archs are free to use vcpu_id as they see fit. For x86 it is used as the
vcpu's apic id. A new ioctl is added to configure the boot vcpu id, which
was assumed to be 0 until now.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
include/linux/kvm.h | 2 +
include/linux/kvm_host.h | 2 +
virt/kvm/kvm_main.c | 50 ++++++++++++++++++++++++++-------------------
3 files changed, 33 insertions(+), 21 deletions(-)
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 632a856..d10ab5d 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -430,6 +430,7 @@ struct kvm_trace_rec {
#ifdef __KVM_HAVE_PIT
#define KVM_CAP_PIT2 33
#endif
+#define KVM_CAP_SET_BOOT_CPU_ID 34
#ifdef KVM_CAP_IRQ_ROUTING
@@ -537,6 +538,7 @@ struct kvm_irqfd {
#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
+#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
/*
* ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e9e0cd8..e368a14 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,8 +130,10 @@ struct kvm {
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
KVM_PRIVATE_MEM_SLOTS];
+ u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ atomic_t online_vcpus;
struct list_head vm_list;
struct kvm_io_bus mmio_bus;
struct kvm_io_bus pio_bus;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a55fe0..d65c637 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -693,11 +693,6 @@ out:
}
#endif
-static inline int valid_vcpu(int n)
-{
- return likely(n >= 0 && n < KVM_MAX_VCPUS);
-}
-
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
if (pfn_valid(pfn)) {
@@ -1713,15 +1708,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
/*
* Creates some virtual cpus. Good luck creating more than one.
*/
-static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
struct kvm_vcpu *vcpu;
- if (!valid_vcpu(n))
- return -EINVAL;
-
- vcpu = kvm_arch_vcpu_create(kvm, n);
+ vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
@@ -1732,25 +1724,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
return r;
mutex_lock(&kvm->lock);
- if (kvm->vcpus[n]) {
- r = -EEXIST;
+ if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+ r = -EINVAL;
goto vcpu_destroy;
}
- kvm->vcpus[n] = vcpu;
- if (n == 0)
- kvm->bsp_vcpu = vcpu;
- mutex_unlock(&kvm->lock);
+
+ for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
+ if (kvm->vcpus[r]->vcpu_id == id) {
+ r = -EEXIST;
+ goto vcpu_destroy;
+ }
+
+ BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
- if (r < 0)
- goto unlink;
+ if (r < 0) {
+ kvm_put_kvm(kvm);
+ goto vcpu_destroy;
+ }
+
+ kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+ smp_wmb();
+ atomic_inc(&kvm->online_vcpus);
+
+ if (kvm->bsp_vcpu_id == id)
+ kvm->bsp_vcpu = vcpu;
+ mutex_unlock(&kvm->lock);
return r;
-unlink:
- mutex_lock(&kvm->lock);
- kvm->vcpus[n] = NULL;
vcpu_destroy:
mutex_unlock(&kvm->lock);
kvm_arch_vcpu_destroy(vcpu);
@@ -2223,6 +2226,10 @@ static long kvm_vm_ioctl(struct file *filp,
r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
break;
}
+ case KVM_SET_BOOT_CPU_ID:
+ kvm->bsp_vcpu_id = arg;
+ r = 0;
+ break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
@@ -2289,6 +2296,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+ case KVM_CAP_SET_BOOT_CPU_ID:
return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
case KVM_CAP_IRQ_ROUTING:
--
1.6.2.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH RFC v3 4/4] Use macro to iterate over vcpus.
2009-05-28 9:54 [PATCH RFC v3 0/4] decouple vcpu index from apic id Gleb Natapov
` (2 preceding siblings ...)
2009-05-28 9:54 ` [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
@ 2009-05-28 9:54 ` Gleb Natapov
3 siblings, 0 replies; 7+ messages in thread
From: Gleb Natapov @ 2009-05-28 9:54 UTC (permalink / raw)
To: kvm
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
arch/ia64/kvm/kvm-ia64.c | 33 ++++++++++++++-------------------
arch/powerpc/kvm/powerpc.c | 16 ++++++++++------
arch/s390/kvm/kvm-s390.c | 33 ++++++++++++++++-----------------
arch/x86/kvm/i8254.c | 7 ++-----
arch/x86/kvm/mmu.c | 6 +++---
arch/x86/kvm/x86.c | 25 ++++++++++++-------------
include/linux/kvm_host.h | 11 +++++++++++
virt/kvm/irq_comm.c | 6 ++----
virt/kvm/kvm_main.c | 19 +++++++------------
9 files changed, 77 insertions(+), 79 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3924591..c1c5cb6 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -337,13 +337,12 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
{
union ia64_lid lid;
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (kvm->vcpus[i]) {
- lid.val = VCPU_LID(kvm->vcpus[i]);
- if (lid.id == id && lid.eid == eid)
- return kvm->vcpus[i];
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ lid.val = VCPU_LID(vcpu);
+ if (lid.id == id && lid.eid == eid)
+ return vcpu;
}
return NULL;
@@ -409,21 +408,21 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
struct kvm *kvm = vcpu->kvm;
struct call_data call_data;
int i;
+ struct kvm_vcpu *vcpui;
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < kvm->arch.online_vcpus; i++) {
- if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
- KVM_MP_STATE_UNINITIALIZED ||
- vcpu == kvm->vcpus[i])
+ kvm_for_each_vcpu(i, vcpui, kvm) {
+ if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
+ vcpu == vcpui)
continue;
- if (waitqueue_active(&kvm->vcpus[i]->wq))
- wake_up_interruptible(&kvm->vcpus[i]->wq);
+ if (waitqueue_active(&vcpui->wq))
+ wake_up_interruptible(&vcpui->wq);
- if (kvm->vcpus[i]->cpu != -1) {
- call_data.vcpu = kvm->vcpus[i];
- smp_call_function_single(kvm->vcpus[i]->cpu,
+ if (vcpui->cpu != -1) {
+ call_data.vcpu = vcpui;
+ smp_call_function_single(vcpui->cpu,
vcpu_global_purge, &call_data, 1);
} else
printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
@@ -852,8 +851,6 @@ struct kvm *kvm_arch_create_vm(void)
kvm_init_vm(kvm);
- kvm->arch.online_vcpus = 0;
-
return kvm;
}
@@ -1356,8 +1353,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail;
}
- kvm->arch.online_vcpus++;
-
return vcpu;
fail:
return ERR_PTR(r);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2cf915e..7ad30e0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -122,13 +122,17 @@ struct kvm *kvm_arch_create_vm(void)
static void kvmppc_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 981ab04..217d3d3 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -209,13 +209,17 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_destroy(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_destroy(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -311,8 +315,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
BUG_ON(!kvm->arch.sca);
if (!kvm->arch.sca->cpu[id].sda)
kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
- else
- BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
@@ -679,7 +681,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
struct kvm_memory_slot old,
int user_alloc)
{
- int i;
+ int i, j = 0, r = -EINVAL;
+ struct kvm_vcpu *vcpu;
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
@@ -704,14 +707,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
return -EINVAL;
/* request update of sie control block for all available vcpus */
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- if (test_and_set_bit(KVM_REQ_MMU_RELOAD,
- &kvm->vcpus[i]->requests))
- continue;
- kvm_s390_inject_sigp_stop(kvm->vcpus[i],
- ACTION_RELOADVCPU_ON_STOP);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+ continue;
+ kvm_s390_inject_sigp_stop(vcpus, ACTION_RELOADVCPU_ON_STOP);
}
return 0;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 049cea2..ab96c9e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -651,11 +651,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
* VCPU0, and only if its LVT0 is in EXTINT mode.
*/
if (kvm->arch.vapics_in_nmi_mode > 0)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- kvm_apic_nmi_wd_deliver(vcpu);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_apic_nmi_wd_deliver(vcpu);
}
void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7030b5f..dac8dca 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1326,10 +1326,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
int i;
+ struct kvm_vcpu *vcpu;
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm->vcpus[i]->arch.last_pte_updated = NULL;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ vcpu->arch.last_pte_updated = NULL;
}
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b8f533..e3b3b25 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2902,10 +2902,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
continue;
if (!kvm_request_guest_time_update(vcpu))
@@ -4634,20 +4631,22 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
+ struct kvm_vcpu *vcpu;
/*
* Unpin any mmu pages first.
*/
- for (i = 0; i < KVM_MAX_VCPUS; ++i)
- if (kvm->vcpus[i])
- kvm_unload_vcpu_mmu(kvm->vcpus[i]);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_unload_vcpu_mmu(vcpu);
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arch_vcpu_free(vcpu);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+ atomic_set(&kvm->online_vcpus, 0);
+ mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e368a14..42080b8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -171,6 +171,17 @@ struct kvm {
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+ smp_rmb();
+ return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
+ idx < atomic_read(&kvm->online_vcpus) && vcpup; \
+ vcpup = kvm_get_vcpu(kvm, ++idx))
+
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a8bd466..c010ea2 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -66,10 +66,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
kvm_is_dm_lowest_prio(irq))
printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
- vcpu = kvm->vcpus[i];
-
- if (!vcpu || !kvm_apic_present(vcpu))
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
continue;
if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d65c637..2a90d29 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -741,10 +741,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
cpumask_clear(cpus);
me = get_cpu();
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (!vcpu)
- continue;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(req, &vcpu->requests))
continue;
cpu = vcpu->cpu;
@@ -1711,7 +1708,7 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu, *v;
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
@@ -1729,8 +1726,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_destroy;
}
- for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
- if (kvm->vcpus[r]->vcpu_id == id) {
+ kvm_for_each_vcpu(r, v, kvm)
+ if (v->vcpu_id == id) {
r = -EEXIST;
goto vcpu_destroy;
}
@@ -2510,11 +2507,9 @@ static int vcpu_stat_get(void *_offset, u64 *val)
*val = 0;
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- vcpu = kvm->vcpus[i];
- if (vcpu)
- *val += *(u32 *)((void *)vcpu + offset);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ *val += *(u32 *)((void *)vcpu + offset);
+
spin_unlock(&kvm_lock);
return 0;
}
--
1.6.2.1
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id.
2009-05-28 9:54 ` [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
@ 2009-06-02 12:38 ` Marcelo Tosatti
2009-06-02 13:42 ` Gleb Natapov
0 siblings, 1 reply; 7+ messages in thread
From: Marcelo Tosatti @ 2009-06-02 12:38 UTC (permalink / raw)
To: Gleb Natapov; +Cc: kvm
On Thu, May 28, 2009 at 12:54:46PM +0300, Gleb Natapov wrote:
> Archs are free to use vcpu_id as they see fit. For x86 it is used as
> vcpu's apic id. New ioctl is added to configure boot vcpu id that was
> assumed to be 0 till now.
>
> Signed-off-by: Gleb Natapov <gleb@redhat.com>
> ---
> include/linux/kvm.h | 2 +
> include/linux/kvm_host.h | 2 +
> virt/kvm/kvm_main.c | 50 ++++++++++++++++++++++++++-------------------
> 3 files changed, 33 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/kvm.h b/include/linux/kvm.h
> index 632a856..d10ab5d 100644
> --- a/include/linux/kvm.h
> +++ b/include/linux/kvm.h
> @@ -430,6 +430,7 @@ struct kvm_trace_rec {
> #ifdef __KVM_HAVE_PIT
> #define KVM_CAP_PIT2 33
> #endif
> +#define KVM_CAP_SET_BOOT_CPU_ID 34
>
> #ifdef KVM_CAP_IRQ_ROUTING
>
> @@ -537,6 +538,7 @@ struct kvm_irqfd {
> #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
> #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
> #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
> +#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
>
> /*
> * ioctls for vcpu fds
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index e9e0cd8..e368a14 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -130,8 +130,10 @@ struct kvm {
> int nmemslots;
> struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
> KVM_PRIVATE_MEM_SLOTS];
> + u32 bsp_vcpu_id;
> struct kvm_vcpu *bsp_vcpu;
> struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
> + atomic_t online_vcpus;
> struct list_head vm_list;
> struct kvm_io_bus mmio_bus;
> struct kvm_io_bus pio_bus;
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 5a55fe0..d65c637 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -693,11 +693,6 @@ out:
> }
> #endif
>
> -static inline int valid_vcpu(int n)
> -{
> - return likely(n >= 0 && n < KVM_MAX_VCPUS);
> -}
> -
> inline int kvm_is_mmio_pfn(pfn_t pfn)
> {
> if (pfn_valid(pfn)) {
> @@ -1713,15 +1708,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
> /*
> * Creates some virtual cpus. Good luck creating more than one.
> */
> -static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
> +static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
> {
> int r;
> struct kvm_vcpu *vcpu;
>
> - if (!valid_vcpu(n))
> - return -EINVAL;
> -
> - vcpu = kvm_arch_vcpu_create(kvm, n);
> + vcpu = kvm_arch_vcpu_create(kvm, id);
> if (IS_ERR(vcpu))
> return PTR_ERR(vcpu);
>
> @@ -1732,25 +1724,36 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
> return r;
>
> mutex_lock(&kvm->lock);
> - if (kvm->vcpus[n]) {
> - r = -EEXIST;
> + if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
> + r = -EINVAL;
> goto vcpu_destroy;
> }
> - kvm->vcpus[n] = vcpu;
> - if (n == 0)
> - kvm->bsp_vcpu = vcpu;
> - mutex_unlock(&kvm->lock);
> +
> + for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
> + if (kvm->vcpus[r]->vcpu_id == id) {
> + r = -EEXIST;
> + goto vcpu_destroy;
> + }
> +
> + BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
>
> /* Now it's all set up, let userspace reach it */
> kvm_get_kvm(kvm);
> r = create_vcpu_fd(vcpu);
> - if (r < 0)
> - goto unlink;
> + if (r < 0) {
> + kvm_put_kvm(kvm);
> + goto vcpu_destroy;
> + }
> +
> + kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
> + smp_wmb();
> + atomic_inc(&kvm->online_vcpus);
> +
> + if (kvm->bsp_vcpu_id == id)
> + kvm->bsp_vcpu = vcpu;
> + mutex_unlock(&kvm->lock);
> return r;
>
> -unlink:
> - mutex_lock(&kvm->lock);
> - kvm->vcpus[n] = NULL;
> vcpu_destroy:
> mutex_unlock(&kvm->lock);
> kvm_arch_vcpu_destroy(vcpu);
> @@ -2223,6 +2226,10 @@ static long kvm_vm_ioctl(struct file *filp,
> r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
> break;
> }
> + case KVM_SET_BOOT_CPU_ID:
> + kvm->bsp_vcpu_id = arg;
> + r = 0;
> + break;
Don't you also need to update the bsp_vcpu pointer? And aren't these two
(bsp_vcpu pointer and bsp_vcpu_id) somewhat redundant?
Other than that, it looks fine.
> default:
> r = kvm_arch_vm_ioctl(filp, ioctl, arg);
> }
> @@ -2289,6 +2296,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
> case KVM_CAP_USER_MEMORY:
> case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
> case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
> + case KVM_CAP_SET_BOOT_CPU_ID:
> return 1;
> #ifdef CONFIG_HAVE_KVM_IRQCHIP
> case KVM_CAP_IRQ_ROUTING:
> --
> 1.6.2.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id.
2009-06-02 12:38 ` Marcelo Tosatti
@ 2009-06-02 13:42 ` Gleb Natapov
0 siblings, 0 replies; 7+ messages in thread
From: Gleb Natapov @ 2009-06-02 13:42 UTC (permalink / raw)
To: Marcelo Tosatti; +Cc: kvm
On Tue, Jun 02, 2009 at 09:38:08AM -0300, Marcelo Tosatti wrote:
> > @@ -2223,6 +2226,10 @@ static long kvm_vm_ioctl(struct file *filp,
> > r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
> > break;
> > }
> > + case KVM_SET_BOOT_CPU_ID:
> > + kvm->bsp_vcpu_id = arg;
> > + r = 0;
> > + break;
>
> Don't you also need to update the bsp_vcpu pointer? And aren't these two
Actually I think I need to return an error if userspace tries to change BSP
after BSP cpu is already created.
> (bsp_vcpu pointer and bsp_vcpu_id) somewhat redundant?
>
Sometimes pointer to bsp vcpu is needed. It can be found looping over all
vcpus, but I prefer to have the pointer handy.
--
Gleb.
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2009-06-02 13:42 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2009-05-28 9:54 [PATCH RFC v3 0/4] decouple vcpu index from apic id Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 1/4] Introduce kvm_vcpu_is_bsp() function Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 2/4] Use pointer to vcpu instead of vcpu_id in timer code Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 3/4] Break dependency between vcpu index in vcpus array and vcpu_id Gleb Natapov
2009-06-02 12:38 ` Marcelo Tosatti
2009-06-02 13:42 ` Gleb Natapov
2009-05-28 9:54 ` [PATCH RFC v3 4/4] Use macro to iterate over vcpus Gleb Natapov
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox