From: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
To: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, x86@kernel.org,
yrl.pp-manager.tt@hitachi.com,
Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>,
Avi Kivity <avi@redhat.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>
Subject: [RFC PATCH 04/18] KVM: Replace local_irq_disable/enable with local_irq_save/restore
Date: Thu, 28 Jun 2012 15:07:40 +0900 [thread overview]
Message-ID: <20120628060740.19298.69063.stgit@localhost.localdomain> (raw)
In-Reply-To: <20120628060719.19298.43879.stgit@localhost.localdomain>
Replace local_irq_disable/enable with local_irq_save/restore in the code
paths that are executed on slave CPUs. This is required because irqs are
disabled while the guest is running on the slave CPUs.
Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
---
arch/x86/kvm/mmu.c | 20 ++++++++++++--------
arch/x86/kvm/vmx.c | 5 +++--
arch/x86/kvm/x86.c | 7 ++++---
arch/x86/mm/gup.c | 7 ++++---
4 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index be3cea4..6139e1d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -549,13 +549,14 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
return __get_spte_lockless(sptep);
}
-static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
+static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu,
+ unsigned long *flags)
{
/*
* Prevent page table teardown by making any free-er wait during
* kvm_flush_remote_tlbs() IPI to all active vcpus.
*/
- local_irq_disable();
+ local_irq_save(*flags);
vcpu->mode = READING_SHADOW_PAGE_TABLES;
/*
* Make sure a following spte read is not reordered ahead of the write
@@ -564,7 +565,8 @@ static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
smp_mb();
}
-static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
+static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu,
+ unsigned long *flags)
{
/*
* Make sure the write to vcpu->mode is not reordered in front of
@@ -573,7 +575,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
*/
smp_mb();
vcpu->mode = OUTSIDE_GUEST_MODE;
- local_irq_enable();
+ local_irq_restore(*flags);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -2959,12 +2961,13 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
u64 spte = 0ull;
+ unsigned long flags;
- walk_shadow_page_lockless_begin(vcpu);
+ walk_shadow_page_lockless_begin(vcpu, &flags);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
if (!is_shadow_present_pte(spte))
break;
- walk_shadow_page_lockless_end(vcpu);
+ walk_shadow_page_lockless_end(vcpu, &flags);
return spte;
}
@@ -4043,15 +4046,16 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
struct kvm_shadow_walk_iterator iterator;
u64 spte;
int nr_sptes = 0;
+ unsigned long flags;
- walk_shadow_page_lockless_begin(vcpu);
+ walk_shadow_page_lockless_begin(vcpu, &flags);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
sptes[iterator.level-1] = spte;
nr_sptes++;
if (!is_shadow_present_pte(spte))
break;
}
- walk_shadow_page_lockless_end(vcpu);
+ walk_shadow_page_lockless_end(vcpu, &flags);
return nr_sptes;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32eb588..6ea77e4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1516,12 +1516,13 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vmx->loaded_vmcs->cpu != cpu) {
struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
unsigned long sysenter_esp;
+ unsigned long flags;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- local_irq_disable();
+ local_irq_save(flags);
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
- local_irq_enable();
+ local_irq_restore(flags);
/*
* Linux uses per-cpu TSS and GDT, so set these when switching
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be6d549..4a69c66 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5229,6 +5229,7 @@ static void process_nmi(struct kvm_vcpu *vcpu)
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
+ unsigned long flags;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = 0;
@@ -5314,13 +5315,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
*/
smp_mb();
- local_irq_disable();
+ local_irq_save(flags);
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- local_irq_enable();
+ local_irq_restore(flags);
preempt_enable();
kvm_x86_ops->cancel_injection(vcpu);
r = 1;
@@ -5359,7 +5360,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
- local_irq_enable();
+ local_irq_restore(flags);
++vcpu->stat.exits;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46..6679525 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -315,6 +315,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
+ unsigned long flags;
pgd_t *pgdp;
int nr = 0;
@@ -349,7 +350,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
* (which we do on x86, with the above PAE exception), we can follow the
* address down to the the page and take a ref on it.
*/
- local_irq_disable();
+ local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
pgd_t pgd = *pgdp;
@@ -360,7 +361,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
goto slow;
} while (pgdp++, addr = next, addr != end);
- local_irq_enable();
+ local_irq_restore(flags);
VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
return nr;
@@ -369,7 +370,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
int ret;
slow:
- local_irq_enable();
+ local_irq_restore(flags);
slow_irqon:
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
next prev parent reply other threads:[~2012-06-28 6:07 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-06-28 6:07 [RFC PATCH 00/18] KVM: x86: CPU isolation and direct interrupts handling by guests Tomoki Sekiyama
2012-06-28 6:07 ` [RFC PATCH 01/18] x86: Split memory hotplug function from cpu_up() as cpu_memory_up() Tomoki Sekiyama
2012-06-28 6:07 ` [RFC PATCH 02/18] x86: Add a facility to use offlined CPUs as slave CPUs Tomoki Sekiyama
2012-06-28 6:07 ` [RFC PATCH 03/18] x86: Support hrtimer on " Tomoki Sekiyama
2012-06-28 6:07 ` Tomoki Sekiyama [this message]
2012-06-28 6:07 ` [RFC PATCH 05/18] KVM: Enable/Disable virtualization on slave CPUs are activated/dying Tomoki Sekiyama
2012-06-28 6:07 ` [RFC PATCH 06/18] KVM: Add facility to run guests on slave CPUs Tomoki Sekiyama
2012-06-28 17:02 ` Avi Kivity
2012-06-29 9:26 ` Tomoki Sekiyama
2012-06-28 6:07 ` [RFC PATCH 07/18] KVM: handle page faults occured in slave CPUs on online CPUs Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 08/18] KVM: Add KVM_GET_SLAVE_CPU and KVM_SET_SLAVE_CPU to vCPU ioctl Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 09/18] KVM: Go back to online CPU on VM exit by external interrupt Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 10/18] KVM: proxy slab operations for slave CPUs on online CPUs Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 11/18] KVM: no exiting from guest when slave CPU halted Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 12/18] x86/apic: Enable external interrupt routing to slave CPUs Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 13/18] x86/apic: IRQ vector remapping on slave for " Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 14/18] KVM: Directly handle interrupts by guests without VM EXIT on " Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 15/18] KVM: vmx: Add definitions PIN_BASED_PREEMPTION_TIMER Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 16/18] KVM: add kvm_arch_vcpu_prevent_run to prevent VM ENTER when NMI is received Tomoki Sekiyama
2012-06-28 16:48 ` Avi Kivity
2012-06-29 9:26 ` Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 17/18] KVM: route assigned devices' MSI/MSI-X directly to guests on slave CPUs Tomoki Sekiyama
2012-06-28 6:08 ` [RFC PATCH 18/18] x86: request TLB flush to slave CPU using NMI Tomoki Sekiyama
2012-06-28 16:38 ` Avi Kivity
2012-06-29 9:26 ` Tomoki Sekiyama
2012-06-28 16:58 ` [RFC PATCH 00/18] KVM: x86: CPU isolation and direct interrupts handling by guests Avi Kivity
2012-06-28 17:26 ` Jan Kiszka
2012-06-28 17:34 ` Avi Kivity
2012-06-29 9:25 ` Tomoki Sekiyama
2012-06-29 14:56 ` Avi Kivity
2012-07-06 10:33 ` Tomoki Sekiyama
2012-07-12 9:04 ` Avi Kivity
2012-07-04 9:33 ` Tomoki Sekiyama
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20120628060740.19298.69063.stgit@localhost.localdomain \
--to=tomoki.sekiyama.qu@hitachi.com \
--cc=avi@redhat.com \
--cc=hpa@zytor.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=mtosatti@redhat.com \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
--cc=yrl.pp-manager.tt@hitachi.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox