From: Gleb Natapov <gleb@redhat.com>
To: kvm@vger.kernel.org
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, avi@redhat.com,
mingo@elte.hu, a.p.zijlstra@chello.nl, tglx@linutronix.de,
hpa@zytor.com, riel@redhat.com, cl@linux-foundation.org,
mtosatti@redhat.com
Subject: [PATCH v7 03/12] Retry fault before vmentry
Date: Thu, 14 Oct 2010 11:22:47 +0200
Message-ID: <1287048176-2563-4-git-send-email-gleb@redhat.com>
In-Reply-To: <1287048176-2563-1-git-send-email-gleb@redhat.com>

When a page is swapped in, it is mapped into guest memory only after the
guest tries to access it again and generates another fault. To save this
second fault we can map the page immediately, since we know the guest is
going to access it. Do this only when tdp is enabled for now; the shadow
paging case is more complicated, as the CR[034] and EFER registers would
have to be switched before doing the mapping and then switched back.
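
To illustrate the resulting flow, here is a simplified sketch of what the
completion path does after this patch (condensed from the hunks below;
locking, list handling and tracing omitted):

    /* virt/kvm/async_pf.c: completion check before re-entering the guest */
    if (work->page)                         /* page is resident again */
            kvm_arch_async_page_ready(vcpu, work);
    kvm_arch_async_page_present(vcpu, work);

    /* arch/x86/kvm/x86.c */
    void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                                   struct kvm_async_pf *work)
    {
            /* Only with tdp: shadow paging would need CR[034]/EFER
             * switched around the mapping, see above. */
            if (!vcpu->arch.mmu.direct_map || is_error_page(work->page))
                    return;
            /* no_apf = true so the retried fault cannot queue another
             * async page fault and recurse. */
            vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
    }

The no_apf flag threaded through the page fault handlers below exists only
to make this pre-faulting call safe.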
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
---
arch/x86/include/asm/kvm_host.h | 4 +++-
arch/x86/kvm/mmu.c | 16 ++++++++--------
arch/x86/kvm/paging_tmpl.h | 6 +++---
arch/x86/kvm/x86.c | 7 +++++++
virt/kvm/async_pf.c | 2 ++
5 files changed, 23 insertions(+), 12 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 043e29e..96aca44 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -241,7 +241,7 @@ struct kvm_mmu {
void (*new_cr3)(struct kvm_vcpu *vcpu);
void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
- int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+ int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
void (*inject_page_fault)(struct kvm_vcpu *vcpu);
void (*free)(struct kvm_vcpu *vcpu);
gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
@@ -839,6 +839,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f01e89a..11d152b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2568,7 +2568,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
}
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
- u32 error_code)
+ u32 error_code, bool no_apf)
{
gfn_t gfn;
int r;
@@ -2604,8 +2604,8 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
return kvm_x86_ops->interrupt_allowed(vcpu);
}
-static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
- pfn_t *pfn)
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+ gva_t gva, pfn_t *pfn)
{
bool async;
@@ -2616,7 +2616,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
put_page(pfn_to_page(*pfn));
- if (can_do_async_pf(vcpu)) {
+ if (!no_apf && can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(async, *pfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
@@ -2631,8 +2631,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
return false;
}
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
- u32 error_code)
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+ bool no_apf)
{
pfn_t pfn;
int r;
@@ -2654,7 +2654,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
- if (try_async_pf(vcpu, gfn, gpa, &pfn))
+ if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn))
return 0;
/* mmio */
@@ -3317,7 +3317,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
int r;
enum emulation_result er;
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+ r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
if (r < 0)
goto out;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c45376d..d6b281e 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -527,8 +527,8 @@ out_gpte_changed:
* Returns: 1 if we need to emulate the instruction, 0 otherwise, or
* a negative value on error.
*/
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
- u32 error_code)
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+ bool no_apf)
{
int write_fault = error_code & PFERR_WRITE_MASK;
int user_fault = error_code & PFERR_USER_MASK;
@@ -569,7 +569,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
- if (try_async_pf(vcpu, walker.gfn, addr, &pfn))
+ if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn))
return 0;
/* mmio */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 09e72fc..bf37397 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6131,6 +6131,13 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+ if (!vcpu->arch.mmu.direct_map || is_error_page(work->page))
+ return;
+ vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8b144d5..41607ed 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -132,6 +132,8 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->link);
spin_unlock(&vcpu->async_pf.lock);
+ if (work->page)
+ kvm_arch_async_page_ready(vcpu, work);
kvm_arch_async_page_present(vcpu, work);
list_del(&work->queue);
--
1.7.1