From: Gleb Natapov <gleb@redhat.com>
To: Marcelo Tosatti <mtosatti@redhat.com>
Cc: kvm@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, avi@redhat.com, mingo@elte.hu,
a.p.zijlstra@chello.nl, tglx@linutronix.de, hpa@zytor.com,
riel@redhat.com, cl@linux-foundation.org
Subject: Re: [PATCH v6 03/12] Retry fault before vmentry
Date: Wed, 6 Oct 2010 13:07:04 +0200
Message-ID: <20101006110704.GW11145@redhat.com>
In-Reply-To: <20101005155409.GB28955@amt.cnet>
On Tue, Oct 05, 2010 at 12:54:09PM -0300, Marcelo Tosatti wrote:
> On Mon, Oct 04, 2010 at 05:56:25PM +0200, Gleb Natapov wrote:
> > When a page is swapped in, it is mapped into guest memory only after the
> > guest tries to access it again and generates another fault. To save this
> > extra fault we can map the page immediately, since we know the guest is
> > going to access it. Do this only when tdp is enabled for now. The shadow
> > paging case is more complicated: the CR[034] and EFER registers would have
> > to be switched before doing the mapping and then switched back.
> >
> > Acked-by: Rik van Riel <riel@redhat.com>
> > Signed-off-by: Gleb Natapov <gleb@redhat.com>
> > ---
> > arch/x86/include/asm/kvm_host.h | 4 +++-
> > arch/x86/kvm/mmu.c | 16 ++++++++--------
> > arch/x86/kvm/paging_tmpl.h | 6 +++---
> > arch/x86/kvm/x86.c | 7 +++++++
> > virt/kvm/async_pf.c | 2 ++
> > 5 files changed, 23 insertions(+), 12 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> > index 5f154d3..b9f263e 100644
> > --- a/arch/x86/include/asm/kvm_host.h
> > +++ b/arch/x86/include/asm/kvm_host.h
> > @@ -240,7 +240,7 @@ struct kvm_mmu {
> > void (*new_cr3)(struct kvm_vcpu *vcpu);
> > void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
> > unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
> > - int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
> > + int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
> > void (*inject_page_fault)(struct kvm_vcpu *vcpu);
> > void (*free)(struct kvm_vcpu *vcpu);
> > gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
> > @@ -838,6 +838,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
> > struct kvm_async_pf *work);
> > void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
> > struct kvm_async_pf *work);
> > +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
> > + struct kvm_async_pf *work);
> > extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
> >
> > #endif /* _ASM_X86_KVM_HOST_H */
> > diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> > index 4d49b5e..d85fda8 100644
> > --- a/arch/x86/kvm/mmu.c
> > +++ b/arch/x86/kvm/mmu.c
> > @@ -2558,7 +2558,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
> > }
> >
> > static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
> > - u32 error_code)
> > + u32 error_code, bool no_apf)
> > {
> > gfn_t gfn;
> > int r;
> > @@ -2594,8 +2594,8 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
> > return kvm_x86_ops->interrupt_allowed(vcpu);
> > }
> >
> > -static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
> > - pfn_t *pfn)
> > +static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
> > + gva_t gva, pfn_t *pfn)
> > {
> > bool async;
> >
> > @@ -2606,7 +2606,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
> >
> > put_page(pfn_to_page(*pfn));
> >
> > - if (can_do_async_pf(vcpu)) {
> > + if (!no_apf && can_do_async_pf(vcpu)) {
> > trace_kvm_try_async_get_page(async, *pfn);
> > if (kvm_find_async_pf_gfn(vcpu, gfn)) {
> > vcpu->async_pf.work = kvm_double_apf;
> > @@ -2620,8 +2620,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
> > return false;
> > }
> >
> > -static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
> > - u32 error_code)
> > +static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
> > + bool no_apf)
> > {
> > pfn_t pfn;
> > int r;
> > @@ -2643,7 +2643,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
> > mmu_seq = vcpu->kvm->mmu_notifier_seq;
> > smp_rmb();
> >
> > - if (try_async_pf(vcpu, gfn, gpa, &pfn))
> > + if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn))
> > return 0;
> >
> > /* mmio */
> > @@ -3306,7 +3306,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
> > int r;
> > enum emulation_result er;
> >
> > - r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
> > + r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
> > if (r < 0)
> > goto out;
> >
> > diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
> > index 8154353..9ad90f8 100644
> > --- a/arch/x86/kvm/paging_tmpl.h
> > +++ b/arch/x86/kvm/paging_tmpl.h
> > @@ -530,8 +530,8 @@ out_gpte_changed:
> > * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
> > * a negative value on error.
> > */
> > -static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
> > - u32 error_code)
> > +static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
> > + bool no_apf)
> > {
> > int write_fault = error_code & PFERR_WRITE_MASK;
> > int user_fault = error_code & PFERR_USER_MASK;
> > @@ -574,7 +574,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
> > mmu_seq = vcpu->kvm->mmu_notifier_seq;
> > smp_rmb();
> >
> > - if (try_async_pf(vcpu, walker.gfn, addr, &pfn))
> > + if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn))
> > return 0;
> >
> > /* mmio */
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 8dd9ac2..48fd59d 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -6123,6 +6123,13 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
> > }
> > EXPORT_SYMBOL_GPL(kvm_set_rflags);
> >
> > +void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
> > +{
> > + if (!tdp_enabled || is_error_page(work->page))
> > + return;
> > + vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
> > +}
> > +
>
> Can't you set a bit in vcpu->requests instead, and handle it in "out:"
> at the end of vcpu_enter_guest?
>
> To have a single entry point for pagefaults, after vmexit handling.
Jumping to "out:" will skip vmexit handling anyway, so we will not reuse
same call site anyway. I don't see yet why the way you propose will have
an advantage.
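
For illustration only, a rough sketch of the request-bit variant under
discussion; KVM_REQ_APF_READY and apf_ready_gva are made-up placeholders,
not part of this patch:

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	if (!tdp_enabled || is_error_page(work->page))
		return;
	/* Remember the faulting gva and defer the retry to vcpu_enter_guest(). */
	vcpu->arch.apf_ready_gva = work->gva;		/* assumed per-vcpu field */
	kvm_make_request(KVM_REQ_APF_READY, vcpu);	/* assumed request bit */
}

/* ... and in vcpu_enter_guest(), after vmexit handling: */
	if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
		vcpu->arch.mmu.page_fault(vcpu, vcpu->arch.apf_ready_gva, 0, true);

Either way the retry runs in vcpu context before the next vmentry; the
difference is only whether it is driven directly from the async-pf
completion path or via a vcpu->requests bit.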
--
Gleb.