From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
To: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>,
LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: [PATCH v3 12/19] KVM: MMU: abstract some functions to handle fault pfn
Date: Thu, 30 Jun 2011 16:24:31 +0800 [thread overview]
Message-ID: <4E0C32BF.5010606@cn.fujitsu.com> (raw)
In-Reply-To: <4E0C3178.2080603@cn.fujitsu.com>
Introduce handle_abnormal_pfn() to handle fault pfns on the page fault path,
and introduce mmu_invalid_pfn() to handle fault pfns on the prefetch path.
This is preparatory work for MMIO page fault support.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
arch/x86/kvm/mmu.c | 47 ++++++++++++++++++++++++++++++++-----------
arch/x86/kvm/paging_tmpl.h | 12 +++++-----
2 files changed, 41 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 96a7ed4..9577013 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2221,18 +2221,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
send_sig_info(SIGBUS, &info, tsk);
}
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
- unsigned access, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
{
kvm_release_pfn_clean(pfn);
if (is_hwpoison_pfn(pfn)) {
kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
return 0;
- } else if (is_fault_pfn(pfn))
- return -EFAULT;
+ }
- vcpu_cache_mmio_info(vcpu, gva, gfn, access);
- return 1;
+ return -EFAULT;
}
static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2277,6 +2274,33 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
}
}
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+ return unlikely(is_invalid_pfn(pfn) || is_mmio_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ pfn_t pfn, unsigned access, int *ret_val)
+{
+ bool ret = true;
+
+ /* The pfn is invalid, report the error! */
+ if (unlikely(is_invalid_pfn(pfn))) {
+ *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+ goto exit;
+ }
+
+ if (unlikely(is_mmio_pfn(pfn))) {
+ vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+ *ret_val = 1;
+ goto exit;
+ }
+
+ ret = false;
+exit:
+ return ret;
+}
+
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, pfn_t *pfn, bool write, bool *writable);
@@ -2311,9 +2335,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+ return r;
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2685,9 +2708,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+ return r;
+
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 58a29d0..870be69 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -373,7 +373,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
- if (is_error_pfn(pfn)) {
+ if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
return;
}
@@ -451,7 +451,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
gfn = gpte_to_gfn(gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
pte_access & ACC_WRITE_MASK);
- if (is_error_pfn(pfn)) {
+ if (mmu_invalid_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
break;
}
@@ -621,10 +621,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
&map_writable))
return 0;
- /* mmio */
- if (is_error_pfn(pfn))
- return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 :
- addr, walker.pte_access, walker.gfn, pfn);
+ if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
+ walker.gfn, pfn, walker.pte_access, &r))
+ return r;
+
spin_lock(&vcpu->kvm->mmu_lock);
if (mmu_notifier_retry(vcpu, mmu_seq))
goto out_unlock;
--
1.7.5.4
next prev parent reply other threads:[~2011-06-30 8:22 UTC|newest]
Thread overview: 38+ messages / expand[flat|nested] mbox.gz Atom feed top
2011-06-30 8:19 [PATCH v3 01/19] KVM: MMU: fix walking shadow page table Xiao Guangrong
2011-06-30 8:19 ` [PATCH v3 02/19] KVM: MMU: do not update slot bitmap if spte is nonpresent Xiao Guangrong
2011-06-30 8:20 ` [PATCH v3 03/19] KVM: x86: introduce vcpu_mmio_gva_to_gpa to cleanup the code Xiao Guangrong
2011-06-30 8:20 ` [PATCH v3 04/19] KVM: MMU: cache mmio info on page fault path Xiao Guangrong
2011-07-05 19:04 ` Marcelo Tosatti
2011-07-06 1:17 ` Xiao Guangrong
2011-06-30 8:21 ` [PATCH v3 05/19] KVM: MMU: optimize to handle dirty bit Xiao Guangrong
2011-07-05 19:27 ` Marcelo Tosatti
2011-07-06 1:22 ` Xiao Guangrong
2011-07-06 16:51 ` Marcelo Tosatti
2011-07-06 19:12 ` Xiao Guangrong
2011-07-07 8:15 ` Marcelo Tosatti
2011-06-30 8:21 ` [PATCH v3 06/19] KVM: MMU: cleanup for FNAME(fetch) Xiao Guangrong
2011-06-30 8:22 ` [PATCH v3 07/19] KVM: MMU: rename 'pt_write' to 'emulate' Xiao Guangrong
2011-06-30 8:22 ` [PATCH v3 08/19] KVM: MMU: count used shadow pages on prepareing path Xiao Guangrong
2011-06-30 8:23 ` [PATCH v3 09/19] KVM: MMU: split kvm_mmu_free_page Xiao Guangrong
2011-06-30 8:23 ` [PATCH v3 10/19] KVM: MMU: remove bypass_guest_pf Xiao Guangrong
2011-06-30 8:24 ` [PATCH v3 11/19] KVM: MMU: filter out the mmio pfn from the fault pfn Xiao Guangrong
2011-07-06 17:17 ` Marcelo Tosatti
2011-07-06 19:13 ` Xiao Guangrong
2011-06-30 8:24 ` Xiao Guangrong [this message]
2011-06-30 8:25 ` [PATCH v3 13/19] KVM: MMU: introduce the rules to modify shadow page table Xiao Guangrong
2011-06-30 8:25 ` [PATCH v3 14/19] KVM: MMU: clean up spte updating and clearing Xiao Guangrong
2011-07-06 17:39 ` Marcelo Tosatti
2011-07-06 19:18 ` Xiao Guangrong
2011-07-07 8:16 ` Marcelo Tosatti
2011-07-07 9:30 ` Xiao Guangrong
2011-06-30 8:26 ` [PATCH v3 15/19] KVM: MMU: do not need atomicly to set/clear spte Xiao Guangrong
2011-06-30 8:26 ` [PATCH v3 16/19] KVM: MMU: lockless walking shadow page table Xiao Guangrong
2011-07-06 18:08 ` Marcelo Tosatti
2011-07-06 19:26 ` Xiao Guangrong
2011-07-07 8:18 ` Marcelo Tosatti
2011-06-30 8:26 ` [PATCH v3 17/19] KVM: MMU: reorganize struct kvm_shadow_walk_iterator Xiao Guangrong
2011-06-30 8:27 ` [PATCH v3 18/19] KVM: MMU: mmio page fault support Xiao Guangrong
2011-07-06 18:52 ` Marcelo Tosatti
2011-07-06 19:59 ` Xiao Guangrong
2011-07-07 8:49 ` Marcelo Tosatti
2011-06-30 8:28 ` [PATCH v3 19/19] KVM: MMU: trace mmio page fault Xiao Guangrong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=4E0C32BF.5010606@cn.fujitsu.com \
--to=xiaoguangrong@cn.fujitsu.com \
--cc=avi@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox