From: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
To: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Avi Kivity <avi@redhat.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
LKML <linux-kernel@vger.kernel.org>, KVM <kvm@vger.kernel.org>
Subject: [PATCH 05/10] KVM: reorganize hva_to_pfn
Date: Tue, 17 Jul 2012 22:43:13 +0800
Message-ID: <50057A01.9010401@linux.vnet.ibm.com>
In-Reply-To: <5005791B.8040807@linux.vnet.ibm.com>
We do too many things in hva_to_pfn; this patch reorganizes the code
to make it more readable.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
---
virt/kvm/kvm_main.c | 161 +++++++++++++++++++++++++++++++--------------------
1 files changed, 99 insertions(+), 62 deletions(-)
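
For easier review, here is the resulting hva_to_pfn() as a whole (the same
code as in the diff below, nothing new; only the numbered step comments were
added for this mail):

static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
			bool write_fault, bool *writable)
{
	struct vm_area_struct *vma;
	pfn_t pfn = 0;
	int npages;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);
	BUG_ON(!write_fault && !writable);

	/* 1) lock-free fast path (__get_user_pages_fast), safe in atomic context */
	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
		return pfn;

	/* 2) atomic callers cannot sleep, so give up here */
	if (atomic)
		return get_fault_pfn();

	/* 3) sleeping slow path via get_user_pages*() */
	npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
	if (npages == 1)
		return pfn;

	/* 4) fall back to inspecting the VMA under mmap_sem */
	down_read(&current->mm->mmap_sem);
	if (npages == -EHWPOISON ||
	    (!async && check_user_page_hwpoison(addr))) {
		get_page(hwpoison_page);
		pfn = page_to_pfn(hwpoison_page);
		goto exit;
	}

	vma = find_vma_intersection(current->mm, addr, addr + 1);

	if (vma == NULL)
		pfn = get_fault_pfn();
	else if ((vma->vm_flags & VM_PFNMAP)) {
		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else {
		if (async && (vma->vm_flags & VM_WRITE))
			*async = true;
		pfn = get_fault_pfn();
	}

exit:
	up_read(&current->mm->mmap_sem);
	return pfn;
}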
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a89c7b8..54fb47b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1096,84 +1096,121 @@ static inline int check_user_page_hwpoison(unsigned long addr)
return rc == -EHWPOISON;
}
-static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
- bool write_fault, bool *writable)
+/*
+ * The atomic path to get the writable pfn: on success the pfn is stored
+ * in @pfn and true is returned, otherwise false is returned.
+ */
+static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
+ bool write_fault, bool *writable, pfn_t *pfn)
{
struct page *page[1];
- int npages = 0;
- pfn_t pfn;
+ int npages;
- /* we can do it either atomically or asynchronously, not both */
- BUG_ON(atomic && async);
+ if (!(async || atomic))
+ return false;
- BUG_ON(!write_fault && !writable);
+ npages = __get_user_pages_fast(addr, 1, 1, page);
+ if (npages == 1) {
+ *pfn = page_to_pfn(page[0]);
+
+ if (writable)
+ *writable = true;
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * The slow path to get the pfn of the specified host virtual address:
+ * 1 indicates success, -errno is returned if an error is detected.
+ */
+static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
+ bool *writable, pfn_t *pfn)
+{
+ struct page *page[1];
+ int npages = 0;
+
+ might_sleep();
if (writable)
- *writable = true;
+ *writable = write_fault;
- if (atomic || async)
- npages = __get_user_pages_fast(addr, 1, 1, page);
+ if (async) {
+ down_read(&current->mm->mmap_sem);
+ npages = get_user_page_nowait(current, current->mm,
+ addr, write_fault, page);
+ up_read(&current->mm->mmap_sem);
+ } else
+ npages = get_user_pages_fast(addr, 1, write_fault,
+ page);
- if (unlikely(npages != 1) && !atomic) {
- might_sleep();
+ if (npages != 1)
+ return npages;
- if (writable)
- *writable = write_fault;
-
- if (async) {
- down_read(&current->mm->mmap_sem);
- npages = get_user_page_nowait(current, current->mm,
- addr, write_fault, page);
- up_read(&current->mm->mmap_sem);
- } else
- npages = get_user_pages_fast(addr, 1, write_fault,
- page);
-
- /* map read fault as writable if possible */
- if (unlikely(!write_fault) && npages == 1) {
- struct page *wpage[1];
-
- npages = __get_user_pages_fast(addr, 1, 1, wpage);
- if (npages == 1) {
- *writable = true;
- put_page(page[0]);
- page[0] = wpage[0];
- }
- npages = 1;
+ /* map read fault as writable if possible */
+ if (unlikely(!write_fault)) {
+ struct page *wpage[1];
+
+ npages = __get_user_pages_fast(addr, 1, 1, wpage);
+ if (npages == 1) {
+ *writable = true;
+ put_page(page[0]);
+ page[0] = wpage[0];
}
+
+ npages = 1;
}
+ *pfn = page_to_pfn(page[0]);
+ return npages;
+}
- if (unlikely(npages != 1)) {
- struct vm_area_struct *vma;
+static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
+ bool write_fault, bool *writable)
+{
+ struct vm_area_struct *vma;
+ pfn_t pfn = 0;
+ int npages;
- if (atomic)
- return get_fault_pfn();
+ /* we can do it either atomically or asynchronously, not both */
+ BUG_ON(atomic && async);
- down_read(&current->mm->mmap_sem);
- if (npages == -EHWPOISON ||
- (!async && check_user_page_hwpoison(addr))) {
- up_read(&current->mm->mmap_sem);
- get_page(hwpoison_page);
- return page_to_pfn(hwpoison_page);
- }
+ BUG_ON(!write_fault && !writable);
- vma = find_vma_intersection(current->mm, addr, addr+1);
-
- if (vma == NULL)
- pfn = get_fault_pfn();
- else if ((vma->vm_flags & VM_PFNMAP)) {
- pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff;
- BUG_ON(!kvm_is_mmio_pfn(pfn));
- } else {
- if (async && (vma->vm_flags & VM_WRITE))
- *async = true;
- pfn = get_fault_pfn();
- }
- up_read(&current->mm->mmap_sem);
- } else
- pfn = page_to_pfn(page[0]);
+ if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+ return pfn;
+ if (atomic)
+ return get_fault_pfn();
+
+ npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
+ if (npages == 1)
+ return pfn;
+
+ down_read(&current->mm->mmap_sem);
+ if (npages == -EHWPOISON ||
+ (!async && check_user_page_hwpoison(addr))) {
+ get_page(hwpoison_page);
+ pfn = page_to_pfn(hwpoison_page);
+ goto exit;
+ }
+
+ vma = find_vma_intersection(current->mm, addr, addr + 1);
+
+ if (vma == NULL)
+ pfn = get_fault_pfn();
+ else if ((vma->vm_flags & VM_PFNMAP)) {
+ pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ BUG_ON(!kvm_is_mmio_pfn(pfn));
+ } else {
+ if (async && (vma->vm_flags & VM_WRITE))
+ *async = true;
+ pfn = get_fault_pfn();
+ }
+
+exit:
+ up_read(&current->mm->mmap_sem);
return pfn;
}
--
1.7.7.6