From: Andrew Jones <drjones@redhat.com>
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, rkrcmar@redhat.com, cdall@linaro.org,
david@redhat.com, lvivier@redhat.com, thuth@redhat.com
Subject: [PATCH kvm-unit-tests v2 03/12] arm/arm64: fix virt_to_phys
Date: Wed, 17 Jan 2018 11:39:56 +0100 [thread overview]
Message-ID: <20180117104005.29211-4-drjones@redhat.com> (raw)
In-Reply-To: <20180117104005.29211-1-drjones@redhat.com>
Since switching to the vm_memalign() allocator, virt_to_phys() hasn't
been returning the correct address, as it was assuming an identity map.
Signed-off-by: Andrew Jones <drjones@redhat.com>
---
lib/arm/asm/page.h | 8 +++-----
lib/arm/asm/pgtable.h | 16 ++++++++++++----
lib/arm/mmu.c | 20 ++++++++++++++++++++
lib/arm64/asm/page.h | 8 +++-----
lib/arm64/asm/pgtable.h | 12 ++++++++++--
5 files changed, 48 insertions(+), 16 deletions(-)
diff --git a/lib/arm/asm/page.h b/lib/arm/asm/page.h
index fc1b30e95567..039c9f7b3d49 100644
--- a/lib/arm/asm/page.h
+++ b/lib/arm/asm/page.h
@@ -34,16 +34,14 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
-#ifndef __virt_to_phys
-#define __phys_to_virt(x) ((unsigned long) (x))
-#define __virt_to_phys(x) (x)
-#endif
-
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+extern phys_addr_t __virt_to_phys(unsigned long addr);
+extern unsigned long __phys_to_virt(phys_addr_t addr);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM_PAGE_H_ */
diff --git a/lib/arm/asm/pgtable.h b/lib/arm/asm/pgtable.h
index a95e63002ef3..b614bce9528a 100644
--- a/lib/arm/asm/pgtable.h
+++ b/lib/arm/asm/pgtable.h
@@ -14,6 +14,14 @@
* This work is licensed under the terms of the GNU GPL, version 2.
*/
+/*
+ * We can convert va <=> pa page table addresses with simple casts
+ * because we always allocate their pages with alloc_page(), and
+ * alloc_page() always returns identity mapped pages.
+ */
+#define pgtable_va(x) ((void *)(unsigned long)(x))
+#define pgtable_pa(x) ((unsigned long)(x))
+
#define pgd_none(pgd) (!pgd_val(pgd))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pte_none(pte) (!pte_val(pte))
@@ -32,7 +40,7 @@ static inline pgd_t *pgd_alloc(void)
static inline pmd_t *pgd_page_vaddr(pgd_t pgd)
{
- return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+ return pgtable_va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}
#define pmd_index(addr) \
@@ -52,14 +60,14 @@ static inline pmd_t *pmd_alloc(pgd_t *pgd, unsigned long addr)
{
if (pgd_none(*pgd)) {
pmd_t *pmd = pmd_alloc_one();
- pgd_val(*pgd) = __pa(pmd) | PMD_TYPE_TABLE;
+ pgd_val(*pgd) = pgtable_pa(pmd) | PMD_TYPE_TABLE;
}
return pmd_offset(pgd, addr);
}
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
- return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+ return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
#define pte_index(addr) \
@@ -79,7 +87,7 @@ static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
{
if (pmd_none(*pmd)) {
pte_t *pte = pte_alloc_one();
- pmd_val(*pmd) = __pa(pte) | PMD_TYPE_TABLE;
+ pmd_val(*pmd) = pgtable_pa(pte) | PMD_TYPE_TABLE;
}
return pte_offset(pmd, addr);
}
diff --git a/lib/arm/mmu.c b/lib/arm/mmu.c
index b9387efe0065..9da3be38b339 100644
--- a/lib/arm/mmu.c
+++ b/lib/arm/mmu.c
@@ -171,3 +171,23 @@ void *setup_mmu(phys_addr_t phys_end)
mmu_enable(mmu_idmap);
return mmu_idmap;
}
+
+phys_addr_t __virt_to_phys(unsigned long addr)
+{
+ if (mmu_enabled()) {
+ pgd_t *pgtable = current_thread_info()->pgtable;
+ return virt_to_pte_phys(pgtable, (void *)addr);
+ }
+ return addr;
+}
+
+unsigned long __phys_to_virt(phys_addr_t addr)
+{
+ /*
+ * We don't guarantee that phys_to_virt(virt_to_phys(vaddr)) == vaddr, but
+ * the default page tables do identity map all physical addresses, which
+ * means phys_to_virt(virt_to_phys((void *)paddr)) == paddr.
+ */
+ assert(!mmu_enabled() || __virt_to_phys(addr) == addr);
+ return addr;
+}
diff --git a/lib/arm64/asm/page.h b/lib/arm64/asm/page.h
index f06a6941971c..46af552b91c7 100644
--- a/lib/arm64/asm/page.h
+++ b/lib/arm64/asm/page.h
@@ -42,16 +42,14 @@ typedef struct { pgd_t pgd; } pmd_t;
#define pmd_val(x) (pgd_val((x).pgd))
#define __pmd(x) ((pmd_t) { __pgd(x) } )
-#ifndef __virt_to_phys
-#define __phys_to_virt(x) ((unsigned long) (x))
-#define __virt_to_phys(x) (x)
-#endif
-
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+extern phys_addr_t __virt_to_phys(unsigned long addr);
+extern unsigned long __phys_to_virt(phys_addr_t addr);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM64_PAGE_H_ */
diff --git a/lib/arm64/asm/pgtable.h b/lib/arm64/asm/pgtable.h
index 941a850c3f30..5860abe5b08b 100644
--- a/lib/arm64/asm/pgtable.h
+++ b/lib/arm64/asm/pgtable.h
@@ -18,6 +18,14 @@
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
+/*
+ * We can convert va <=> pa page table addresses with simple casts
+ * because we always allocate their pages with alloc_page(), and
+ * alloc_page() always returns identity mapped pages.
+ */
+#define pgtable_va(x) ((void *)(unsigned long)(x))
+#define pgtable_pa(x) ((unsigned long)(x))
+
#define pgd_none(pgd) (!pgd_val(pgd))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pte_none(pte) (!pte_val(pte))
@@ -40,7 +48,7 @@ static inline pgd_t *pgd_alloc(void)
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
- return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+ return pgtable_va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}
#define pte_index(addr) \
@@ -60,7 +68,7 @@ static inline pte_t *pte_alloc(pmd_t *pmd, unsigned long addr)
{
if (pmd_none(*pmd)) {
pte_t *pte = pte_alloc_one();
- pmd_val(*pmd) = __pa(pte) | PMD_TYPE_TABLE;
+ pmd_val(*pmd) = pgtable_pa(pte) | PMD_TYPE_TABLE;
}
return pte_offset(pmd, addr);
}
--
2.13.6
next prev parent reply other threads:[~2018-01-17 10:40 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-01-17 10:39 [PATCH kvm-unit-tests v2 00/12] arm/arm64: mmu fixes and feature Andrew Jones
2018-01-17 10:39 ` [PATCH kvm-unit-tests v2 01/12] arm/arm64: cleanup alloc.h includes Andrew Jones
2018-01-17 10:39 ` [PATCH kvm-unit-tests v2 02/12] arm/arm64: add pgtable to thread_info Andrew Jones
2018-01-17 10:39 ` Andrew Jones [this message]
2018-01-17 10:39 ` [PATCH kvm-unit-tests v2 04/12] arm/arm64: flush page table cache when installing entries Andrew Jones
2018-01-17 10:39 ` [PATCH kvm-unit-tests v2 05/12] arm/arm64: setup: don't allow gaps in phys range Andrew Jones
2018-01-17 10:39 ` [PATCH kvm-unit-tests v2 06/12] phys_alloc: ensure we account all allocations Andrew Jones
2018-01-17 12:15 ` David Hildenbrand
2018-01-17 10:40 ` [PATCH kvm-unit-tests v2 07/12] page_alloc: allow initialization before setup_vm call Andrew Jones
2018-01-17 10:40 ` [PATCH kvm-unit-tests v2 08/12] bitops: add fls Andrew Jones
2018-01-17 10:40 ` [PATCH kvm-unit-tests v2 09/12] page_alloc: add yet another memalign Andrew Jones
2018-01-17 10:40 ` [PATCH kvm-unit-tests v2 10/12] lib/auxinfo: add flags field Andrew Jones
2018-01-17 10:40 ` [PATCH kvm-unit-tests v2 11/12] arm/arm64: allow setup_vm to be skipped Andrew Jones
2018-01-17 10:40 ` [PATCH kvm-unit-tests v2 12/12] arm/arm64: sieve should start with the mmu off Andrew Jones
2018-01-17 11:17 ` [PATCH kvm-unit-tests v2 00/12] arm/arm64: mmu fixes and feature David Hildenbrand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180117104005.29211-4-drjones@redhat.com \
--to=drjones@redhat.com \
--cc=cdall@linaro.org \
--cc=david@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=lvivier@redhat.com \
--cc=pbonzini@redhat.com \
--cc=rkrcmar@redhat.com \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox