* [PATCH RFC 1/7] x86: kill mk_pte_huge
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 2/7] x86: clean up mm/init_32.c Jeremy Fitzhardinge
` (5 subsequent siblings)
6 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-kill-mk_pte_huge.patch --]
[-- Type: text/plain, Size: 1408 bytes --]
mk_pte_huge() only has a single use, which can be trivially replaced:
PAGE_KERNEL_LARGE already carries the _PAGE_PRESENT and _PAGE_PSE bits
that mk_pte_huge() would have ORed in.
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
arch/x86/mm/init_64.c | 3 +--
include/asm-x86/pgtable_64.h | 9 ---------
2 files changed, 1 insertion(+), 11 deletions(-)
===================================================================
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -768,8 +768,7 @@ int __meminit vmemmap_populate(struct pa
if (!p)
return -ENOMEM;
- entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
- mk_pte_huge(entry);
+ entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
set_pmd(pmd, __pmd(pte_val(entry)));
printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
===================================================================
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -378,15 +378,6 @@ static inline pte_t pte_clrhuge(pte_t pt
/* page, protection -> pte */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-static inline pte_t __mk_pte_huge(pte_t entry)
-{
- unsigned long pte;
- pte = pte_val(entry);
- pte |= _PAGE_PRESENT | _PAGE_PSE;
- return __pte(pte);
-}
-#define mk_pte_huge(entry) ((entry) = __mk_pte_huge(entry))
-
#include <linux/mm_types.h>
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
--
* [PATCH RFC 2/7] x86: clean up mm/init_32.c
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 1/7] x86: kill mk_pte_huge Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
2007-11-08 16:20 ` Glauber de Oliveira Costa
2007-11-08 1:50 ` [PATCH RFC 3/7] x86: clean up asm-x86/page*.h Jeremy Fitzhardinge
` (4 subsequent siblings)
6 siblings, 1 reply; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-cleanup-mm-init_32.patch --]
[-- Type: text/plain, Size: 2055 bytes --]
Some code reformatting in init_32.c: wrap overlong lines and compute the
page protection into a local prot variable before the set_pmd()/set_pte()
calls. No functional change.
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
arch/x86/mm/init_32.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
===================================================================
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -165,16 +165,25 @@ static void __init kernel_physical_mappi
pmd = one_md_table_init(pgd);
if (pfn >= max_low_pfn)
continue;
- for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
+ for (pmd_idx = 0;
+ pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
+ pmd++, pmd_idx++) {
unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
- /* Map with big pages if possible, otherwise create normal page tables. */
+ /* Map with big pages if possible, otherwise
+ create normal page tables. */
if (cpu_has_pse) {
- unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
- if (is_kernel_text(address) || is_kernel_text(address2))
- set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
- else
- set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
+ unsigned int address2;
+ pgprot_t prot = PAGE_KERNEL_LARGE;
+
+ address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
+ PAGE_OFFSET + PAGE_SIZE-1;
+
+ if (is_kernel_text(address) ||
+ is_kernel_text(address2))
+ prot = PAGE_KERNEL_LARGE_EXEC;
+
+ set_pmd(pmd, pfn_pmd(pfn, prot));
pfn += PTRS_PER_PTE;
} else {
@@ -183,10 +192,12 @@ static void __init kernel_physical_mappi
for (pte_ofs = 0;
pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
+ pgprot_t prot = PAGE_KERNEL;
+
if (is_kernel_text(address))
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
- else
- set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
+ prot = PAGE_KERNEL_EXEC;
+
+ set_pte(pte, pfn_pte(pfn, prot));
}
}
}
--
* Re: [PATCH RFC 2/7] x86: clean up mm/init_32.c
2007-11-08 1:50 ` [PATCH RFC 2/7] x86: clean up mm/init_32.c Jeremy Fitzhardinge
@ 2007-11-08 16:20 ` Glauber de Oliveira Costa
2007-11-08 16:39 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 14+ messages in thread
From: Glauber de Oliveira Costa @ 2007-11-08 16:20 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: LKML, Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Jeremy Fitzhardinge
> - /* Map with big pages if possible, otherwise create normal page tables. */
> + /* Map with big pages if possible, otherwise
> + create normal page tables. */
/*
* multi comment lines are preferred
* this way, IIRC ;-)
*/
--
Glauber de Oliveira Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
* Re: [PATCH RFC 2/7] x86: clean up mm/init_32.c
2007-11-08 16:20 ` Glauber de Oliveira Costa
@ 2007-11-08 16:39 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 16:39 UTC (permalink / raw)
To: Glauber de Oliveira Costa
Cc: LKML, Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Jeremy Fitzhardinge
Glauber de Oliveira Costa wrote:
>> - /* Map with big pages if possible, otherwise create normal page tables. */
>> + /* Map with big pages if possible, otherwise
>> + create normal page tables. */
>>
> /*
> * multi comment lines are preferred
> * this way, IIRC ;-)
> */
>
I don't think it's worth turning a one-liner into a great big banner.
J
* [PATCH RFC 3/7] x86: clean up asm-x86/page*.h
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 1/7] x86: kill mk_pte_huge Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 2/7] x86: clean up mm/init_32.c Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
2007-11-08 16:42 ` Glauber de Oliveira Costa
2007-11-08 1:50 ` [PATCH RFC 4/7] x86: unify pgtable*.h Jeremy Fitzhardinge
` (3 subsequent siblings)
6 siblings, 1 reply; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-unify-page_h.patch --]
[-- Type: text/plain, Size: 8435 bytes --]
Unify common definitions in page*.h. To simplify other code, I added
typedefs for the pte/pmd/pud/pgd entry values, so they can be used
symbolically elsewhere without needing lots of 32/64/PAE tests.
Also, add a PAGETABLE_LEVELS define so that other definitions can test
it directly rather than relying on indirect 32/64/PAE tests.
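For illustration only (not part of this patch), the new typedefs let later
code manipulate entry values with a single definition per operation; the
helper name below is hypothetical:

/* Hypothetical helper: with pteval_t, flag manipulation reads the same
 * on 2-, 3- and 4-level configurations, with no unsigned long vs
 * unsigned long long casting. */
static inline pte_t pte_set_flags_example(pte_t pte, pteval_t set)
{
	return native_make_pte(native_pte_val(pte) | set);
}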
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
include/asm-x86/page.h | 49 ++++++++++++++++++++++------
include/asm-x86/page_32.h | 77 +++++++++++++++++++++++++--------------------
include/asm-x86/page_64.h | 37 +++++++--------------
3 files changed, 95 insertions(+), 68 deletions(-)
===================================================================
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -1,13 +1,42 @@
+#ifndef _ASM_X86_PAGE_H
+#define _ASM_X86_PAGE_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
+
#ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "page_32.h"
-# else
-# include "page_64.h"
-# endif
+
+#ifdef CONFIG_X86_32
+# include "page_32.h"
#else
-# ifdef __i386__
-# include "page_32.h"
-# else
-# include "page_64.h"
-# endif
+# include "page_64.h"
#endif
+
+#ifndef CONFIG_PARAVIRT
+#define pgd_val(x) native_pgd_val(x)
+#define __pgd(x) native_make_pgd(x)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_val(x) native_pud_val(x)
+#define __pud(x) native_make_pud(x)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_val(x) native_pmd_val(x)
+#define __pmd(x) native_make_pmd(x)
+#endif
+
+#define pte_val(x) native_pte_val(x)
+#define __pte(x) native_make_pte(x)
+#endif /* CONFIG_PARAVIRT */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_X86_PAGE_H */
===================================================================
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -1,16 +1,13 @@
#ifndef _I386_PAGE_H
#define _I386_PAGE_H
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#ifndef _ASM_X86_PAGE_H
+#error Include asm/page.h
+#endif
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+#ifndef __ASSEMBLY__
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#include <linux/types.h>
#ifdef CONFIG_X86_USE_3DNOW
@@ -43,71 +40,86 @@
*/
extern int nx_enabled;
+/* macro to avoid #include hell */
+#define native_pud_val(pud) native_pgd_val((pud).pgd)
+
#ifdef CONFIG_X86_PAE
+#define PAGETABLE_LEVELS 3
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
+
typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
-typedef struct { unsigned long long pgd; } pgd_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
-static inline unsigned long long native_pgd_val(pgd_t pgd)
+static inline pgdval_t native_pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
-static inline unsigned long long native_pmd_val(pmd_t pmd)
+static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
-static inline unsigned long long native_pte_val(pte_t pte)
+static inline pteval_t native_pte_val(pte_t pte)
{
return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
}
-static inline pgd_t native_make_pgd(unsigned long long val)
+static inline pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val };
}
-static inline pmd_t native_make_pmd(unsigned long long val)
+static inline pmd_t native_make_pmd(pmdval_t val)
{
return (pmd_t) { val };
}
-static inline pte_t native_make_pte(unsigned long long val)
+static inline pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
}
-#ifndef CONFIG_PARAVIRT
-#define pmd_val(x) native_pmd_val(x)
-#define __pmd(x) native_make_pmd(x)
-#endif
-
#define HPAGE_SHIFT 21
#include <asm-generic/pgtable-nopud.h>
#else /* !CONFIG_X86_PAE */
+
+#define PAGETABLE_LEVELS 2
+
+typedef u32 pteval_t;
+typedef u32 pmdval_t;
+typedef u32 pgdval_t;
+
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
-static inline unsigned long native_pgd_val(pgd_t pgd)
+static inline pgdval_t native_pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
-static inline unsigned long native_pte_val(pte_t pte)
+static inline pteval_t native_pte_val(pte_t pte)
{
return pte.pte_low;
}
-static inline pgd_t native_make_pgd(unsigned long val)
+/* macro to avoid #include hell */
+#define native_pmd_val(pmd) native_pud_val((pmd).pud)
+
+static inline pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val };
}
-static inline pte_t native_make_pte(unsigned long val)
+static inline pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte_low = val };
}
@@ -127,13 +139,6 @@ static inline pte_t native_make_pte(unsi
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#ifndef CONFIG_PARAVIRT
-#define pgd_val(x) native_pgd_val(x)
-#define __pgd(x) native_make_pgd(x)
-#define pte_val(x) native_pte_val(x)
-#define __pte(x) native_make_pte(x)
-#endif
#endif /* !__ASSEMBLY__ */
@@ -175,6 +180,13 @@ extern int page_is_ram(unsigned long pag
#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
#endif
+#ifdef CONFIG_X86_PAE
+#define __PHYSICAL_MASK_SHIFT 36
+#else
+#define __PHYSICAL_MASK_SHIFT 32
+#endif
+
+#define __PHYSICAL_MASK ((_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
@@ -201,6 +213,5 @@ extern int page_is_ram(unsigned long pag
#include <asm-generic/page.h>
#define __HAVE_ARCH_GATE_AREA 1
-#endif /* __KERNEL__ */
#endif /* _I386_PAGE_H */
===================================================================
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -1,13 +1,13 @@
#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H
+#ifndef _ASM_X86_PAGE_H
+#error Include asm/page.h
+#endif
+
#include <linux/const.h>
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
+#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
@@ -29,16 +29,14 @@
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
-#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
-
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
extern unsigned long end_pfn;
@@ -59,6 +57,11 @@ typedef struct { unsigned long pud; } pu
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
#define PTE_MASK PHYSICAL_PAGE_MASK
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
typedef struct { unsigned long pgprot; } pgprot_t;
@@ -104,20 +107,6 @@ static inline pgd_t native_make_pgd(unsi
{
return (pgd_t){ pgd };
}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define pte_val(x) native_pte_val(x)
-#define pmd_val(x) native_pmd_val(x)
-#define pud_val(x) native_pud_val(x)
-#define pgd_val(x) native_pgd_val(x)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pud(x) ((pud_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#endif /* CONFIG_PARAVIRT */
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
@@ -185,6 +174,4 @@ extern unsigned long __phys_addr(unsigne
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _X86_64_PAGE_H */
--
* Re: [PATCH RFC 3/7] x86: clean up asm-x86/page*.h
2007-11-08 1:50 ` [PATCH RFC 3/7] x86: clean up asm-x86/page*.h Jeremy Fitzhardinge
@ 2007-11-08 16:42 ` Glauber de Oliveira Costa
2007-11-08 20:59 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 14+ messages in thread
From: Glauber de Oliveira Costa @ 2007-11-08 16:42 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: LKML, Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Jeremy Fitzhardinge
On Nov 7, 2007 11:50 PM, Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> +#define PAGETABLE_LEVELS 3
> +
> +typedef u64 pteval_t;
> +typedef u64 pmdval_t;
> +typedef u64 pudval_t;
> +typedef u64 pgdval_t;
> +
> -static inline unsigned long long native_pgd_val(pgd_t pgd)
> +static inline pgdval_t native_pgd_val(pgd_t pgd)
> {
Maybe these kind of things, the typedef and native_xxx definitions can
go into the common header, after we define the PAGETABLE_LEVELS
constant?
I think the more goes into common headers, the better.
> -static inline pte_t native_make_pte(unsigned long long val)
> +static inline pte_t native_make_pte(pteval_t val)
> {
> return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
> }
Although these make_xxx things can probably be just left here...
--
Glauber de Oliveira Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
* Re: [PATCH RFC 3/7] x86: clean up asm-x86/page*.h
2007-11-08 16:42 ` Glauber de Oliveira Costa
@ 2007-11-08 20:59 ` Jeremy Fitzhardinge
2007-11-08 22:38 ` Glauber de Oliveira Costa
0 siblings, 1 reply; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 20:59 UTC (permalink / raw)
To: Glauber de Oliveira Costa
Cc: LKML, Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Jeremy Fitzhardinge
Glauber de Oliveira Costa wrote:
> On Nov 7, 2007 11:50 PM, Jeremy Fitzhardinge <jeremy@goop.org> wrote:
>
>> +#define PAGETABLE_LEVELS 3
>> +
>> +typedef u64 pteval_t;
>> +typedef u64 pmdval_t;
>> +typedef u64 pudval_t;
>> +typedef u64 pgdval_t;
>> +
>>
>
>
>> -static inline unsigned long long native_pgd_val(pgd_t pgd)
>> +static inline pgdval_t native_pgd_val(pgd_t pgd)
>> {
>>
> Maybe these kind of things, the typedef and native_xxx definitions can
> go into the common header, after we define the PAGETABLE_LEVELS
> constant?
> I think the more goes into common headers, the better.
>
You mean put them in a common header, but conditionally by #if
PAGETABLE_LEVELS? I don't think that would be much of an improvement;
it would just add more #ifs, which adds lines and conceptual
complexity. If you go that way, you may as well put everything in one
header wrapped in #ifs, but personally I don't think that would help.
J
* Re: [PATCH RFC 3/7] x86: clean up asm-x86/page*.h
2007-11-08 20:59 ` Jeremy Fitzhardinge
@ 2007-11-08 22:38 ` Glauber de Oliveira Costa
2007-11-08 23:03 ` Jeremy Fitzhardinge
0 siblings, 1 reply; 14+ messages in thread
From: Glauber de Oliveira Costa @ 2007-11-08 22:38 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: LKML, Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Jeremy Fitzhardinge
On Nov 8, 2007 6:59 PM, Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> Glauber de Oliveira Costa wrote:
> > On Nov 7, 2007 11:50 PM, Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> >
> >> +#define PAGETABLE_LEVELS 3
> >> +
> >> +typedef u64 pteval_t;
> >> +typedef u64 pmdval_t;
> >> +typedef u64 pudval_t;
> >> +typedef u64 pgdval_t;
> >> +
> >>
> >
> >
> >> -static inline unsigned long long native_pgd_val(pgd_t pgd)
> >> +static inline pgdval_t native_pgd_val(pgd_t pgd)
> >> {
> >>
> > Maybe these kind of things, the typedef and native_xxx definitions can
> > go into the common header, after we define the PAGETABLE_LEVELS
> > constant?
> > I think the more goes into common headers, the better.
> >
>
> You mean put them in a common header, but conditionally by #if
> PAGETABLE_LEVELS? I don't think that would be much of an improvement;
> it would just add more #ifs, which adds lines and conceptual
> complexity. If you go that way, you may as well put everything in one
> header wrapped in #ifs, but personally I don't think that would help.
Not exactly.
for the native_ functions, there's room for code sharing.
native_pgd_val, and native_pte_val seem to be the same, for at least
pae and x86_64.
As for the typedefs, the same thing can be done. Much like you did in
paravirt.h, just split out between the < 3 and >= 3 levels.
But if it turns out to be just code movement, and I'm wrong in my
supposition that we can turn three variants of the same code into two,
then I agree
with you, let's keep it this way.
--
Glauber de Oliveira Costa.
"Free as in Freedom"
http://glommer.net
"The less confident you are, the more serious you have to act."
* Re: [PATCH RFC 3/7] x86: clean up asm-x86/page*.h
2007-11-08 22:38 ` Glauber de Oliveira Costa
@ 2007-11-08 23:03 ` Jeremy Fitzhardinge
0 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 23:03 UTC (permalink / raw)
To: Glauber de Oliveira Costa
Cc: Jeremy Fitzhardinge, LKML, Andi Kleen, Ingo Molnar,
Thomas Gleixner, Zach Amsden
Glauber de Oliveira Costa wrote:
> Not exactly.
> for the native_ functions, there's room for code sharing.
> native_pgd_val, and native_pte_val seem to be the same, for at least
> pae and x86_64.
> As for the typedefs, the same thing can be done. Much like you did in
> paravirt.h, just split out between the < 3 and >= 3 levels.
Yeah, I see what you mean. I'll play with it and see how it turns out.
J
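For reference, a minimal sketch of the sharing discussed above (not code
from the thread): once the per-mode typedefs from patch 3 are in place,
the PAE and x86_64 accessors collapse to the same body, so one copy could
live in a common header.

/* Sketch only -- assumes pgdval_t is already typedef'd per mode.  The
 * PAE and 64-bit versions of these accessors then differ only in the
 * spelling of the underlying type, so one shared definition suffices. */
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}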
* [PATCH RFC 4/7] x86: unify pgtable*.h
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
` (2 preceding siblings ...)
2007-11-08 1:50 ` [PATCH RFC 3/7] x86: clean up asm-x86/page*.h Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 5/7] x86: simplify pagetable-related operations in paravirt.h Jeremy Fitzhardinge
` (2 subsequent siblings)
6 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-unify-pgtable_h.patch --]
[-- Type: text/plain, Size: 37775 bytes --]
All x86 modes and architectures have very similar pagetable
structures: the page flags, the accessors for testing/setting them,
and the combinations of page flags used for kernel and usermode
mappings are all the same. The main difference is between 32 and
64-bit pagetable entries, with the latter supporting the NX bit.
The most significant difference between the modes/architectures is the
number of levels in the pagetable (4 for 64-bit, 3 for 32-bit/PAE, 2
for non-PAE 32-bit). This accounts for the remaining code in the
various mode-specific headers.
I've tried to avoid changing formatting as much as possible, so that
the code motion is more obvious. A subsequent patch will clean things
up in place.
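As a hedged illustration of the payoff (not taken from the patch), a new
flag accessor now needs a single definition in pgtable.h rather than one
copy per mode-specific header:

/* Illustrative only: mirrors the unified pte_dirty()/pte_young()
 * pattern the patch introduces; pte_val() hides the entry width, so
 * this one body serves 2-, 3- and 4-level pagetables alike. */
static inline int pte_global_example(pte_t pte)
{
	return pte_val(pte) & _PAGE_GLOBAL;
}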
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
include/asm-x86/pgtable-2level.h | 21 --
include/asm-x86/pgtable-3level.h | 40 ----
include/asm-x86/pgtable.h | 318 ++++++++++++++++++++++++++++++++++++++
include/asm-x86/pgtable_32.h | 204 ------------------------
include/asm-x86/pgtable_64.h | 225 --------------------------
5 files changed, 331 insertions(+), 477 deletions(-)
===================================================================
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -24,16 +24,13 @@ static inline void native_set_pmd(pmd_t
{
*pmdp = pmd;
}
-#ifndef CONFIG_PARAVIRT
-#define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval)
-#define set_pte_at(mm,addr,ptep,pteval) native_set_pte_at(mm, addr, ptep, pteval)
-#define set_pmd(pmdptr, pmdval) native_set_pmd(pmdptr, pmdval)
-#endif
+#undef set_pte_atomic
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#undef pmd_clear
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
@@ -50,12 +47,6 @@ static inline pte_t native_ptep_get_and_
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_none(x) (!(x).pte_low)
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
/*
* All present pages are kernel-executable:
*/
@@ -64,17 +55,13 @@ static inline int pte_exec_kernel(pte_t
return 1;
}
+#define __supported_pte_mask (~0ul)
+
/*
* Bits 0, 6 and 7 are taken, split up the 29 bits of offset
* into this range:
*/
#define PTE_FILE_MAX_BITS 29
-
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x1f)
===================================================================
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -94,17 +94,6 @@ static inline void native_pmd_clear(pmd_
*(tmp + 1) = 0;
}
-#ifndef CONFIG_PARAVIRT
-#define set_pte(ptep, pte) native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
-#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte)
-#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
-#define set_pud(pudp, pud) native_set_pud(pudp, pud)
-#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd) native_pmd_clear(pmd)
-#endif
-
/*
* Pentium-II erratum A13: in PAE mode we explicitly have to flush
* the TLB via cr3 if the top-level pgd is changed...
@@ -119,10 +108,6 @@ static inline void pud_clear (pud_t * pu
#define pud_page_vaddr(pud) \
((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
- pmd_index(address))
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
@@ -146,38 +131,13 @@ static inline int pte_same(pte_t a, pte_
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-static inline int pte_none(pte_t pte)
-{
- return !pte.pte_low && !pte.pte_high;
-}
-
-static inline unsigned long pte_pfn(pte_t pte)
-{
- return pte_val(pte) >> PAGE_SHIFT;
-}
-
extern unsigned long long __supported_pte_mask;
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
- return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
- pgprot_val(pgprot)) & __supported_pte_mask);
-}
-
-static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
- return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
- pgprot_val(pgprot)) & __supported_pte_mask);
-}
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
*/
-#define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS 32
/* Encode and de-code a swap entry */
===================================================================
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -1,5 +1,323 @@
+#ifndef _ASM_X86_PGTABLE_H
+#define _ASM_X86_PGTABLE_H
+
+#include <linux/const.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+void paging_init(void);
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * _PAGE_PSE set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory.
+ */
+#define _PAGE_BIT_PRESENT 0
+#define _PAGE_BIT_RW 1
+#define _PAGE_BIT_USER 2
+#define _PAGE_BIT_PWT 3
+#define _PAGE_BIT_PCD 4
+#define _PAGE_BIT_ACCESSED 5
+#define _PAGE_BIT_DIRTY 6
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
+#define _PAGE_BIT_UNUSED2 10
+#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+
+#define _PAGE_PRESENT (_AC(1,UL) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (_AC(1,UL) << _PAGE_BIT_RW)
+#define _PAGE_USER (_AC(1,UL) << _PAGE_BIT_USER)
+#define _PAGE_PWT (_AC(1,UL) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (_AC(1,UL) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AC(1,UL) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY (_AC(1,UL) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE (_AC(1,UL) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL (_AC(1,UL) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1 (_AC(1,UL) << _PAGE_BIT_UNUSED1)
+#define _PAGE_UNUSED2 (_AC(1,UL) << _PAGE_BIT_UNUSED2)
+#define _PAGE_UNUSED3 (_AC(1,UL) << _PAGE_BIT_UNUSED3)
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_NX (_AC(1,ULL) << _PAGE_BIT_NX)
+#else
+#define _PAGE_NX 0
+#endif
+
+/* If _PAGE_PRESENT is clear, we use these: */
+#define _PAGE_FILE _PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_PROTNONE _PAGE_PSE /* if the user mapped it with PROT_NONE;
+ pte_present gives true */
+
+#ifndef __ASSEMBLY__
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define set_pte(ptep, pte) native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte) \
+ native_set_pte_at(mm, addr, ptep, pte)
+
+#define set_pte_present(mm, addr, ptep, pte) \
+ native_set_pte_present(mm, addr, ptep, pte)
+#define set_pte_atomic(ptep, pte) \
+ native_set_pte_atomic(ptep, pte)
+
+#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
+#define set_pud(pudp, pud) native_set_pud(pudp, pud)
+#define pgd_clear(pgd) native_pgd_clear(pgd)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pud_clear(pud) native_pud_clear(pud)
+#endif
+
+#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd) native_pmd_clear(pmd)
+
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+
+#endif /* CONFIG_PARAVIRT */
+
#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#endif
+
+#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
+
+#define FIRST_USER_ADDRESS 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define _PAGE_TABLE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK \
+ (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#ifndef __ASSEMBLY__
+
+#define PAGE_NONE \
+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+
+#define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY \
+ PAGE_COPY_NOEXEC
+#define PAGE_READONLY \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#endif /* __ASSEMBLY__ */
+
+#define _PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+#define _PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+
+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+#define __PAGE_KERNEL_VSYSCALL \
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
+
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+/*
+ * Without NX, the i386 can't do page protection for execute, and
+ * considers that the same are read. Also, write permissions imply
+ * read permissions. This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+#ifndef __ASSEMBLY__
+
+static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
+static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
+
+#ifndef __PAGETABLE_PUD_FOLDED
+static inline bool pgd_bad(pgd_t pgd)
+{
+ return (pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+
+static inline bool pud_bad(pud_t pud)
+{
+ return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+#endif
+
+static inline bool pmd_bad(pmd_t pmd)
+{
+ return (pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+
+/* Change flags of a PTE */
+static inline pte_t pte_modify(pte_t pte_old, pgprot_t newprot)
+{
+ pteval_t pte = pte_val(pte_old);
+ pte &= _PAGE_CHG_MASK;
+ pte |= pgprot_val(newprot);
+ pte &= __supported_pte_mask;
+ return __pte(pte);
+}
+
+/* page, protection -> pte */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline int pmd_large(pmd_t pte)
+{
+ return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ ((_PAGE_PSE | _PAGE_PRESENT));
+}
+
+/* PMD - Level 2 access */
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+/*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+#ifndef __PAGETABLE_PMD_FOLDED
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, address) \
+ ((pmd_t *) pud_page_vaddr(*(dir)) + pmd_index(address))
+#endif
+
+/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
+#define pmd_none(x) (!(unsigned long)pmd_val(x))
+#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
+ pgprot_val(pgprot)) & __supported_pte_mask);
+}
+#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ unsigned long pte;
+ pte = (page_nr << PAGE_SHIFT);
+ pte |= pgprot_val(pgprot);
+ pte &= __supported_pte_mask;
+ return __pte(pte);
+}
+
+#include <asm-generic/pgtable.h>
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PGTABLE_H */
===================================================================
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -25,12 +25,6 @@ struct mm_struct;
struct mm_struct;
struct vm_area_struct;
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
@@ -39,8 +33,8 @@ void check_pgt_cache(void);
void pmd_ctor(struct kmem_cache *, void *);
void pgtable_cache_init(void);
-void paging_init(void);
+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
@@ -49,20 +43,9 @@ void paging_init(void);
*/
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
-# define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
-
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
-
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
@@ -84,112 +67,6 @@ void paging_init(void);
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
-/*
- * _PAGE_PSE set in the page directory entry just means that
- * the page directory entry points directly to a 4MB-aligned block of
- * memory.
- */
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-#define _PAGE_BIT_UNUSED2 10
-#define _PAGE_BIT_UNUSED3 11
-#define _PAGE_BIT_NX 63
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
-#define _PAGE_USER 0x004
-#define _PAGE_PWT 0x008
-#define _PAGE_PCD 0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY 0x040
-#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
-#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
-#define _PAGE_UNUSED1 0x200 /* available for programmer */
-#define _PAGE_UNUSED2 0x400
-#define _PAGE_UNUSED3 0x800
-
-/* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
- pte_present gives true */
-#ifdef CONFIG_X86_PAE
-#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
-#else
-#define _PAGE_NX 0
-#endif
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE \
- __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED \
- __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-
-#define PAGE_SHARED_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY \
- PAGE_COPY_NOEXEC
-#define PAGE_READONLY \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC \
- __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-
-#define _PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define _PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-
-extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-
-/*
- * The i386 can't do page protection for execute, and considers that
- * the same are read. Also, write permissions imply read permissions.
- * This is the closest we can get..
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
/*
* Define this if things work differently on an i386 and an i486:
@@ -201,37 +78,6 @@ extern unsigned long long __PAGE_KERNEL,
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x) (!(unsigned long)pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
-static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
-
-/*
- * The following only works if pte_present() is not true.
- */
-static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
-
-static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
@@ -365,27 +211,6 @@ static inline void clone_pgd_range(pgd_t
* and a page entry and page directory to the page they refer to.
*/
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
- pte.pte_low &= _PAGE_CHG_MASK;
- pte.pte_low |= pgprot_val(newprot);
-#ifdef CONFIG_X86_PAE
- /*
- * Chop off the NX bit (if present), and add the NX portion of
- * the newprot (if present):
- */
- pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
- pte.pte_high |= (pgprot_val(newprot) >> 32) & \
- (__supported_pte_mask >> 32);
-#endif
- return pte;
-}
-
-#define pmd_large(pmd) \
-((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
-
/*
* the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
*
@@ -406,31 +231,6 @@ static inline pte_t pte_modify(pte_t pte
* of a process's
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
/*
* Helper function that returns the kernel pagetable entry controlling
@@ -504,6 +304,4 @@ static inline void paravirt_pagetable_se
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#include <asm-generic/pgtable.h>
-
#endif /* _I386_PGTABLE_H */
===================================================================
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -21,17 +21,14 @@ extern unsigned long __supported_pte_mas
#define swapper_pg_dir init_level4_pgt
-extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#endif /* !__ASSEMBLY__ */
-#endif /* !__ASSEMBLY__ */
+#define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | _PAGE_GLOBAL)
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
@@ -56,28 +53,6 @@ extern unsigned long empty_zero_page[PAG
* entries per page directory level
*/
#define PTRS_PER_PTE 512
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-
-#define set_pte native_set_pte
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
-#define set_pmd native_set_pmd
-#define set_pud native_set_pud
-#define set_pgd native_set_pgd
-#define pte_clear(mm, addr, xp) \
-do { \
- set_pte_at(mm, addr, xp, __pte(0)); \
-} while (0)
-
-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pud_clear native_pud_clear
-#define pgd_clear native_pgd_clear
-#define pte_update(mm, addr, ptep) do { } while (0)
-#define pte_update_defer(mm, addr, ptep) do { } while (0)
-
-#endif
#ifndef __ASSEMBLY__
@@ -108,7 +83,7 @@ static inline void native_pud_clear(pud_
static inline void native_pgd_clear(pgd_t *pgd)
{
- set_pgd(pgd, __pgd(0));
+ native_set_pgd(pgd, __pgd(0));
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -175,16 +150,6 @@ static inline pte_t ptep_get_and_clear_f
#endif /* !__ASSEMBLY__ */
-#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
-#define FIRST_USER_ADDRESS 0
-
#define MAXMEM _AC(0x3fffffffffff, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
@@ -193,145 +158,7 @@ static inline pte_t ptep_get_and_clear_f
#define MODULES_END _AC(0xfffffffffff00000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
-#define _PAGE_BIT_PRESENT 0
-#define _PAGE_BIT_RW 1
-#define _PAGE_BIT_USER 2
-#define _PAGE_BIT_PWT 3
-#define _PAGE_BIT_PCD 4
-#define _PAGE_BIT_ACCESSED 5
-#define _PAGE_BIT_DIRTY 6
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-
-#define _PAGE_PRESENT 0x001
-#define _PAGE_RW 0x002
-#define _PAGE_USER 0x004
-#define _PAGE_PWT 0x008
-#define _PAGE_PCD 0x010
-#define _PAGE_ACCESSED 0x020
-#define _PAGE_DIRTY 0x040
-#define _PAGE_PSE 0x080 /* 2MB page */
-#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
-
-#define _PAGE_PROTNONE 0x080 /* If not present */
-#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define __PAGE_KERNEL \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_VSYSCALL \
- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
- (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE \
- (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC \
- (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
-
-#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
#ifndef __ASSEMBLY__
-
-static inline unsigned long pgd_bad(pgd_t pgd)
-{
- return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-}
-
-static inline unsigned long pud_bad(pud_t pud)
-{
- return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-}
-
-static inline unsigned long pmd_bad(pmd_t pmd)
-{
- return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
-}
-
-#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this
- right? */
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
- unsigned long pte;
- pte = (page_nr << PAGE_SHIFT);
- pte |= pgprot_val(pgprot);
- pte &= __supported_pte_mask;
- return __pte(pte);
-}
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
-
-static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
-static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -349,7 +176,7 @@ static inline pte_t pte_clrhuge(pte_t pt
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })
-/* PUD - Level3 access */
+/* PUD - Level 3 access */
/* to find an entry in a page-table-directory. */
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
@@ -357,26 +184,9 @@ static inline pte_t pte_clrhuge(pte_t pt
#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
-/* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
-#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pmd_offset(dir, address) ((pmd_t *) pud_page_vaddr(*(dir)) + \
- pmd_index(address))
-#define pmd_none(x) (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
-#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
-#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
/* PTE - Level 1 access. */
-
-/* page, protection -> pte */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#include <linux/mm_types.h>
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
@@ -402,25 +212,6 @@ static inline void ptep_set_wrprotect(st
*/
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
-static inline int pmd_large(pmd_t pte)
-{
- return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-}
-
-/* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte_old, pgprot_t newprot)
-{
- unsigned long pte = pte_val(pte_old);
- pte &= _PAGE_CHG_MASK;
- pte |= pgprot_val(newprot);
- pte &= __supported_pte_mask;
- return __pte(pte);
-}
-
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
- pte_index(address))
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
@@ -482,7 +273,7 @@ pte_t *lookup_address(unsigned long addr
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
+
#endif /* !__ASSEMBLY__ */
#endif /* _X86_64_PGTABLE_H */
--
^ permalink raw reply [flat|nested] 14+ messages in thread

* [PATCH RFC 5/7] x86: simplify pagetable-related operations in paravirt.h
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
` (3 preceding siblings ...)
2007-11-08 1:50 ` [PATCH RFC 4/7] x86: unify pgtable*.h Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 6/7] x86/xen: simplify Xen mmu operations Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 7/7] x86: fix up formatting in pgtable*.h Jeremy Fitzhardinge
6 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-simplify-paravirt_h.patch --]
[-- Type: text/plain, Size: 12786 bytes --]
Simplify paravirt.h using the unified page/pgtable.h infrastructure.
This removes a fair amount of duplication, not only of the ops function
pointers themselves but also of the PVOP_*CALL* wrappers.
The wrappers are complicated by the fact that on a 32-bit PAE system,
64-bit values must be passed as two 32-bit arguments, so a different
form of the call is needed than on 64-bit or 32-bit non-PAE, where
every argument fits within the native register size.
The code chooses the appropriate form with a compile-time comparison
of sizeof(pteval_t) and sizeof(unsigned long). This is not needed for
calls that are PAE- or 64-bit-specific.
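A minimal, self-contained sketch of that dispatch (the demo_* names
stand in for pteval_t, the PVOP_CALL1/PVOP_CALL2 wrappers and __pte();
they are not the real paravirt interfaces):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for pteval_t; making it uint64_t models a 32-bit PAE build. */
typedef unsigned long demo_pteval_t;

/* Stand-in for PVOP_CALL1: the value fits in one register-sized argument. */
static uint64_t demo_call1(unsigned long val)
{
	return val;
}

/* Stand-in for PVOP_CALL2: the value is passed as two register-sized halves. */
static uint64_t demo_call2(unsigned long lo, unsigned long hi)
{
	return ((uint64_t)hi << 32) | lo;
}

/* Simplified analogue of the patched __pte(): pick the call form with a
 * compile-time size comparison; the untaken branch is constant-folded
 * away, but it still has to compile in every configuration. */
static demo_pteval_t demo_make_pte(demo_pteval_t val)
{
	if (sizeof(val) > sizeof(unsigned long))
		return (demo_pteval_t)demo_call2((unsigned long)val,
						 (unsigned long)((uint64_t)val >> 32));
	else
		return (demo_pteval_t)demo_call1((unsigned long)val);
}

int main(void)
{
	printf("pte value: %#lx\n", (unsigned long)demo_make_pte(0x1234));
	return 0;
}

Because the branch not taken must still compile on every configuration,
the value is cast to a 64-bit type before the 32-bit shift, mirroring the
(u64)val>>32 casts in the patch below.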
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
include/asm-x86/paravirt.h | 324 +++++++++++++++++++-------------------------
1 file changed, 141 insertions(+), 183 deletions(-)
===================================================================
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -240,40 +240,38 @@ struct pv_mmu_ops {
void (*pte_update_defer)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
+ pteval_t (*pte_val)(pte_t);
+ pgdval_t (*pgd_val)(pgd_t);
+
+ pte_t (*make_pte)(pteval_t pte);
+ pgd_t (*make_pgd)(pgdval_t pgd);
+
+#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
#endif
-#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
- void (*set_pud)(pud_t *pudp, pud_t pudval);
+
void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void (*pmd_clear)(pmd_t *pmdp);
- unsigned long long (*pte_val)(pte_t);
- unsigned long long (*pmd_val)(pmd_t);
- unsigned long long (*pgd_val)(pgd_t);
+ pmdval_t (*pmd_val)(pmd_t);
+ pmd_t (*make_pmd)(pmdval_t pmd);
- pte_t (*make_pte)(unsigned long long pte);
- pmd_t (*make_pmd)(unsigned long long pmd);
- pgd_t (*make_pgd)(unsigned long long pgd);
- #ifdef CONFIG_X86_64
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
+
+#if PAGETABLE_LEVELS == 4
void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
void (*pud_clear)(pud_t *pudp);
void (*pgd_clear)(pgd_t *pgdp);
- unsigned long long (*pud_val)(pud_t);
+ pudval_t (*pud_val)(pud_t);
- pud_t (*make_pud)(unsigned long long pud);
- #endif
-#else
- unsigned long (*pte_val)(pte_t);
- unsigned long (*pgd_val)(pgd_t);
-
- pte_t (*make_pte)(unsigned long pte);
- pgd_t (*make_pgd)(unsigned long pgd);
-#endif
+ pud_t (*make_pud)(pudval_t pud);
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_HIGHPTE
void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
@@ -958,85 +956,137 @@ static inline void pte_update_defer(stru
PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}
-#ifdef CONFIG_X86_PAE
-static inline pte_t __pte(unsigned long long val)
+/*
+ * Pagetable manipulators
+ *
+ * There are three cases to deal with:
+ * 32-bit processor, non-PAE: 2-level pagetable with 32-bit entries
+ * 32-bit processor, PAE: 3-level pagetable with 64-bit entries
+ * 64-bit processor: 4-level pagetable with 64-bit entries
+ *
+ * In 32-bit mode, passing 64-bit parameters must be done in two
+ * 32-bit chunks, so we need to use a separate PVOP_CALLx macro from
+ * either 64-bit mode or 32-bit/non-PAE.
+ *
+ * We rely on the predefined native_make_X/native_X_val to do
+ * packing/unpacking of the current pagetable type.
+ */
+static inline pte_t __pte(pteval_t val)
{
- unsigned long long ret = PVOP_CALL2(unsigned long long,
- pv_mmu_ops.make_pte,
- val, val >> 32);
- return (pte_t) { ret, ret >> 32 };
+ pteval_t ret;
+
+ if (sizeof(val) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pteval_t, pv_mmu_ops.make_pte,
+ val, (u64)val>>32);
+ else
+ ret = PVOP_CALL1(pteval_t, pv_mmu_ops.make_pte, val);
+
+ return native_make_pte(ret);
}
-static inline pmd_t __pmd(unsigned long long val)
+static inline pteval_t pte_val(pte_t x)
{
- return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
- val, val >> 32) };
+ pteval_t val = native_pte_val(x);
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ return PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
+ val, (u64)val>>32);
+ else
+ return PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, val);
}
-static inline pgd_t __pgd(unsigned long long val)
+static inline pgd_t __pgd(pgdval_t val)
{
- return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
- val, val >> 32) };
+ pgdval_t pgd;
+
+ if (sizeof(val) > sizeof(unsigned long))
+ pgd = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
+ val, (u64)val>>32);
+ else
+ pgd = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, val);
+
+ return native_make_pgd(pgd);
}
-static inline unsigned long long pte_val(pte_t x)
+static inline pgdval_t pgd_val(pgd_t x)
{
- return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
- x.pte_low, x.pte_high);
-}
-
-static inline unsigned long long pmd_val(pmd_t x)
-{
- return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
- x.pmd, x.pmd >> 32);
-}
-
-static inline unsigned long long pgd_val(pgd_t x)
-{
- return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
- x.pgd, x.pgd >> 32);
+ pgdval_t val = native_pgd_val(x);
+ if (sizeof(pgdval_t) > sizeof(unsigned long))
+ return PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
+ val, (u64)val>>32);
+ else
+ return PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, val);
}
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
- PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
+ pteval_t val = native_pte_val(pteval);
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, val, (u64)val>>32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, val);
}
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- /* 5 arg words */
- pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
+ else
+ PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr,
+ ptep, native_pte_val(pteval));
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ pmdval_t val = native_pmd_val(pmdval);
+ if (sizeof(val) > sizeof(unsigned long))
+ PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val>>32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+}
+
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}
static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
- PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
- pteval.pte_low, pteval.pte_high);
+ pteval_t val = native_pte_val(pteval);
+
+ PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep, val, (u64)val>>32);
}
-
-static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- /* 5 arg words */
- pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
-}
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
- pmdval.pmd, pmdval.pmd >> 32);
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pudval)
-{
- PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
- pudval.pgd.pgd, pudval.pgd.pgd >> 32);
-}
+#endif /* X86_PAE */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
+}
+
+static inline pmd_t __pmd(pmdval_t val)
+{
+ pmdval_t pmd;
+
+ if (sizeof(val) > sizeof(unsigned long))
+ pmd = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+ val, (u64)val>>32);
+ else
+ pmd = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, val);
+
+ return native_make_pmd(pmd);
+}
+
+static inline pmdval_t pmd_val(pmd_t x)
+{
+ pmdval_t val = native_pmd_val(x);
+ if (sizeof(val) > sizeof(unsigned long))
+ return PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+ val, (u64)val>>32);
+ else
+ return PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, val);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -1044,95 +1094,25 @@ static inline void pmd_clear(pmd_t *pmdp
PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
-#elif defined(CONFIG_X86_64)
-/* FIXME: There ought to be a way to do it that duplicate less code */
-static inline pte_t __pte(unsigned long long val)
+static inline void set_pud(pud_t *pudp, pud_t pudval)
{
- unsigned long long ret;
- ret = PVOP_CALL1(unsigned long long, pv_mmu_ops.make_pte, val);
- return (pte_t) { ret };
+ pudval_t val = native_pud_val(pudval);
+
+ if (sizeof(val) > sizeof(unsigned long))
+ PVOP_VCALL3(pv_mmu_ops.set_pud, pudp, val, (u64)val>>32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, val);
}
-static inline pmd_t __pmd(unsigned long long val)
-{
- unsigned long long ret;
- ret = PVOP_CALL1(unsigned long long, pv_mmu_ops.make_pmd, val);
- return (pmd_t) { ret };
-}
-
-static inline pud_t __pud(unsigned long long val)
-{
- unsigned long long ret;
- ret = PVOP_CALL1(unsigned long long, pv_mmu_ops.make_pud, val);
- return (pud_t) { ret };
-}
-
-static inline pgd_t __pgd(unsigned long long val)
-{
- unsigned long long ret;
- ret = PVOP_CALL1(unsigned long long, pv_mmu_ops.make_pgd, val);
- return (pgd_t) { ret };
-}
-
-static inline unsigned long long pte_val(pte_t x)
-{
- return PVOP_CALL1(unsigned long long, pv_mmu_ops.pte_val, x.pte);
-}
-
-static inline unsigned long long pmd_val(pmd_t x)
-{
- return PVOP_CALL1(unsigned long long, pv_mmu_ops.pmd_val, x.pmd);
-}
-
-static inline unsigned long long pud_val(pud_t x)
-{
- return PVOP_CALL1(unsigned long long, pv_mmu_ops.pud_val, x.pud);
-}
-
-static inline unsigned long long pgd_val(pgd_t x)
-{
- return PVOP_CALL1(unsigned long long, pv_mmu_ops.pgd_val, x.pgd);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte);
-}
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pmd);
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pudval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, pudval.pud);
-}
+#if PAGETABLE_LEVELS == 4
+/* Always 64-bit processor, so param passing needs no special handling */
+#ifdef CONFIG_X86_32
+#error 4 level pagetables in 32-bit mode?!
+#endif
static inline void set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
- PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, pgdval.pgd);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
-}
-
-static inline void pud_clear(pud_t *pudp)
-{
- PVOP_VCALL1(pv_mmu_ops.pud_clear, pudp);
+ PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgdval));
}
static inline void pgd_clear(pgd_t *pgdp)
@@ -1140,44 +1120,22 @@ static inline void pgd_clear(pgd_t *pgdp
PVOP_VCALL1(pv_mmu_ops.pgd_clear, pgdp);
}
-#else /* !CONFIG_X86_PAE && !CONFIG_X86_64*/
-
-static inline pte_t __pte(unsigned long val)
+static inline pud_t __pud(pudval_t val)
{
- return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
+ return native_make_pud(PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, val));
}
-static inline pgd_t __pgd(unsigned long val)
+static inline pudval_t pud_val(pud_t x)
{
- return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
+ return PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val, native_pud_val(x));
}
-static inline unsigned long pte_val(pte_t x)
+static inline void pud_clear(pud_t *pudp)
{
- return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
+ PVOP_VCALL1(pv_mmu_ops.pud_clear, pudp);
}
-
-static inline unsigned long pgd_val(pgd_t x)
-{
- return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
-}
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
-}
-#endif /* CONFIG_X86_PAE */
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
--
^ permalink raw reply [flat|nested] 14+ messages in thread

* [PATCH RFC 6/7] x86/xen: simplify Xen mmu operations
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
` (4 preceding siblings ...)
2007-11-08 1:50 ` [PATCH RFC 5/7] x86: simplify pagetable-related operations in paravirt.h Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
2007-11-08 1:50 ` [PATCH RFC 7/7] x86: fix up formatting in pgtable*.h Jeremy Fitzhardinge
6 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-simplify-xen-mmu.patch --]
[-- Type: text/plain, Size: 5223 bytes --]
Take advantage of the unified page/pgtable.h definitions to reduce the
number of duplicate definitions of the various Xen mmu_ops functions.
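The underlying pattern can be sketched in isolation: once every
configuration exposes the same value type and a native pack/unpack pair,
a single translation function covers both the PAE and 64-bit pmd layouts.
The demo_* names and the identity machine-to-physical helper below are
illustrative assumptions, not the real Xen interfaces:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t demo_pmdval_t;
typedef struct { demo_pmdval_t pmd; } demo_pmd_t;

/* Native pack/unpack: the representation may differ per configuration,
 * but the interface seen by the translation code does not. */
static inline demo_pmdval_t demo_native_pmd_val(demo_pmd_t pmd)
{
	return pmd.pmd;
}

static inline demo_pmd_t demo_native_make_pmd(demo_pmdval_t val)
{
	return (demo_pmd_t){ .pmd = val };
}

/* Stand-in for machine_to_phys(); a real implementation would consult
 * the machine-to-physical table. */
static inline demo_pmdval_t demo_machine_to_phys(demo_pmdval_t maddr)
{
	return maddr;
}

/* One definition now serves every pagetable layout. */
static demo_pmdval_t demo_xen_pmd_val(demo_pmd_t pmd)
{
	demo_pmdval_t ret = demo_native_pmd_val(pmd);
	if (ret)
		ret = demo_machine_to_phys(ret) | 1;
	return ret;
}

int main(void)
{
	demo_pmd_t pmd = demo_native_make_pmd(0x1000);
	printf("pmd val: %#llx\n", (unsigned long long)demo_xen_pmd_val(pmd));
	return 0;
}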
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
arch/x86/xen/enlighten.c | 8 ++++-
arch/x86/xen/mmu.c | 67 +++++++++++++++++-----------------------------
arch/x86/xen/mmu.h | 26 +++++------------
3 files changed, 41 insertions(+), 60 deletions(-)
===================================================================
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1038,16 +1038,18 @@ static const struct pv_mmu_ops xen_mmu_o
.make_pte = xen_make_pte,
.make_pgd = xen_make_pgd,
+#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
.set_pte_atomic = xen_set_pte_atomic,
.set_pte_present = xen_set_pte_at,
+#endif /* PAE */
.set_pud = xen_set_pud,
.pte_clear = xen_pte_clear,
.pmd_clear = xen_pmd_clear,
.make_pmd = xen_make_pmd,
.pmd_val = xen_pmd_val,
-#endif /* PAE */
+#endif /* PAGETABLE_LEVELS >= 3 */
.activate_mm = xen_activate_mm,
.dup_mmap = xen_dup_mmap,
@@ -1175,6 +1177,10 @@ asmlinkage void __init xen_start_kernel(
xen_setup_vcpu_info_placement();
#endif
+#ifdef CONFIG_X86_PAE
+ __supported_pte_mask &= ~_PAGE_PCD;
+#endif
+
pv_info.kernel_rpl = 1;
if (xen_feature(XENFEAT_supervisor_mode_kernel))
pv_info.kernel_rpl = 0;
===================================================================
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -211,7 +211,7 @@ void xen_pmd_clear(pmd_t *pmdp)
xen_set_pmd(pmdp, __pmd(0));
}
-unsigned long long xen_pte_val(pte_t pte)
+pteval_t xen_pte_val(pte_t pte)
{
unsigned long long ret = 0;
@@ -223,23 +223,7 @@ unsigned long long xen_pte_val(pte_t pte
return ret;
}
-unsigned long long xen_pmd_val(pmd_t pmd)
-{
- unsigned long long ret = pmd.pmd;
- if (ret)
- ret = machine_to_phys(XMADDR(ret)).paddr | 1;
- return ret;
-}
-
-unsigned long long xen_pgd_val(pgd_t pgd)
-{
- unsigned long long ret = pgd.pgd;
- if (ret)
- ret = machine_to_phys(XMADDR(ret)).paddr | 1;
- return ret;
-}
-
-pte_t xen_make_pte(unsigned long long pte)
+pte_t xen_make_pte(pteval_t pte)
{
if (pte & 1)
pte = phys_to_machine(XPADDR(pte)).maddr;
@@ -247,20 +231,13 @@ pte_t xen_make_pte(unsigned long long pt
return (pte_t){ pte, pte >> 32 };
}
-pmd_t xen_make_pmd(unsigned long long pmd)
+
+pmd_t xen_make_pmd(pmdval_t pmd)
{
if (pmd & 1)
pmd = phys_to_machine(XPADDR(pmd)).maddr;
- return (pmd_t){ pmd };
-}
-
-pgd_t xen_make_pgd(unsigned long long pgd)
-{
- if (pgd & _PAGE_PRESENT)
- pgd = phys_to_machine(XPADDR(pgd)).maddr;
-
- return (pgd_t){ pgd };
+ return native_make_pmd(pmd);
}
#else /* !PAE */
void xen_set_pte(pte_t *ptep, pte_t pte)
@@ -268,7 +245,7 @@ void xen_set_pte(pte_t *ptep, pte_t pte)
*ptep = pte;
}
-unsigned long xen_pte_val(pte_t pte)
+pteval_t xen_pte_val(pte_t pte)
{
unsigned long ret = pte.pte_low;
@@ -278,30 +255,38 @@ unsigned long xen_pte_val(pte_t pte)
return ret;
}
-unsigned long xen_pgd_val(pgd_t pgd)
-{
- unsigned long ret = pgd.pgd;
- if (ret)
- ret = machine_to_phys(XMADDR(ret)).paddr | 1;
- return ret;
-}
-
-pte_t xen_make_pte(unsigned long pte)
+pte_t xen_make_pte(pteval_t pte)
{
if (pte & _PAGE_PRESENT)
pte = phys_to_machine(XPADDR(pte)).maddr;
return (pte_t){ pte };
}
+#endif /* CONFIG_X86_PAE */
-pgd_t xen_make_pgd(unsigned long pgd)
+pmdval_t xen_pmd_val(pmd_t pmd)
+{
+ pmdval_t ret = native_pmd_val(pmd);
+ if (ret)
+ ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+ return ret;
+}
+
+pgdval_t xen_pgd_val(pgd_t pgd)
+{
+ pgdval_t ret = native_pgd_val(pgd);
+ if (ret)
+ ret = machine_to_phys(XMADDR(ret)).paddr | 1;
+ return ret;
+}
+
+pgd_t xen_make_pgd(pgdval_t pgd)
{
if (pgd & _PAGE_PRESENT)
pgd = phys_to_machine(XPADDR(pgd)).maddr;
- return (pgd_t){ pgd };
+ return native_make_pgd(pgd);
}
-#endif /* CONFIG_X86_PAE */
enum pt_level {
PT_PGD,
===================================================================
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -30,31 +30,21 @@ void xen_pgd_pin(pgd_t *pgd);
void xen_pgd_pin(pgd_t *pgd);
//void xen_pgd_unpin(pgd_t *pgd);
+pteval_t xen_pte_val(pte_t);
+pmdval_t xen_pmd_val(pmd_t);
+pgdval_t xen_pgd_val(pgd_t);
+
+pte_t xen_make_pte(pteval_t);
+pmd_t xen_make_pmd(pmdval_t);
+pgd_t xen_make_pgd(pgdval_t);
+
#ifdef CONFIG_X86_PAE
-unsigned long long xen_pte_val(pte_t);
-unsigned long long xen_pmd_val(pmd_t);
-unsigned long long xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(unsigned long long);
-pmd_t xen_make_pmd(unsigned long long);
-pgd_t xen_make_pgd(unsigned long long);
-
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval);
void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
void xen_set_pud(pud_t *ptr, pud_t val);
void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void xen_pmd_clear(pmd_t *pmdp);
-
-
-#else
-unsigned long xen_pte_val(pte_t);
-unsigned long xen_pmd_val(pmd_t);
-unsigned long xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(unsigned long);
-pmd_t xen_make_pmd(unsigned long);
-pgd_t xen_make_pgd(unsigned long);
#endif
#endif /* _XEN_MMU_H */
--
^ permalink raw reply [flat|nested] 14+ messages in thread

* [PATCH RFC 7/7] x86: fix up formatting in pgtable*.h
2007-11-08 1:50 [PATCH RFC 0/7] Unify asm-x86/pgtable.h and page.h Jeremy Fitzhardinge
` (5 preceding siblings ...)
2007-11-08 1:50 ` [PATCH RFC 6/7] x86/xen: simplify Xen mmu operations Jeremy Fitzhardinge
@ 2007-11-08 1:50 ` Jeremy Fitzhardinge
6 siblings, 0 replies; 14+ messages in thread
From: Jeremy Fitzhardinge @ 2007-11-08 1:50 UTC (permalink / raw)
To: LKML
Cc: Andi Kleen, Ingo Molnar, Thomas Gleixner, Zach Amsden,
Glauber de Oliveira Costa, Jeremy Fitzhardinge
[-- Attachment #1: x86-normalize-pgtable-formatting.patch --]
[-- Type: text/plain, Size: 10038 bytes --]
Fix up various pieces of unconventional formatting in
asm-x86/pgtable*.h. In some cases, the old formatting was arguably
clearer with a wide enough terminal, but this patch gives the option
of using a more standard form.
Signed-off-by: Jeremy Fitzhardinge <Jeremy.Fitzhardinge@citrix.com>
---
include/asm-x86/pgtable-2level.h | 24 +++++++---
include/asm-x86/pgtable-3level.h | 17 ++++---
include/asm-x86/pgtable.h | 91 ++++++++++++++++++++++++++++++++------
include/asm-x86/pgtable_64.h | 20 +++++---
4 files changed, 118 insertions(+), 34 deletions(-)
===================================================================
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -15,25 +15,36 @@ static inline void native_set_pte(pte_t
{
*ptep = pte;
}
+
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , pte_t pte)
{
native_set_pte(ptep, pte);
}
+
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
}
#undef set_pte_atomic
-#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
+#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
-#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
+
+#define pte_clear(mm,addr,xp) \
+ do { \
+ set_pte_at(mm, addr, xp, __pte(0)); \
+ } while (0)
+
#undef pmd_clear
-#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
+#define pmd_clear(xp) \
+ do { \
+ set_pmd(xp, __pmd(0)); \
+ } while (0)
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
+static inline void native_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *xp)
{
*xp = __pte(0);
}
@@ -66,7 +77,8 @@ static inline int pte_exec_kernel(pte_t
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x1f)
#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
===================================================================
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -9,7 +9,8 @@
*/
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+ printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
+ &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
@@ -39,6 +40,7 @@ static inline void native_set_pte(pte_t
smp_wmb();
ptep->pte_low = pte.pte_low;
}
+
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , pte_t pte)
{
@@ -65,10 +67,12 @@ static inline void native_set_pte_atomic
{
set_64bit((unsigned long long *)(ptep),native_pte_val(pte));
}
+
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
}
+
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
@@ -79,7 +83,8 @@ static inline void native_set_pud(pud_t
* entry, so clear the bottom half first and enforce ordering with a compiler
* barrier.
*/
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void native_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
ptep->pte_low = 0;
smp_wmb();
@@ -102,11 +107,11 @@ static inline void native_pmd_clear(pmd_
*/
static inline void pud_clear (pud_t * pud) { }
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) \
+ ((struct page *) __va(pud_val(pud) & PAGE_MASK))
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) \
+ ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
#ifdef CONFIG_SMP
===================================================================
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -66,11 +66,30 @@ void paging_init(void);
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_RW;
+}
+
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PSE;
+}
#endif /* __ASSEMBLY__ */
@@ -212,15 +231,59 @@ static inline int pte_huge(pte_t pte) {
#ifndef __ASSEMBLY__
-static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
-static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY));
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED));
+ return pte;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW));
+ return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX));
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY));
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
+ return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW));
+ return pte;
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE));
+ return pte;
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE));
+ return pte;
+}
#ifndef __PAGETABLE_PUD_FOLDED
static inline bool pgd_bad(pgd_t pgd)
===================================================================
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -168,7 +168,8 @@ static inline pte_t ptep_get_and_clear_f
/*
* Level 4 access.
*/
-#define pgd_page_vaddr(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
+#define pgd_page_vaddr(pgd) \
+ ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
#define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT))
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
@@ -178,11 +179,13 @@ static inline pte_t ptep_get_and_clear_f
/* PUD - Level 3 access */
/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
#define pud_page(pud) (pfn_to_page(pud_val(pud) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
-#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+#define pud_index(address) \
+ (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pud_offset(pgd, address) \
+ ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
+#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
@@ -241,7 +244,8 @@ static inline void ptep_set_wrprotect(st
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 1) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
@@ -252,8 +256,8 @@ extern int kern_addr_valid(unsigned long
pte_t *lookup_address(unsigned long addr);
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#define HAVE_ARCH_UNMAPPED_AREA
--
^ permalink raw reply [flat|nested] 14+ messages in thread