From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
x86@kernel.org, Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Arnd Bergmann <arnd@arndb.de>,
"H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <ak@linux.intel.com>,
Dave Hansen <dave.hansen@intel.com>,
Andy Lutomirski <luto@amacapital.net>,
linux-arch@vger.kernel.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv4 16/33] x86/mm/pat: handle additional page table
Date: Mon, 6 Mar 2017 16:53:40 +0300
Message-ID: <20170306135357.3124-17-kirill.shutemov@linux.intel.com>
In-Reply-To: <20170306135357.3124-1-kirill.shutemov@linux.intel.com>

Straightforward extension of the existing code to support the additional
page table level.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
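For reference, a minimal sketch (illustration only, not part of the patch) of
the walk that lookup_address_in_pgd() performs once the p4d step is in place.
The real function additionally records the mapping level in *level and also
returns non-present huge entries to the caller; that bookkeeping is omitted
here, and walk_example() is a hypothetical helper:

#include <asm/pgtable.h>

/* Illustration only: hypothetical helper, not added by this patch. */
static pte_t *walk_example(pgd_t *pgd, unsigned long address)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;

	/* New step: on 4-level kernels p4d_offset() is effectively a cast. */
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_large(*p4d))
		return (pte_t *)p4d;	/* 512G mapping */

	/* pud_offset() now takes a p4d_t *, not a pgd_t *. */
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;
	if (pud_large(*pud))
		return (pte_t *)pud;	/* 1G mapping */

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;	/* 2M mapping */

	return pte_offset_kernel(pmd, address);
}

On configurations with only four paging levels the p4d folds back into the
pgd, so the extra step adds no cost; see the sketch after the diffstat.
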
arch/x86/mm/pageattr.c | 56 ++++++++++++++++++++++++++++++++++++--------------
1 file changed, 41 insertions(+), 15 deletions(-)
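
The extra p4d handling is harmless on existing 4-level configurations
because the new level is folded away at compile time. Roughly, a folded
p4d looks like the sketch below (modeled on <asm-generic/pgtable-nop4d.h>
introduced earlier in the series; simplified, not a verbatim copy):

/*
 * Simplified sketch of a folded p4d: the p4d entry is just the pgd
 * entry, p4d_offset() is a cast, and the table has a single slot.
 * Assumes the usual pgd_t and PGDIR_SHIFT definitions.
 */
typedef struct { pgd_t pgd; } p4d_t;

#define P4D_SHIFT	PGDIR_SHIFT
#define PTRS_PER_P4D	1

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd;
}

With the level folded, the p4d_none()/p4d_large() tests added below end up
inspecting the same top-level entry that the old pgd-based checks looked at,
so the walk behaves as before on 4-level kernels.
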
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 28d42130243c..eb0ad12cdfde 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -346,6 +346,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level)
{
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -354,7 +355,15 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pgd_none(*pgd))
return NULL;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d))
+ return NULL;
+
+ *level = PG_LEVEL_512G;
+ if (p4d_large(*p4d) || !p4d_present(*p4d))
+ return (pte_t *)p4d;
+
+ pud = pud_offset(p4d, address);
if (pud_none(*pud))
return NULL;
@@ -406,13 +415,18 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
pmd_t *lookup_pmd_address(unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
return NULL;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
+ return NULL;
+
+ pud = pud_offset(p4d, address);
if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
return NULL;
@@ -477,11 +491,13 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = (pgd_t *)page_address(page) + pgd_index(address);
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
set_pte_atomic((pte_t *)pmd, pte);
}
@@ -836,9 +852,9 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
pud_clear(pud);
}
-static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
- pud_t *pud = pud_offset(pgd, start);
+ pud_t *pud = pud_offset(p4d, start);
/*
* Not on a GB page boundary?
@@ -1004,8 +1020,8 @@ static long populate_pmd(struct cpa_data *cpa,
return num_pages;
}
-static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
- pgprot_t pgprot)
+static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
+ pgprot_t pgprot)
{
pud_t *pud;
unsigned long end;
@@ -1026,7 +1042,7 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
cur_pages = (pre_end - start) >> PAGE_SHIFT;
cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
- pud = pud_offset(pgd, start);
+ pud = pud_offset(p4d, start);
/*
* Need a PMD page?
@@ -1047,7 +1063,7 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
if (cpa->numpages == cur_pages)
return cur_pages;
- pud = pud_offset(pgd, start);
+ pud = pud_offset(p4d, start);
pud_pgprot = pgprot_4k_2_large(pgprot);
/*
@@ -1067,7 +1083,7 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
if (start < end) {
long tmp;
- pud = pud_offset(pgd, start);
+ pud = pud_offset(p4d, start);
if (pud_none(*pud))
if (alloc_pmd_page(pud))
return -1;
@@ -1090,33 +1106,43 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
pud_t *pud = NULL; /* shut up gcc */
+ p4d_t *p4d;
pgd_t *pgd_entry;
long ret;
pgd_entry = cpa->pgd + pgd_index(addr);
+ if (pgd_none(*pgd_entry)) {
+ p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!p4d)
+ return -1;
+
+ set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
+ }
+
/*
- * Allocate a PUD page and hand it down for mapping.
+ * Allocate a P4D page and hand it down for mapping.
*/
- if (pgd_none(*pgd_entry)) {
+ p4d = p4d_offset(pgd_entry, addr);
+ if (p4d_none(*p4d)) {
pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
if (!pud)
return -1;
- set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
+ set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
}
pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
- ret = populate_pud(cpa, addr, pgd_entry, pgprot);
+ ret = populate_pud(cpa, addr, p4d, pgprot);
if (ret < 0) {
/*
* Leave the PUD page in place in case some other CPU or thread
* already found it, but remove any useless entries we just
* added to it.
*/
- unmap_pud_range(pgd_entry, addr,
+ unmap_pud_range(p4d, addr,
addr + (cpa->numpages << PAGE_SHIFT));
return ret;
}
--
2.11.0