From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>
Cc: x86@kernel.org, Kostya Serebryany <kcc@google.com>,
Andrey Ryabinin <ryabinin.a.a@gmail.com>,
Andrey Konovalov <andreyknvl@gmail.com>,
Alexander Potapenko <glider@google.com>,
Taras Madan <tarasmadan@google.com>,
Dmitry Vyukov <dvyukov@google.com>,
"H . J . Lu" <hjl.tools@gmail.com>,
Andi Kleen <ak@linux.intel.com>,
Rick Edgecombe <rick.p.edgecombe@intel.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv5 OPTIONAL 12/13] x86/mm: Extend LAM to support LAM_U48
Date: Wed, 13 Jul 2022 02:13:27 +0300
Message-ID: <20220712231328.5294-13-kirill.shutemov@linux.intel.com>
In-Reply-To: <20220712231328.5294-1-kirill.shutemov@linux.intel.com>
LAM_U48 allows encoding 15 bits of tags into an address.

LAM_U48 steals the bits above bit 47 for tags, which makes it
impossible for userspace to use the full address space on a 5-level
paging machine.

Make the two features mutually exclusive: whichever gets enabled first
blocks the other one.
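
For illustration, a minimal userspace sketch of enabling LAM_U48. It
assumes the ARCH_ENABLE_TAGGED_ADDR arch_prctl() added by patch 06/13
of this series; the tag placement in bits 62:48 follows the untag_mask
set up below:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>	/* ARCH_ENABLE_TAGGED_ADDR, from patch 06/13 */

int main(void)
{
	uint64_t *p = malloc(sizeof(*p));

	/* Ask for 15 tag bits; the kernel selects LAM_U48 (nr_bits <= 15) */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 15)) {
		perror("arch_prctl");
		return 1;
	}

	/* Place a 15-bit tag in bits 62:48; hardware masks it on access */
	uint64_t *tagged = (uint64_t *)((uint64_t)p | (0x7fffULL << 48));
	*tagged = 42;
	printf("%llu\n", (unsigned long long)*p);	/* prints 42 */
	return 0;
}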
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
arch/x86/include/asm/elf.h | 3 ++-
arch/x86/include/asm/mmu_context.h | 13 +++++++++++++
arch/x86/kernel/process_64.c | 23 +++++++++++++++++++++++
arch/x86/kernel/sys_x86_64.c | 5 +++--
arch/x86/mm/hugetlbpage.c | 6 ++++--
arch/x86/mm/mmap.c | 10 +++++++++-
6 files changed, 54 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index cb0ff1055ab1..4df13497a770 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -317,7 +317,8 @@ static inline int mmap_is_ia32(void)
extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
-extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
+extern bool mmap_address_hint_valid(struct mm_struct *mm,
+ unsigned long addr, unsigned long len);
extern unsigned long get_sigframe_size(void);
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index b0e9ea23758b..3736f41948e9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -263,6 +263,19 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
unsigned long __get_current_cr3_fast(void);
+#ifdef CONFIG_X86_5LEVEL
+static inline bool full_va_allowed(struct mm_struct *mm)
+{
+ /* LAM_U48 steals VA bits above 47-bit for tags */
+ return mm->context.lam_cr3_mask != X86_CR3_LAM_U48;
+}
+#else
+static inline bool full_va_allowed(struct mm_struct *mm)
+{
+ return false;
+}
+#endif
+
#include <asm-generic/mmu_context.h>
#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 82a19168bfa4..cfa2e42a135a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -761,6 +761,16 @@ static void enable_lam_func(void *mm)
set_tlbstate_cr3_lam_mask(lam_mask);
}
+static bool lam_u48_allowed(void)
+{
+ struct mm_struct *mm = current->mm;
+
+ if (!full_va_allowed(mm))
+ return true;
+
+ return find_vma(mm, DEFAULT_MAP_WINDOW) == NULL;
+}
+
static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
{
int ret = 0;
@@ -768,6 +778,10 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
if (!cpu_feature_enabled(X86_FEATURE_LAM))
return -ENODEV;
+ /* lam_u48_allowed() requires mmap_lock */
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
mutex_lock(&mm->context.lock);
/* Already enabled? */
@@ -782,6 +796,14 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
} else if (nr_bits <= 6) {
mm->context.lam_cr3_mask = X86_CR3_LAM_U57;
mm->context.untag_mask = ~GENMASK(62, 57);
+ } else if (nr_bits <= 15) {
+ if (!lam_u48_allowed()) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mm->context.lam_cr3_mask = X86_CR3_LAM_U48;
+ mm->context.untag_mask = ~GENMASK(62, 48);
} else {
ret = -EINVAL;
goto out;
@@ -793,6 +815,7 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
on_each_cpu_mask(mm_cpumask(mm), enable_lam_func, mm, true);
out:
mutex_unlock(&mm->context.lock);
+ mmap_write_unlock(mm);
return ret;
}
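
As an aside (not part of the patch), the two untag masks assigned above
work out to the following values; a standalone sketch of the GENMASK()
arithmetic:

#include <stdio.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* LAM_U57: ~GENMASK(62, 57) clears 6 tag bits  -> 0x81ffffffffffffff */
	printf("LAM_U57 untag_mask: %#018llx\n", ~GENMASK_ULL(62, 57));
	/* LAM_U48: ~GENMASK(62, 48) clears 15 tag bits -> 0x8000ffffffffffff */
	printf("LAM_U48 untag_mask: %#018llx\n", ~GENMASK_ULL(62, 48));
	/* Bit 63 survives both masks, so kernel addresses stay
	   distinguishable from user addresses after untagging. */
	return 0;
}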
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 8cc653ffdccd..5ea6aaed89ba 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -21,6 +21,7 @@
#include <asm/elf.h>
#include <asm/ia32.h>
+#include <asm/mmu_context.h>
/*
* Align a virtual address to avoid aliasing in the I$ on AMD F15h.
@@ -182,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
/* requesting a specific address */
if (addr) {
addr &= PAGE_MASK;
- if (!mmap_address_hint_valid(addr, len))
+ if (!mmap_address_hint_valid(mm, addr, len))
goto get_unmapped_area;
vma = find_vma(mm, addr);
@@ -203,7 +204,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
* !in_32bit_syscall() check to avoid high addresses for x32
* (and make it no op on native i386).
*/
- if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
+ if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall() && full_va_allowed(mm))
info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
info.align_mask = 0;
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index a0d023cb4292..9fdc8db42365 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -18,6 +18,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>
+#include <asm/mmu_context.h>
#if 0 /* This is just for testing */
struct page *
@@ -103,6 +104,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -114,7 +116,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
* If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
* in the full address space.
*/
- if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
+ if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall() && full_va_allowed(mm))
info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
@@ -161,7 +163,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) {
addr &= huge_page_mask(h);
- if (!mmap_address_hint_valid(addr, len))
+ if (!mmap_address_hint_valid(mm, addr, len))
goto get_unmapped_area;
vma = find_vma(mm, addr);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index c90c20904a60..aa0086722a38 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -21,6 +21,7 @@
#include <linux/elf-randomize.h>
#include <asm/elf.h>
#include <asm/io.h>
+#include <asm/mmu_context.h>
#include "physaddr.h"
@@ -35,6 +36,8 @@ unsigned long task_size_32bit(void)
unsigned long task_size_64bit(int full_addr_space)
{
+ if (!full_va_allowed(current->mm))
+ return DEFAULT_MAP_WINDOW;
return full_addr_space ? TASK_SIZE_MAX : DEFAULT_MAP_WINDOW;
}
@@ -170,6 +173,7 @@ const char *arch_vma_name(struct vm_area_struct *vma)
/**
* mmap_address_hint_valid - Validate the address hint of mmap
+ * @mm: Address space
* @addr: Address hint
* @len: Mapping length
*
@@ -206,11 +210,15 @@ const char *arch_vma_name(struct vm_area_struct *vma)
* the failure of such a fixed mapping request, so the restriction is not
* applied.
*/
-bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
+bool mmap_address_hint_valid(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
{
if (TASK_SIZE - len < addr)
return false;
+ if (addr + len > DEFAULT_MAP_WINDOW && !full_va_allowed(mm))
+ return false;
+
return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
}
--
2.35.1
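
To see the mutual exclusion from the userspace side, a hypothetical
sketch (assumes a 5-level paging machine and the arch_prctl() interface
from patch 06/13; the EBUSY comes from the lam_u48_allowed() check
above):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>	/* ARCH_ENABLE_TAGGED_ADDR, from patch 06/13 */

int main(void)
{
	/* A hint above the 47-bit DEFAULT_MAP_WINDOW opts this mm into
	   the full 5-level address space. */
	void *high = mmap((void *)(1UL << 48), 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (high == MAP_FAILED)
		return 1;	/* likely not a 5-level paging machine */

	/* With a VMA above DEFAULT_MAP_WINDOW, LAM_U48 must be refused */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 15)) {
		printf("LAM_U48 refused: errno=%d (EBUSY=%d)\n", errno, EBUSY);
		return 0;
	}
	return 2;	/* unexpected: both features enabled */
}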