From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>
Cc: x86@kernel.org, Kostya Serebryany <kcc@google.com>,
Andrey Ryabinin <ryabinin.a.a@gmail.com>,
Andrey Konovalov <andreyknvl@gmail.com>,
Alexander Potapenko <glider@google.com>,
Taras Madan <tarasmadan@google.com>,
Dmitry Vyukov <dvyukov@google.com>,
"H . J . Lu" <hjl.tools@gmail.com>,
Andi Kleen <ak@linux.intel.com>,
Rick Edgecombe <rick.p.edgecombe@intel.com>,
Bharata B Rao <bharata@amd.com>,
Jacob Pan <jacob.jun.pan@linux.intel.com>,
Ashok Raj <ashok.raj@intel.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv11 11/16] x86/mm, iommu/sva: Make LAM and SVA mutually exclusive
Date: Tue, 25 Oct 2022 03:17:17 +0300
Message-ID: <20221025001722.17466-12-kirill.shutemov@linux.intel.com>
In-Reply-To: <20221025001722.17466-1-kirill.shutemov@linux.intel.com>

IOMMU and SVA-capable devices know nothing about LAM and expect only
canonical addresses. An attempt to pass a tagged pointer down to them
will lead to an address translation failure.

By default, do not allow enabling LAM and using SVA in the same
process.

The new ARCH_FORCE_TAGGED_SVA arch_prctl() overrides this limitation.
By using the arch_prctl(), userspace takes responsibility for never
passing tagged addresses to the device.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
Reviewed-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
---
arch/x86/include/asm/mmu.h | 6 ++++--
arch/x86/include/asm/mmu_context.h | 6 ++++++
arch/x86/include/uapi/asm/prctl.h | 1 +
arch/x86/kernel/process_64.c | 12 ++++++++++++
drivers/iommu/iommu-sva-lib.c | 12 ++++++++++++
include/linux/mmu_context.h | 7 +++++++
6 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 2fdb390040b5..1215b0a714c9 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,9 +9,11 @@
#include <linux/bits.h>
/* Uprobes on this MM assume 32-bit code */
-#define MM_CONTEXT_UPROBE_IA32 BIT(0)
+#define MM_CONTEXT_UPROBE_IA32 BIT(0)
/* vsyscall page is accessible on this MM */
-#define MM_CONTEXT_HAS_VSYSCALL BIT(1)
+#define MM_CONTEXT_HAS_VSYSCALL BIT(1)
+/* Allow LAM and SVA coexisting */
+#define MM_CONTEXT_FORCE_TAGGED_SVA BIT(2)
/*
* x86 has arch-specific MMU state beyond what lives in mm_struct.
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 84277547ad28..7bb572d3f612 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -114,6 +114,12 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
mm->context.untag_mask = -1UL;
}
+#define arch_pgtable_dma_compat arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+ return !mm_lam_cr3_mask(mm) ||
+ (mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVA);
+}
#else
static inline unsigned long mm_lam_cr3_mask(struct mm_struct *mm)
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index a31e27b95b19..eb290d89cb32 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -23,5 +23,6 @@
#define ARCH_GET_UNTAG_MASK 0x4001
#define ARCH_ENABLE_TAGGED_ADDR 0x4002
#define ARCH_GET_MAX_TAG_BITS 0x4003
+#define ARCH_FORCE_TAGGED_SVA 0x4004
#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9952e9f517ec..3c9a4d923d6d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -783,6 +783,12 @@ static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits)
goto out;
}
+ if (mm_valid_pasid(mm) &&
+ !(mm->context.flags & MM_CONTEXT_FORCE_TAGGED_SVA)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
if (!nr_bits) {
ret = -EINVAL;
goto out;
@@ -893,6 +899,12 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
(unsigned long __user *)arg2);
case ARCH_ENABLE_TAGGED_ADDR:
return prctl_enable_tagged_addr(task->mm, arg2);
+ case ARCH_FORCE_TAGGED_SVA:
+ if (mmap_write_lock_killable(task->mm))
+ return -EINTR;
+ task->mm->context.flags |= MM_CONTEXT_FORCE_TAGGED_SVA;
+ mmap_write_unlock(task->mm);
+ return 0;
case ARCH_GET_MAX_TAG_BITS:
if (!cpu_feature_enabled(X86_FEATURE_LAM))
return put_user(0, (unsigned long __user *)arg2);
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index 27be6b81e0b5..8934c32e923c 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -2,6 +2,8 @@
/*
* Helpers for IOMMU drivers implementing SVA
*/
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
@@ -31,6 +33,15 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
min == 0 || max < min)
return -EINVAL;
+ /* Serialize against address tagging enabling */
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ if (!arch_pgtable_dma_compat(mm)) {
+ mmap_write_unlock(mm);
+ return -EBUSY;
+ }
+
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (mm_valid_pasid(mm)) {
@@ -46,6 +57,7 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
mm_pasid_set(mm, pasid);
out:
mutex_unlock(&iommu_sva_lock);
+ mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 14b9c1fa05c4..f2b7a3f04099 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -35,4 +35,11 @@ static inline unsigned long mm_untag_mask(struct mm_struct *mm)
}
#endif
+#ifndef arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+ return true;
+}
+#endif
+
#endif
--
2.38.0