From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: <linux-mm@kvack.org>, Andrew Morton <akpm@linux-foundation.org>,
<surenb@google.com>
Cc: Russell King <linux@armlinux.org.uk>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Huacai Chen <chenhuacai@kernel.org>,
WANG Xuerui <kernel@xen0n.name>,
Michael Ellerman <mpe@ellerman.id.au>,
Nicholas Piggin <npiggin@gmail.com>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
Paul Walmsley <paul.walmsley@sifive.com>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Gerald Schaefer <gerald.schaefer@linux.ibm.com>,
Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
<x86@kernel.org>, <linux-arm-kernel@lists.infradead.org>,
<linux-kernel@vger.kernel.org>, <loongarch@lists.linux.dev>,
<linuxppc-dev@lists.ozlabs.org>,
<linux-riscv@lists.infradead.org>, <linux-s390@vger.kernel.org>,
Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH rfc -next 01/10] mm: add a generic VMA lock-based page fault handler
Date: Thu, 13 Jul 2023 17:53:29 +0800
Message-ID: <20230713095339.189715-2-wangkefeng.wang@huawei.com>
In-Reply-To: <20230713095339.189715-1-wangkefeng.wang@huawei.com>
More and more architectures enable ARCH_SUPPORTS_PER_VMA_LOCK, e.g. x86,
arm64, powerpc, s390 and riscv, and their implementations are very similar,
which results in duplicated code. Add a generic VMA lock-based page fault
handler to eliminate the duplication; this also makes it easy to support
the feature on new architectures.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h | 28 ++++++++++++++++++++++++++++
 mm/memory.c        | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)
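
A minimal usage sketch (not part of the diff below) of how an architecture's
fault handler could adopt the helper; the do_page_fault() wrapper, the
VM_WRITE access right and the fault_code argument are made up for
illustration and are not taken from any in-tree handler:

	static void do_page_fault(struct pt_regs *regs, unsigned long addr,
				  unsigned long fault_code)
	{
		struct mm_struct *mm = current->mm;
		unsigned int flags = FAULT_FLAG_DEFAULT | FAULT_FLAG_USER;
		struct vm_locked_fault vmlf;
		vm_fault_t fault;

		/*
		 * vm_flags describes the access to be validated by
		 * arch_vma_check_access(), e.g. VM_WRITE for a write fault.
		 */
		VM_LOCKED_FAULT_INIT(vmlf, mm, addr, flags, VM_WRITE, regs, fault_code);

		/* 0 means the VMA-locked path was attempted and 'fault' holds its result. */
		if (!try_vma_locked_page_fault(&vmlf, &fault) &&
		    !(fault & VM_FAULT_RETRY))
			return;	/* handled under the per-VMA lock */

		/*
		 * -EINVAL (not a user fault, no vma, or access error) or
		 * VM_FAULT_RETRY: fall back to the existing mmap_lock-based
		 * slow path, checking for fatal signals as usual.
		 */
	}

On failure or retry the architecture's existing mmap_lock path runs
unchanged, so the helper can be adopted incrementally.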
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c7886784832b..cba1b7b19c9d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -633,6 +633,15 @@ static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */
+struct vm_locked_fault {
+ struct mm_struct *mm;
+ unsigned long address;
+ unsigned int fault_flags;
+ unsigned long vm_flags;
+ struct pt_regs *regs;
+ unsigned long fault_code;
+};
+
#ifdef CONFIG_PER_VMA_LOCK
/*
* Try to read-lock a vma. The function is allowed to occasionally yield false
@@ -733,6 +742,19 @@ static inline void assert_fault_locked(struct vm_fault *vmf)
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
+#define VM_LOCKED_FAULT_INIT(_name, _mm, _address, _fault_flags, _vm_flags, _regs, _fault_code) \
+ _name.mm = _mm; \
+ _name.address = _address; \
+ _name.fault_flags = _fault_flags; \
+ _name.vm_flags = _vm_flags; \
+ _name.regs = _regs; \
+ _name.fault_code = _fault_code
+
+int __weak arch_vma_check_access(struct vm_area_struct *vma,
+ struct vm_locked_fault *vmlf);
+
+int try_vma_locked_page_fault(struct vm_locked_fault *vmlf, vm_fault_t *ret);
+
#else /* CONFIG_PER_VMA_LOCK */
static inline bool vma_start_read(struct vm_area_struct *vma)
@@ -742,6 +764,12 @@ static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
static inline void vma_mark_detached(struct vm_area_struct *vma,
bool detached) {}
+#define VM_LOCKED_FAULT_INIT(_name, _mm, _address, _fault_flags, _vm_flags, _regs, _fault_code)
+static inline int try_vma_locked_page_fault(struct vm_locked_fault *vmlf,
+ vm_fault_t *ret)
+{
+ return -EINVAL;
+}
static inline void release_fault_lock(struct vm_fault *vmf)
{
diff --git a/mm/memory.c b/mm/memory.c
index ad790394963a..d3f5d1270e7a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5449,6 +5449,48 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
count_vm_vma_lock_event(VMA_LOCK_ABORT);
return NULL;
}
+
+int __weak arch_vma_check_access(struct vm_area_struct *vma,
+ struct vm_locked_fault *vmlf)
+{
+ if (!(vma->vm_flags & vmlf->vm_flags))
+ return -EINVAL;
+ return 0;
+}
+
+int try_vma_locked_page_fault(struct vm_locked_fault *vmlf, vm_fault_t *ret)
+{
+ struct vm_area_struct *vma;
+ vm_fault_t fault;
+
+ if (!(vmlf->fault_flags & FAULT_FLAG_USER))
+ return -EINVAL;
+
+ vma = lock_vma_under_rcu(vmlf->mm, vmlf->address);
+ if (!vma)
+ return -EINVAL;
+
+ if (arch_vma_check_access(vma, vmlf)) {
+ vma_end_read(vma);
+ return -EINVAL;
+ }
+
+ fault = handle_mm_fault(vma, vmlf->address,
+ vmlf->fault_flags | FAULT_FLAG_VMA_LOCK,
+ vmlf->regs);
+ *ret = fault;
+
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
+
+ if ((fault & VM_FAULT_RETRY))
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ else
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+
+ return 0;
+}
+
#endif /* CONFIG_PER_VMA_LOCK */
#ifndef __PAGETABLE_P4D_FOLDED
--
2.27.0
Thread overview: 15+ messages
2023-07-13 9:53 [PATCH rfc -next 00/10] mm: convert to generic VMA lock-based page fault Kefeng Wang
2023-07-13 9:53 ` Kefeng Wang [this message]
2023-07-13 16:15 ` [PATCH rfc -next 01/10] mm: add a generic VMA lock-based page fault handler Matthew Wilcox
2023-07-13 20:12 ` Suren Baghdasaryan
2023-07-14 1:52 ` Kefeng Wang
2023-07-15 1:54 ` Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 02/10] x86: mm: use try_vma_locked_page_fault() Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 03/10] arm64: " Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 04/10] s390: " Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 05/10] powerpc: " Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 06/10] riscv: " Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 07/10] ARM: mm: try VMA lock-based page fault handling first Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 08/10] loongarch: mm: cleanup __do_page_fault() Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 09/10] loongarch: mm: add access_error() helper Kefeng Wang
2023-07-13 9:53 ` [PATCH rfc -next 10/10] loongarch: mm: try VMA lock-based page fault handling first Kefeng Wang