From: Peter Oskolkov <posk@posk.io>
To: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Thomas Gleixner <tglx@linutronix.de>,
Andrew Morton <akpm@linux-foundation.org>,
Dave Hansen <dave.hansen@linux.intel.com>,
Andy Lutomirski <luto@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
linux-api@vger.kernel.org
Cc: Paul Turner <pjt@google.com>, Ben Segall <bsegall@google.com>,
Peter Oskolkov <posk@google.com>, Peter Oskolkov <posk@posk.io>,
Andrei Vagin <avagin@google.com>, Jann Horn <jannh@google.com>,
Thierry Delisle <tdelisle@uwaterloo.ca>
Subject: [PATCH v0.9 2/6] mm, x86/uaccess: add userspace atomic helpers
Date: Sun, 21 Nov 2021 13:20:36 -0800 [thread overview]
Message-ID: <20211121212040.8649-3-posk@google.com> (raw)
In-Reply-To: <20211121212040.8649-1-posk@google.com>
In addition to futexes needing to do atomic operations in userspace,
a second use case is now in the works (UMCG, see
https://lore.kernel.org/all/20210917180323.278250-1-posk@google.com/),
so a generic facility to perform these operations has been called for
(see https://lore.kernel.org/all/87ilyk9xc0.ffs@tglx/).

Add a set of generic helpers to perform 32/64-bit xchg and cmpxchg
operations on userspace addresses. Also implement the required
architecture-specific support on x86_64.
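
For illustration, a rough sketch of how the 'may fault' variants are
expected to be called (the helper update_user_flags() below and its
caller are hypothetical, not part of this patch): on -EAGAIN the
helper has already copied the value it observed at @uaddr back into
the kernel-side variable, so the caller only needs to recompute the
desired new value and retry.

	/*
	 * Hypothetical example: atomically set bits in a 64-bit word
	 * in userspace memory using the may-fault cmpxchg helper.
	 */
	static int update_user_flags(u64 __user *uaddr, u64 set_bits)
	{
		u64 curr;
		int ret;

		/* Seed the compare with the current userspace value. */
		if (get_user(curr, uaddr))
			return -EFAULT;

		for (;;) {
			ret = cmpxchg_user_64(uaddr, &curr, curr | set_bits);
			if (ret != -EAGAIN)
				return ret; /* 0, -EFAULT or -EINVAL */
			/* curr now holds the observed value; retry. */
		}
	}

The _nofault variants follow the same calling convention, but are
meant for atomic/preempt-disabled contexts and return -EFAULT instead
of sleeping if the page needs to be faulted in.
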
Signed-off-by: Peter Oskolkov <posk@google.com>
---
arch/x86/include/asm/uaccess_64.h | 93 +++++++++++
include/linux/uaccess.h | 46 ++++++
mm/maccess.c | 264 ++++++++++++++++++++++++++++++
3 files changed, 403 insertions(+)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 45697e04d771..41e2f96d3ec4 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -79,4 +79,97 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, src, size);
}
+
+#define ARCH_HAS_ATOMIC_UACCESS_HELPERS 1
+
+static inline int __try_cmpxchg_user_32(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+
+ asm volatile("\n"
+ "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+ "2:\n"
+ "\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE_UA(1b, 3b)
+ : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
+ *uval = oldval;
+ return ret;
+}
+
+static inline int __try_cmpxchg_user_64(u64 *uval, u64 __user *uaddr,
+ u64 oldval, u64 newval)
+{
+ int ret = 0;
+
+ asm volatile("\n"
+ "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"
+ "2:\n"
+ "\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE_UA(1b, 3b)
+ : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "memory"
+ );
+ *uval = oldval;
+ return ret;
+}
+
+static inline int __try_xchg_user_32(u32 *oval, u32 __user *uaddr, u32 newval)
+{
+ u32 oldval = 0;
+ int ret = 0;
+
+ asm volatile("\n"
+ "1:\txchgl %0, %2\n"
+ "2:\n"
+ "\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %1\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE_UA(1b, 3b)
+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr)
+ : "i" (-EFAULT), "0" (newval), "1" (0)
+ );
+
+ if (ret)
+ return ret;
+
+ *oval = oldval;
+ return 0;
+}
+
+static inline int __try_xchg_user_64(u64 *oval, u64 __user *uaddr, u64 newval)
+{
+ u64 oldval = 0;
+ int ret = 0;
+
+ asm volatile("\n"
+ "1:\txchgq %0, %2\n"
+ "2:\n"
+ "\t.section .fixup, \"ax\"\n"
+ "3:\tmov %3, %1\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE_UA(1b, 3b)
+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr)
+ : "i" (-EFAULT), "0" (newval), "1" (0)
+ );
+
+ if (ret)
+ return ret;
+
+ *oval = oldval;
+ return 0;
+}
+
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ac0394087f7d..dcb3ac093075 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -408,4 +408,50 @@ void __noreturn usercopy_abort(const char *name, const char *detail,
unsigned long len);
#endif
+#ifdef ARCH_HAS_ATOMIC_UACCESS_HELPERS
+/**
+ * cmpxchg_user_[32|64][_nofault|]() - compare-and-exchange 32/64-bit values
+ * @uaddr: Destination address, in user space;
+ * @curr_val: Source address, in kernel space;
+ * @new_val: The value to write to the destination address.
+ *
+ * This is the standard cmpxchg, done atomically: compare *@uaddr to *@curr_val;
+ * if the values match, write @new_val to @uaddr and return 0; if they do not
+ * match, copy the value at @uaddr into *@curr_val and return -EAGAIN.
+ *
+ * The _nofault versions don't fault and can be used in
+ * atomic/preempt-disabled contexts.
+ *
+ * Return:
+ * 0 : OK/success;
+ * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
+ * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault);
+ * -EAGAIN: the value at @uaddr did not match *@curr_val.
+ */
+int cmpxchg_user_32_nofault(u32 __user *uaddr, u32 *curr_val, u32 new_val);
+int cmpxchg_user_64_nofault(u64 __user *uaddr, u64 *curr_val, u64 new_val);
+int cmpxchg_user_32(u32 __user *uaddr, u32 *curr_val, u32 new_val);
+int cmpxchg_user_64(u64 __user *uaddr, u64 *curr_val, u64 new_val);
+
+/**
+ * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
+ * @uaddr: Destination address, in user space;
+ * @val: Source address, in kernel space.
+ *
+ * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
+ *
+ * The _nofault versions don't fault and can be used in
+ * atomic/preempt-disabled contexts.
+ *
+ * Return:
+ * 0 : OK/success;
+ * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
+ * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).
+ */
+int xchg_user_32_nofault(u32 __user *uaddr, u32 *val);
+int xchg_user_64_nofault(u64 __user *uaddr, u64 *val);
+int xchg_user_32(u32 __user *uaddr, u32 *val);
+int xchg_user_64(u64 __user *uaddr, u64 *val);
+#endif /* ARCH_HAS_ATOMIC_UACCESS_HELPERS */
+
#endif /* __LINUX_UACCESS_H__ */
diff --git a/mm/maccess.c b/mm/maccess.c
index d3f1a1f0b1c1..620556b11550 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -335,3 +335,267 @@ long strnlen_user_nofault(const void __user *unsafe_addr, long count)
return ret;
}
+
+#ifdef ARCH_HAS_ATOMIC_UACCESS_HELPERS
+
+static int fix_pagefault(unsigned long uaddr, bool write_fault, int bytes)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ mmap_read_lock(mm);
+ ret = fixup_user_fault(mm, uaddr, write_fault ? FAULT_FLAG_WRITE : 0,
+ NULL);
+ mmap_read_unlock(mm);
+
+ return ret < 0 ? ret : 0;
+}
+
+int cmpxchg_user_32_nofault(u32 __user *uaddr, u32 *curr_val, u32 new_val)
+{
+ int ret = -EFAULT;
+ u32 __old = *curr_val;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+ pagefault_enable();
+ return -EFAULT;
+ }
+ ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
+ user_access_end();
+
+ if (!ret)
+ ret = *curr_val == __old ? 0 : -EAGAIN;
+
+ pagefault_enable();
+ return ret;
+}
+
+int cmpxchg_user_64_nofault(u64 __user *uaddr, u64 *curr_val, u64 new_val)
+{
+ int ret = -EFAULT;
+ u64 __old = *curr_val;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+ pagefault_enable();
+ return -EFAULT;
+ }
+ ret = __try_cmpxchg_user_64(curr_val, uaddr, __old, new_val);
+ user_access_end();
+
+ if (!ret)
+ ret = *curr_val == __old ? 0 : -EAGAIN;
+
+ pagefault_enable();
+
+ return ret;
+}
+
+int cmpxchg_user_32(u32 __user *uaddr, u32 *curr_val, u32 new_val)
+{
+ int ret = -EFAULT;
+ u32 __old = *curr_val;
+
+ /* Validate proper alignment. */
+ if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+ ((unsigned long)curr_val % sizeof(*curr_val))))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ while (true) {
+ ret = -EFAULT;
+ if (!user_access_begin(uaddr, sizeof(*uaddr)))
+ break;
+
+ ret = __try_cmpxchg_user_32(curr_val, uaddr, __old, new_val);
+ user_access_end();
+
+ if (!ret) {
+ ret = *curr_val == __old ? 0 : -EAGAIN;
+ break;
+ }
+
+ if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+ break;
+ }
+
+ pagefault_enable();
+ return ret;
+}
+
+int cmpxchg_user_64(u64 __user *uaddr, u64 *curr_val, u64 new_val)
+{
+ int ret = -EFAULT;
+ u64 __old = *curr_val;
+
+ /* Validate proper alignment. */
+ if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+ ((unsigned long)curr_val % sizeof(*curr_val))))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ while (true) {
+ ret = -EFAULT;
+ if (!user_access_begin(uaddr, sizeof(*uaddr)))
+ break;
+
+ ret = __try_cmpxchg_user_64(curr_val, uaddr, __old, new_val);
+ user_access_end();
+
+ if (!ret) {
+ ret = *curr_val == __old ? 0 : -EAGAIN;
+ break;
+ }
+
+ if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+ break;
+ }
+
+ pagefault_enable();
+ return ret;
+}
+
+/**
+ * xchg_user_[32|64][_nofault|]() - exchange 32/64-bit values
+ * @uaddr: Destination address, in user space;
+ * @val: Source address, in kernel space.
+ *
+ * This is the standard atomic xchg: exchange values pointed to by @uaddr and @val.
+ *
+ * The _nofault versions don't fault and can be used in
+ * atomic/preempt-disabled contexts.
+ *
+ * Return:
+ * 0 : OK/success;
+ * -EINVAL: @uaddr is not properly aligned ('may fault' versions only);
+ * -EFAULT: memory access error (including mis-aligned @uaddr in _nofault).
+ */
+int xchg_user_32_nofault(u32 __user *uaddr, u32 *val)
+{
+ int ret;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+ pagefault_enable();
+ return -EFAULT;
+ }
+
+ ret = __try_xchg_user_32(val, uaddr, *val);
+ user_access_end();
+
+ pagefault_enable();
+
+ return ret;
+}
+
+int xchg_user_64_nofault(u64 __user *uaddr, u64 *val)
+{
+ int ret;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ if (!user_access_begin(uaddr, sizeof(*uaddr))) {
+ pagefault_enable();
+ return -EFAULT;
+ }
+
+ ret = __try_xchg_user_64(val, uaddr, *val);
+ user_access_end();
+
+ pagefault_enable();
+
+ return ret;
+}
+
+int xchg_user_32(u32 __user *uaddr, u32 *val)
+{
+ int ret = -EFAULT;
+
+ /* Validate proper alignment. */
+ if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+ ((unsigned long)val % sizeof(*val))))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ while (true) {
+ ret = -EFAULT;
+ if (!user_access_begin(uaddr, sizeof(*uaddr)))
+ break;
+
+ ret = __try_xchg_user_32(val, uaddr, *val);
+ user_access_end();
+
+ if (!ret)
+ break;
+
+ if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+ break;
+ }
+
+ pagefault_enable();
+
+ return ret;
+}
+
+int xchg_user_64(u64 __user *uaddr, u64 *val)
+{
+ int ret = -EFAULT;
+
+ /* Validate proper alignment. */
+ if (unlikely(((unsigned long)uaddr % sizeof(*uaddr)) ||
+ ((unsigned long)val % sizeof(*val))))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(uaddr, sizeof(*uaddr))))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ while (true) {
+ ret = -EFAULT;
+ if (!user_access_begin(uaddr, sizeof(*uaddr)))
+ break;
+
+ ret = __try_xchg_user_64(val, uaddr, *val);
+ user_access_end();
+
+ if (!ret)
+ break;
+
+ if (fix_pagefault((unsigned long)uaddr, true, sizeof(*uaddr)) < 0)
+ break;
+ }
+
+ pagefault_enable();
+
+ return ret;
+}
+#endif /* ARCH_HAS_ATOMIC_UACCESS_HELPERS */
--
2.25.1