From: Peter Oskolkov <posk@posk.io>
To: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Thomas Gleixner <tglx@linutronix.de>,
linux-kernel@vger.kernel.org, linux-api@vger.kernel.org
Cc: Paul Turner <pjt@google.com>, Ben Segall <bsegall@google.com>,
Peter Oskolkov <posk@google.com>, Peter Oskolkov <posk@posk.io>,
Joel Fernandes <joel@joelfernandes.org>,
Andrei Vagin <avagin@google.com>,
Jim Newsome <jnewsome@torproject.org>,
Jann Horn <jannh@google.com>
Subject: [RFC PATCH 2/3 v0.2] sched/umcg: RFC: add userspace atomic helpers
Date: Thu, 8 Jul 2021 12:46:37 -0700 [thread overview]
Message-ID: <20210708194638.128950-3-posk@google.com> (raw)
In-Reply-To: <20210708194638.128950-1-posk@google.com>
Add helper functions to work atomically with userspace 32/64 bit values -
there are some .*futex.* named helpers, but they are not exactly
what is needed for UMCG; I haven't found what else I could use, so I
rolled these.
At the moment only X86_64 is supported.
Note: the helpers should probably go into arch/ somewhere; I have
them in kernel/sched/umcg.h temporarily for convenience. Please
let me know where I should put them and how to name them.
Signed-off-by: Peter Oskolkov <posk@google.com>
---
kernel/sched/umcg.h | 264 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 264 insertions(+)
create mode 100644 kernel/sched/umcg.h
diff --git a/kernel/sched/umcg.h b/kernel/sched/umcg.h
new file mode 100644
index 000000000000..aa8fb24964ed
--- /dev/null
+++ b/kernel/sched/umcg.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _KERNEL_SCHED_UMCG_H
+#define _KERNEL_SCHED_UMCG_H
+
+#ifdef CONFIG_UMCG
+#ifdef CONFIG_X86_64
+
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/umcg.h>
+
+#include <asm/asm.h>
+#include <linux/atomic.h>
+
+/* TODO: move atomic operations below into arch/ headers */
+/*
+ * umcg_atomic_cmpxchg_32 - atomically compare-and-exchange a userspace u32.
+ * @uval:   out: receives the value that was at @uaddr before the operation
+ *          (equal to @oldval iff the exchange took place);
+ * @uaddr:  userspace address to operate on;
+ * @oldval: expected current value of *@uaddr;
+ * @newval: value to store if *@uaddr == @oldval.
+ *
+ * Does NOT fix up page faults: a fault inside the LOCK CMPXCHG is routed
+ * through the exception table (_ASM_EXTABLE_UA) to the .fixup code, which
+ * loads -EFAULT into the return value and resumes after the instruction.
+ * Callers wanting fault fixup should use umcg_cmpxchg_32_user() instead.
+ *
+ * Return: 0 on success (consult *@uval for the compare result),
+ *         -EFAULT if the user access faulted.
+ */
+static inline int umcg_atomic_cmpxchg_32(u32 *uval, u32 __user *uaddr,
+					 u32 oldval, u32 newval)
+{
+	int ret = 0;
+
+	if (!user_access_begin(uaddr, sizeof(u32)))
+		return -EFAULT;
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov %3, %0\n"
+		"\tjmp 2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+		);
+	user_access_end();
+	*uval = oldval;
+	return ret;
+}
+
+/*
+ * umcg_atomic_cmpxchg_64 - atomically compare-and-exchange a userspace u64.
+ * @uval:   out: receives the value that was at @uaddr before the operation
+ *          (equal to @oldval iff the exchange took place);
+ * @uaddr:  userspace address to operate on;
+ * @oldval: expected current value of *@uaddr;
+ * @newval: value to store if *@uaddr == @oldval.
+ *
+ * 64-bit variant of umcg_atomic_cmpxchg_32(); see the comment there.
+ * Does NOT fix up page faults: a fault in the LOCK CMPXCHGQ lands in the
+ * .fixup code via the exception table and yields -EFAULT.
+ *
+ * Return: 0 on success (consult *@uval for the compare result),
+ *         -EFAULT if the user access faulted.
+ */
+static inline int umcg_atomic_cmpxchg_64(u64 *uval, u64 __user *uaddr,
+					 u64 oldval, u64 newval)
+{
+	int ret = 0;
+
+	if (!user_access_begin(uaddr, sizeof(u64)))
+		return -EFAULT;
+	asm volatile("\n"
+		"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"
+		"2:\n"
+		"\t.section .fixup, \"ax\"\n"
+		"3:\tmov %3, %0\n"
+		"\tjmp 2b\n"
+		"\t.previous\n"
+		_ASM_EXTABLE_UA(1b, 3b)
+		: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "1" (oldval)
+		: "memory"
+		);
+	user_access_end();
+	*uval = oldval;
+	return ret;
+}
+
+/*
+ * fix_pagefault - fault in the page backing @uaddr in the current mm.
+ * @uaddr:       the faulting userspace address;
+ * @write_fault: true if the page must be made writable.
+ *
+ * Used after a pagefault_disable()'d accessor returned -EFAULT, to
+ * resolve the fault before retrying the access.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static inline int fix_pagefault(unsigned long uaddr, bool write_fault)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned int flags = write_fault ? FAULT_FLAG_WRITE : 0;
+	int err;
+
+	mmap_read_lock(mm);
+	err = fixup_user_fault(mm, uaddr, flags, NULL);
+	mmap_read_unlock(mm);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+/*
+ * umcg_get_user_32 - read a u32 from userspace, fixing up page faults.
+ * @uaddr: userspace address to read;
+ * @val:   out: receives the value read.
+ *
+ * The fast path reads with page faults disabled; on -EFAULT the page is
+ * faulted in via fix_pagefault() and the read is retried.
+ *
+ * Return: 0 on success, -EFAULT on error.
+ */
+static inline int umcg_get_user_32(u32 __user *uaddr, u32 *val)
+{
+	for (;;) {
+		u32 tmp;
+		int ret;
+
+		pagefault_disable();
+		ret = __get_user(tmp, uaddr);
+		pagefault_enable();
+
+		if (ret == 0) {
+			*val = tmp;
+			return 0;
+		}
+
+		/* __get_user() should only fail with -EFAULT here. */
+		if (WARN_ONCE(ret != -EFAULT, "Unexpected error"))
+			return -EFAULT;
+
+		if (fix_pagefault((unsigned long)uaddr, false))
+			return -EFAULT;
+	}
+}
+
+/**
+ * umcg_cmpxchg_32_user - compare_exchange 32-bit values
+ *
+ * On a page fault the page is faulted in via fix_pagefault() and the
+ * operation is retried; on success *@prev holds the value found at @uaddr.
+ *
+ * Return:
+ * 0 - OK
+ * -EFAULT: memory access error
+ * -EAGAIN: @expected did not match; consult @prev
+ */
+static inline int umcg_cmpxchg_32_user(u32 __user *uaddr, u32 *prev, u32 val)
+{
+	for (;;) {
+		u32 old = *prev;
+		int ret;
+
+		pagefault_disable();
+		ret = umcg_atomic_cmpxchg_32(prev, uaddr, old, val);
+		pagefault_enable();
+
+		if (ret == 0)
+			return (*prev == old) ? 0 : -EAGAIN;
+
+		/* The cmpxchg helper should only fail with -EFAULT here. */
+		if (WARN_ONCE(ret != -EFAULT, "Unexpected error"))
+			return -EFAULT;
+
+		if (fix_pagefault((unsigned long)uaddr, true))
+			return -EFAULT;
+	}
+}
+
+/**
+ * umcg_cmpxchg_64_user - compare_exchange 64-bit values
+ *
+ * On a page fault the page is faulted in via fix_pagefault() and the
+ * operation is retried; on success *@prev holds the value found at @uaddr.
+ *
+ * Return:
+ * 0 - OK
+ * -EFAULT: memory access error
+ * -EAGAIN: @expected did not match; consult @prev
+ */
+static inline int umcg_cmpxchg_64_user(u64 __user *uaddr, u64 *prev, u64 val)
+{
+	for (;;) {
+		u64 old = *prev;
+		int ret;
+
+		pagefault_disable();
+		ret = umcg_atomic_cmpxchg_64(prev, uaddr, old, val);
+		pagefault_enable();
+
+		if (ret == 0)
+			return (*prev == old) ? 0 : -EAGAIN;
+
+		/* The cmpxchg helper should only fail with -EFAULT here. */
+		if (WARN_ONCE(ret != -EFAULT, "Unexpected error"))
+			return -EFAULT;
+
+		if (fix_pagefault((unsigned long)uaddr, true))
+			return -EFAULT;
+	}
+}
+
+/**
+ * atomic_stack_push_user - push a node onto the stack
+ * @head - a pointer to the head of the stack;
+ * @node - a pointer to the node to push.
+ *
+ * Push a node onto a single-linked list (stack). Atomicity/correctness
+ * is guaranteed by locking the head via setting its first bit (assuming
+ * the pointer is aligned); the push side only spins while the head is
+ * marked and otherwise relies on cmpxchg to detect concurrent updates.
+ *
+ * Return: 0 on success, -EFAULT on error.
+ */
+static inline int atomic_stack_push_user(u64 __user *head, u64 __user *node)
+{
+	while (true) {
+		int ret;
+		u64 first;
+
+		smp_mb();	/* Order against concurrent writers of @head. */
+		if (get_user(first, head))
+			return -EFAULT;
+
+		if (first & 1UL) {
+			cpu_relax();
+			continue;	/* first is being deleted. */
+		}
+
+		/* Link the new node to the current first before publishing it. */
+		if (put_user(first, node))
+			return -EFAULT;
+		smp_mb();	/* Make the write above visible. */
+
+		ret = umcg_cmpxchg_64_user(head, &first, (u64)node);
+		if (!ret)
+			return 0;
+
+		if (ret == -EAGAIN) {
+			cpu_relax();
+			continue;
+		}
+
+		/*
+		 * Only -EFAULT is expected at this point; warn on anything
+		 * else, but return -EFAULT either way.
+		 */
+		WARN_ONCE(ret != -EFAULT, "unexpected umcg_cmpxchg result");
+		return -EFAULT;
+	}
+}
+
+/**
+ * atomic_stack_pop_user - pop a node from the stack
+ * @head - a pointer to the head of the stack;
+ * @value - a pointer to where store the popped value.
+ *
+ * Pop a node from a single-linked list (stack). Atomicity/correctness
+ * is guaranteed by locking the head via setting its first bit (assuming
+ * the pointer is aligned).
+ *
+ * Note: on success, @value should be cast to (u64 __user *) if not zero.
+ *
+ * NOTE(review): the lock bit only excludes other poppers; userspace that
+ * frees/reuses nodes concurrently could still trigger ABA - confirm the
+ * usage contract with the callers.
+ *
+ * Return: 0 on success, -EFAULT on error.
+ */
+static inline int atomic_stack_pop_user(u64 __user *head, u64 *value)
+{
+	while (true) {
+		int ret;
+		u64 next, first;
+
+		smp_mb();	/* Order against concurrent writers of @head. */
+		if (get_user(first, head))
+			return -EFAULT;
+
+		if (!first) {
+			*value = 0UL;	/* Empty stack. */
+			return 0;
+		}
+
+		if (first & 1UL) {
+			cpu_relax();
+			continue;	/* first is being deleted. */
+		}
+
+		/* Lock the head by tagging its low bit. */
+		ret = umcg_cmpxchg_64_user(head, &first, first | 1UL);
+		if (ret == -EAGAIN) {
+			cpu_relax();
+			continue;
+		}
+
+		if (ret)
+			return -EFAULT;
+
+		if (get_user(next, (u64 __user *)first)) {
+			/*
+			 * Do not leave the head locked forever, or every
+			 * later push/pop would spin indefinitely: try to
+			 * restore the untagged head before bailing out.
+			 * If this cmpxchg fails there is nothing more we
+			 * can do here.
+			 */
+			u64 locked = first | 1UL;
+
+			umcg_cmpxchg_64_user(head, &locked, first);
+			return -EFAULT;
+		}
+
+		first |= 1UL;
+
+		/* Unlock the head by replacing it with the next node. */
+		ret = umcg_cmpxchg_64_user(head, &first, next);
+		if (ret)
+			return -EFAULT;
+
+		*value = first & ~1UL;
+		return 0;
+	}
+}
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_UMCG */
+#endif /* _KERNEL_SCHED_UMCG_H */
--
2.25.1
next prev parent reply other threads:[~2021-07-08 19:46 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-08 19:46 [RFC PATCH 0/3 v0.2] RFC: sched/UMCG Peter Oskolkov
2021-07-08 19:46 ` [RFC PATCH 1/3 v0.2] sched: add WF_CURRENT_CPU and externise ttwu Peter Oskolkov
2021-07-08 19:46 ` Peter Oskolkov [this message]
2021-07-08 21:12 ` [RFC PATCH 2/3 v0.2] sched/umcg: RFC: add userspace atomic helpers Jann Horn
2021-07-09 4:01 ` Peter Oskolkov
2021-07-09 8:01 ` Peter Zijlstra
2021-07-09 16:57 ` Peter Oskolkov
2021-07-09 17:33 ` Peter Oskolkov
2021-07-13 16:10 ` Peter Zijlstra
2021-07-13 17:14 ` Peter Oskolkov
2021-07-08 19:46 ` [RFC PATCH 3/3 v0.2] sched/umcg: RFC: implement UMCG syscalls Peter Oskolkov
2021-07-11 16:35 ` Peter Oskolkov
2021-07-11 18:29 ` Thierry Delisle
2021-07-12 15:40 ` Peter Oskolkov
2021-07-12 21:44 ` Thierry Delisle
2021-07-12 23:31 ` Peter Oskolkov
2021-07-13 14:02 ` Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210708194638.128950-3-posk@google.com \
--to=posk@posk.io \
--cc=avagin@google.com \
--cc=bsegall@google.com \
--cc=jannh@google.com \
--cc=jnewsome@torproject.org \
--cc=joel@joelfernandes.org \
--cc=linux-api@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=peterz@infradead.org \
--cc=pjt@google.com \
--cc=posk@google.com \
--cc=tglx@linutronix.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox