From: guoren@kernel.org
To: paul.walmsley@sifive.com, palmer@dabbelt.com, guoren@kernel.org,
panqinglin2020@iscas.ac.cn, bjorn@rivosinc.com,
conor.dooley@microchip.com, leobras@redhat.com,
peterz@infradead.org, anup@brainfault.org, keescook@chromium.org,
wuwei2016@iscas.ac.cn, xiaoguang.xing@sophgo.com,
chao.wei@sophgo.com, unicorn_wang@outlook.com, uwu@icenowy.me,
jszhang@kernel.org, wefu@redhat.com, atishp@atishpatra.org
Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org,
kvm@vger.kernel.org, virtualization@lists.linux-foundation.org,
Guo Ren <guoren@linux.alibaba.com>
Subject: [PATCH V12 10/14] RISC-V: paravirt: Add pvqspinlock frontend skeleton
Date: Mon, 25 Dec 2023 07:58:43 -0500
Message-ID: <20231225125847.2778638-11-guoren@kernel.org>
In-Reply-To: <20231225125847.2778638-1-guoren@kernel.org>
From: Guo Ren <guoren@linux.alibaba.com>
Use static_call to switch between:

  native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
  native_queued_spin_unlock()           __pv_queued_spin_unlock()

Finish the pv_wait() implementation; pv_kick() is left empty for now
because it needs the SBI extension defined in the next patches.
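For reference, the generic locking fast path that reaches the new
static_call site looks roughly like this (simplified from
asm-generic/qspinlock.h, for illustration only, not part of this patch):

  static __always_inline void queued_spin_lock(struct qspinlock *lock)
  {
          int val = 0;

          /* Uncontended case: a single acquire cmpxchg takes the lock. */
          if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                  return;

          /*
           * Contended case: dispatched through the static_call-selected
           * slow path (native or pv), as added by this patch.
           */
          queued_spin_lock_slowpath(lock, val);
  }

The uncontended lock fast path is therefore unchanged; only the slow path
and queued_spin_unlock() go through the static_call sites that
pv_qspinlock_init() retargets at boot when running as a KVM guest.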
Reviewed-by: Leonardo Bras <leobras@redhat.com>
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
---
arch/riscv/include/asm/Kbuild | 1 -
arch/riscv/include/asm/qspinlock.h | 35 +++++++++++++
arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
arch/riscv/kernel/qspinlock_paravirt.c | 57 +++++++++++++++++++++
arch/riscv/kernel/setup.c | 4 ++
5 files changed, 125 insertions(+), 1 deletion(-)
create mode 100644 arch/riscv/include/asm/qspinlock.h
create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index ad72f2bd4cc9..85a428ad116d 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -8,6 +8,5 @@ generic-y += spinlock_types.h
generic-y += ticket_spinlock.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
-generic-y += qspinlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
new file mode 100644
index 000000000000..02ce973b5b6e
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Alibaba
+ * Authors:
+ * Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_H
+#define _ASM_RISCV_QSPINLOCK_H
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/qspinlock_paravirt.h>
+
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_init_lock_hash(void);
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ static_call(pv_queued_spin_lock_slowpath)(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ static_call(pv_queued_spin_unlock)(lock);
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_RISCV_QSPINLOCK_H */
diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..9681e851f69d
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Alibaba Cloud
+ * Authors:
+ * Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+
+void pv_wait(u8 *ptr, u8 val);
+void pv_kick(int cpu);
+
+void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void dummy_queued_spin_unlock(struct qspinlock *lock);
+
+DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
+DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void);
+
+static inline bool pv_is_native_spin_unlock(void)
+{
+ return false;
+}
+
+void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
new file mode 100644
index 000000000000..85ff5a3ec234
--- /dev/null
+++ b/arch/riscv/kernel/qspinlock_paravirt.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Alibaba Cloud
+ * Authors:
+ * Guo Ren <guoren@linux.alibaba.com>
+ */
+
+#include <linux/static_call.h>
+#include <asm/qspinlock_paravirt.h>
+#include <asm/sbi.h>
+
+void pv_kick(int cpu)
+{
+ return;
+}
+
+void pv_wait(u8 *ptr, u8 val)
+{
+ unsigned long flags;
+
+ if (in_nmi())
+ return;
+
+ local_irq_save(flags);
+ if (READ_ONCE(*ptr) != val)
+ goto out;
+
+ /* wait_for_interrupt(); */
+out:
+ local_irq_restore(flags);
+}
+
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+ smp_store_release(&lock->locked, 0);
+}
+
+DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
+
+DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
+EXPORT_STATIC_CALL(pv_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void)
+{
+ if (num_possible_cpus() == 1)
+ return;
+
+ if (sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
+ return;
+
+ pr_info("PV qspinlocks enabled\n");
+ __pv_init_lock_hash();
+
+ static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
+ static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index e33430e9d97e..052bbfbb7f32 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -304,6 +304,10 @@ static void __init riscv_spinlock_init(void)
#ifdef CONFIG_QUEUED_SPINLOCKS
virt_spin_lock_init();
#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+ pv_qspinlock_init();
+#endif
}
#endif
--
2.40.1