* [v3, 1/5] riscv: Add support for kernel mode vector
2023-10-19 15:45 [v3, 0/5] riscv: support kernel-mode Vector Andy Chiu
@ 2023-10-19 15:45 ` Andy Chiu
2023-10-24 11:16 ` Conor Dooley
2023-10-19 15:45 ` [v3, 2/5] riscv: Add vector extension XOR implementation Andy Chiu
` (3 subsequent siblings)
4 siblings, 1 reply; 10+ messages in thread
From: Andy Chiu @ 2023-10-19 15:45 UTC (permalink / raw)
To: linux-riscv, palmer
Cc: paul.walmsley, greentime.hu, guoren, bjorn, peterz, tglx,
Vincent Chen, Andy Chiu, Albert Ou, Heiko Stuebner, Guo Ren,
Björn Töpel, Conor Dooley, Alexandre Ghiti, Evan Green,
Xianting Tian, Anup Patel, Sami Tolvanen, Sia Jee Heng,
Jisheng Zhang
From: Greentime Hu <greentime.hu@sifive.com>
Add kernel_vector_begin() and kernel_vector_end() function declarations
and corresponding definitions in kernel_mode_vector.c.
These are needed to wrap uses of vector in kernel mode.
Co-developed-by: Vincent Chen <vincent.chen@sifive.com>
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
---
Changelog v3:
- Reorder patch 1 to patch 3 to make use of
{get,put}_cpu_vector_context later.
- Export {get,put}_cpu_vector_context.
- Save V context after disabling preemption. (Guo)
- Fix a build fail. (Conor)
- Remove irqs_disabled() check as it is not needed, fix styling. (Björn)
Changelog v2:
- 's/kernel_rvv/kernel_vector' and return void in kernel_vector_begin
(Conor)
- export may_use_simd to include/asm/simd.h
---
arch/riscv/include/asm/simd.h | 49 +++++++++++++
arch/riscv/include/asm/vector.h | 4 ++
arch/riscv/kernel/Makefile | 1 +
arch/riscv/kernel/kernel_mode_vector.c | 99 ++++++++++++++++++++++++++
4 files changed, 153 insertions(+)
create mode 100644 arch/riscv/include/asm/simd.h
create mode 100644 arch/riscv/kernel/kernel_mode_vector.c
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
new file mode 100644
index 000000000000..0c5ba555b460
--- /dev/null
+++ b/arch/riscv/include/asm/simd.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2023 SiFive
+ */
+
+#ifndef __ASM_SIMD_H
+#define __ASM_SIMD_H
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+
+DECLARE_PER_CPU(bool, vector_context_busy);
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue vector
+ * instructions or access the vector register file
+ *
+ * Callers must not assume that the result remains true beyond the next
+ * preempt_enable() or return from softirq context.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ /*
+ * vector_context_busy is only set while preemption is disabled,
+ * and is clear whenever preemption is enabled. Since
+ * this_cpu_read() is atomic w.r.t. preemption, vector_context_busy
+ * cannot change under our feet -- if it's set we cannot be
+ * migrated, and if it's clear we cannot be migrated to a CPU
+ * where it is set.
+ */
+ return !in_hardirq() && !in_nmi() && !this_cpu_read(vector_context_busy);
+}
+
+#else /* ! CONFIG_RISCV_ISA_V */
+
+static __must_check inline bool may_use_simd(void)
+{
+ return false;
+}
+
+#endif /* ! CONFIG_RISCV_ISA_V */
+
+#endif
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index c5ee07b3df07..8b8ece690ea1 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -22,6 +22,10 @@
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
bool riscv_v_first_use_handler(struct pt_regs *regs);
+void kernel_vector_begin(void);
+void kernel_vector_end(void);
+void get_cpu_vector_context(void);
+void put_cpu_vector_context(void);
static __always_inline bool has_vector(void)
{
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 95cf25d48405..0597bb668b6e 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_RISCV_ISA_V) += vector.o
+obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += cpu_ops.o
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
new file mode 100644
index 000000000000..74936e108771
--- /dev/null
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+#include <asm/simd.h>
+
+DEFINE_PER_CPU(bool, vector_context_busy);
+
+/*
+ * Claim ownership of the CPU vector context for use by the calling context.
+ *
+ * The caller may freely manipulate the vector context metadata until
+ * put_cpu_vector_context() is called.
+ */
+void get_cpu_vector_context(void)
+{
+ bool busy;
+
+ preempt_disable();
+ busy = __this_cpu_xchg(vector_context_busy, true);
+
+ WARN_ON(busy);
+}
+
+/*
+ * Release the CPU vector context.
+ *
+ * Must be called from a context in which get_cpu_vector_context() was
+ * previously called, with no call to put_cpu_vector_context() in the
+ * meantime.
+ */
+void put_cpu_vector_context(void)
+{
+ bool busy = __this_cpu_xchg(vector_context_busy, false);
+
+ WARN_ON(!busy);
+ preempt_enable();
+}
+
+/*
+ * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the vector registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_vector_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the vector registers until kernel_vector_end() is
+ * called.
+ */
+void kernel_vector_begin(void)
+{
+ if (WARN_ON(!has_vector()))
+ return;
+
+ BUG_ON(!may_use_simd());
+
+ get_cpu_vector_context();
+
+ riscv_v_vstate_save(current, task_pt_regs(current));
+
+ riscv_v_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_vector_begin);
+
+/*
+ * kernel_vector_end(): give the CPU vector registers back to the current task
+ *
+ * Must be called from a context in which kernel_vector_begin() was previously
+ * called, with no call to kernel_vector_end() in the meantime.
+ *
+ * The caller must not use the vector registers after this function is called,
+ * unless kernel_vector_begin() is called again in the meantime.
+ */
+void kernel_vector_end(void)
+{
+ if (WARN_ON(!has_vector()))
+ return;
+
+ riscv_v_vstate_restore(current, task_pt_regs(current));
+
+ riscv_v_disable();
+
+ put_cpu_vector_context();
+}
+EXPORT_SYMBOL_GPL(kernel_vector_end);
--
2.17.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [v3, 1/5] riscv: Add support for kernel mode vector
2023-10-19 15:45 ` [v3, 1/5] riscv: Add support for kernel mode vector Andy Chiu
@ 2023-10-24 11:16 ` Conor Dooley
0 siblings, 0 replies; 10+ messages in thread
From: Conor Dooley @ 2023-10-24 11:16 UTC (permalink / raw)
To: Andy Chiu
Cc: linux-riscv, palmer, paul.walmsley, greentime.hu, guoren, bjorn,
peterz, tglx, Vincent Chen, Albert Ou, Heiko Stuebner, Guo Ren,
Björn Töpel, Conor Dooley, Alexandre Ghiti, Evan Green,
Xianting Tian, Anup Patel, Sami Tolvanen, Sia Jee Heng,
Jisheng Zhang
[-- Attachment #1.1: Type: text/plain, Size: 7542 bytes --]
On Thu, Oct 19, 2023 at 03:45:48PM +0000, Andy Chiu wrote:
> From: Greentime Hu <greentime.hu@sifive.com>
>
> Add kernel_vector_begin() and kernel_vector_end() function declarations
> and corresponding definitions in kernel_mode_vector.c
>
> These are needed to wrap uses of vector in kernel mode.
>
> Co-developed-by: Vincent Chen <vincent.chen@sifive.com>
> Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
> Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
> Changelog v3:
> - Reorder patch 1 to patch 3 to make use of
> {get,put}_cpu_vector_context later.
> - Export {get,put}_cpu_vector_context.
> - Save V context after disabling preemption. (Guo)
> - Fix a build fail. (Conor)
Acked-by: Conor Dooley <conor.dooley@microchip.com>
Thanks,
Conor.
> - Remove irqs_disabled() check as it is not needed, fix styling. (Björn)
> Changelog v2:
> - 's/kernel_rvv/kernel_vector' and return void in kernel_vector_begin
> (Conor)
> - export may_use_simd to include/asm/simd.h
> ---
> arch/riscv/include/asm/simd.h | 49 +++++++++++++
> arch/riscv/include/asm/vector.h | 4 ++
> arch/riscv/kernel/Makefile | 1 +
> arch/riscv/kernel/kernel_mode_vector.c | 99 ++++++++++++++++++++++++++
> 4 files changed, 153 insertions(+)
> create mode 100644 arch/riscv/include/asm/simd.h
> create mode 100644 arch/riscv/kernel/kernel_mode_vector.c
>
> diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
> new file mode 100644
> index 000000000000..0c5ba555b460
> --- /dev/null
> +++ b/arch/riscv/include/asm/simd.h
> @@ -0,0 +1,49 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
> + * Copyright (C) 2023 SiFive
> + */
> +
> +#ifndef __ASM_SIMD_H
> +#define __ASM_SIMD_H
> +
> +#include <linux/compiler.h>
> +#include <linux/irqflags.h>
> +#include <linux/percpu.h>
> +#include <linux/preempt.h>
> +#include <linux/types.h>
> +
> +#ifdef CONFIG_RISCV_ISA_V
> +
> +DECLARE_PER_CPU(bool, vector_context_busy);
> +
> +/*
> + * may_use_simd - whether it is allowable at this time to issue vector
> + * instructions or access the vector register file
> + *
> + * Callers must not assume that the result remains true beyond the next
> + * preempt_enable() or return from softirq context.
> + */
> +static __must_check inline bool may_use_simd(void)
> +{
> + /*
> + * vector_context_busy is only set while preemption is disabled,
> + * and is clear whenever preemption is enabled. Since
> + * this_cpu_read() is atomic w.r.t. preemption, vector_context_busy
> + * cannot change under our feet -- if it's set we cannot be
> + * migrated, and if it's clear we cannot be migrated to a CPU
> + * where it is set.
> + */
> + return !in_hardirq() && !in_nmi() && !this_cpu_read(vector_context_busy);
> +}
> +
> +#else /* ! CONFIG_RISCV_ISA_V */
> +
> +static __must_check inline bool may_use_simd(void)
> +{
> + return false;
> +}
> +
> +#endif /* ! CONFIG_RISCV_ISA_V */
> +
> +#endif
> diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
> index c5ee07b3df07..8b8ece690ea1 100644
> --- a/arch/riscv/include/asm/vector.h
> +++ b/arch/riscv/include/asm/vector.h
> @@ -22,6 +22,10 @@
> extern unsigned long riscv_v_vsize;
> int riscv_v_setup_vsize(void);
> bool riscv_v_first_use_handler(struct pt_regs *regs);
> +void kernel_vector_begin(void);
> +void kernel_vector_end(void);
> +void get_cpu_vector_context(void);
> +void put_cpu_vector_context(void);
>
> static __always_inline bool has_vector(void)
> {
> diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
> index 95cf25d48405..0597bb668b6e 100644
> --- a/arch/riscv/kernel/Makefile
> +++ b/arch/riscv/kernel/Makefile
> @@ -62,6 +62,7 @@ obj-$(CONFIG_MMU) += vdso.o vdso/
> obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
> obj-$(CONFIG_FPU) += fpu.o
> obj-$(CONFIG_RISCV_ISA_V) += vector.o
> +obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o
> obj-$(CONFIG_SMP) += smpboot.o
> obj-$(CONFIG_SMP) += smp.o
> obj-$(CONFIG_SMP) += cpu_ops.o
> diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
> new file mode 100644
> index 000000000000..74936e108771
> --- /dev/null
> +++ b/arch/riscv/kernel/kernel_mode_vector.c
> @@ -0,0 +1,99 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Copyright (C) 2012 ARM Ltd.
> + * Author: Catalin Marinas <catalin.marinas@arm.com>
> + * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
> + * Copyright (C) 2021 SiFive
> + */
> +#include <linux/compiler.h>
> +#include <linux/irqflags.h>
> +#include <linux/percpu.h>
> +#include <linux/preempt.h>
> +#include <linux/types.h>
> +
> +#include <asm/vector.h>
> +#include <asm/switch_to.h>
> +#include <asm/simd.h>
> +
> +DEFINE_PER_CPU(bool, vector_context_busy);
> +
> +/*
> + * Claim ownership of the CPU vector context for use by the calling context.
> + *
> + * The caller may freely manipulate the vector context metadata until
> + * put_cpu_vector_context() is called.
> + */
> +void get_cpu_vector_context(void)
> +{
> + bool busy;
> +
> + preempt_disable();
> + busy = __this_cpu_xchg(vector_context_busy, true);
> +
> + WARN_ON(busy);
> +}
> +
> +/*
> + * Release the CPU vector context.
> + *
> + * Must be called from a context in which get_cpu_vector_context() was
> + * previously called, with no call to put_cpu_vector_context() in the
> + * meantime.
> + */
> +void put_cpu_vector_context(void)
> +{
> + bool busy = __this_cpu_xchg(vector_context_busy, false);
> +
> + WARN_ON(!busy);
> + preempt_enable();
> +}
> +
> +/*
> + * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
> + * context
> + *
> + * Must not be called unless may_use_simd() returns true.
> + * Task context in the vector registers is saved back to memory as necessary.
> + *
> + * A matching call to kernel_vector_end() must be made before returning from the
> + * calling context.
> + *
> + * The caller may freely use the vector registers until kernel_vector_end() is
> + * called.
> + */
> +void kernel_vector_begin(void)
> +{
> + if (WARN_ON(!has_vector()))
> + return;
> +
> + BUG_ON(!may_use_simd());
> +
> + get_cpu_vector_context();
> +
> + riscv_v_vstate_save(current, task_pt_regs(current));
> +
> + riscv_v_enable();
> +}
> +EXPORT_SYMBOL_GPL(kernel_vector_begin);
> +
> +/*
> + * kernel_vector_end(): give the CPU vector registers back to the current task
> + *
> + * Must be called from a context in which kernel_vector_begin() was previously
> + * called, with no call to kernel_vector_end() in the meantime.
> + *
> + * The caller must not use the vector registers after this function is called,
> + * unless kernel_vector_begin() is called again in the meantime.
> + */
> +void kernel_vector_end(void)
> +{
> + if (WARN_ON(!has_vector()))
> + return;
> +
> + riscv_v_vstate_restore(current, task_pt_regs(current));
> +
> + riscv_v_disable();
> +
> + put_cpu_vector_context();
> +}
> +EXPORT_SYMBOL_GPL(kernel_vector_end);
> --
> 2.17.1
>
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv
[-- Attachment #1.2: signature.asc --]
[-- Type: application/pgp-signature, Size: 228 bytes --]
[-- Attachment #2: Type: text/plain, Size: 161 bytes --]
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 10+ messages in thread
* [v3, 2/5] riscv: Add vector extension XOR implementation
2023-10-19 15:45 [v3, 0/5] riscv: support kernel-mode Vector Andy Chiu
2023-10-19 15:45 ` [v3, 1/5] riscv: Add support for kernel mode vector Andy Chiu
@ 2023-10-19 15:45 ` Andy Chiu
2023-10-19 15:45 ` [v3, 3/5] riscv: sched: defer restoring Vector context for user Andy Chiu
` (2 subsequent siblings)
4 siblings, 0 replies; 10+ messages in thread
From: Andy Chiu @ 2023-10-19 15:45 UTC (permalink / raw)
To: linux-riscv, palmer
Cc: paul.walmsley, greentime.hu, guoren, bjorn, peterz, tglx,
Han-Kuan Chen, Andy Chiu, Albert Ou, Conor Dooley, Andrew Jones,
Heiko Stuebner
From: Greentime Hu <greentime.hu@sifive.com>
This patch adds support for vector-optimized XOR, and it is tested in
QEMU.
Co-developed-by: Han-Kuan Chen <hankuan.chen@sifive.com>
Signed-off-by: Han-Kuan Chen <hankuan.chen@sifive.com>
Signed-off-by: Greentime Hu <greentime.hu@sifive.com>
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
Acked-by: Conor Dooley <conor.dooley@microchip.com>
---
Changelog v2:
- 's/rvv/vector/' (Conor)
---
arch/riscv/include/asm/xor.h | 82 ++++++++++++++++++++++++++++++++++++
arch/riscv/lib/Makefile | 1 +
arch/riscv/lib/xor.S | 81 +++++++++++++++++++++++++++++++++++
3 files changed, 164 insertions(+)
create mode 100644 arch/riscv/include/asm/xor.h
create mode 100644 arch/riscv/lib/xor.S
diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h
new file mode 100644
index 000000000000..903c3275f8d0
--- /dev/null
+++ b/arch/riscv/include/asm/xor.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+
+#include <linux/hardirq.h>
+#include <asm-generic/xor.h>
+#ifdef CONFIG_RISCV_ISA_V
+#include <asm/vector.h>
+#include <asm/switch_to.h>
+
+void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2);
+void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3);
+void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4);
+void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5);
+
+static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2)
+{
+ kernel_vector_begin();
+ xor_regs_2_(bytes, p1, p2);
+ kernel_vector_end();
+}
+
+static void xor_vector_3(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3)
+{
+ kernel_vector_begin();
+ xor_regs_3_(bytes, p1, p2, p3);
+ kernel_vector_end();
+}
+
+static void xor_vector_4(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4)
+{
+ kernel_vector_begin();
+ xor_regs_4_(bytes, p1, p2, p3, p4);
+ kernel_vector_end();
+}
+
+static void xor_vector_5(unsigned long bytes, unsigned long *__restrict p1,
+ const unsigned long *__restrict p2,
+ const unsigned long *__restrict p3,
+ const unsigned long *__restrict p4,
+ const unsigned long *__restrict p5)
+{
+ kernel_vector_begin();
+ xor_regs_5_(bytes, p1, p2, p3, p4, p5);
+ kernel_vector_end();
+}
+
+static struct xor_block_template xor_block_rvv = {
+ .name = "rvv",
+ .do_2 = xor_vector_2,
+ .do_3 = xor_vector_3,
+ .do_4 = xor_vector_4,
+ .do_5 = xor_vector_5
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ if (has_vector()) { \
+ xor_speed(&xor_block_rvv);\
+ } \
+ } while (0)
+#endif
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 26cb2502ecf8..494f9cd1a00c 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -11,3 +11,4 @@ lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+lib-$(CONFIG_RISCV_ISA_V) += xor.o
diff --git a/arch/riscv/lib/xor.S b/arch/riscv/lib/xor.S
new file mode 100644
index 000000000000..3bc059e18171
--- /dev/null
+++ b/arch/riscv/lib/xor.S
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+
+ENTRY(xor_regs_2_)
+ vsetvli a3, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a3
+ vxor.vv v16, v0, v8
+ add a2, a2, a3
+ vse8.v v16, (a1)
+ add a1, a1, a3
+ bnez a0, xor_regs_2_
+ ret
+END(xor_regs_2_)
+EXPORT_SYMBOL(xor_regs_2_)
+
+ENTRY(xor_regs_3_)
+ vsetvli a4, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a4
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a4
+ vxor.vv v16, v0, v16
+ add a3, a3, a4
+ vse8.v v16, (a1)
+ add a1, a1, a4
+ bnez a0, xor_regs_3_
+ ret
+END(xor_regs_3_)
+EXPORT_SYMBOL(xor_regs_3_)
+
+ENTRY(xor_regs_4_)
+ vsetvli a5, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a5
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a5
+ vxor.vv v0, v0, v16
+ vle8.v v24, (a4)
+ add a3, a3, a5
+ vxor.vv v16, v0, v24
+ add a4, a4, a5
+ vse8.v v16, (a1)
+ add a1, a1, a5
+ bnez a0, xor_regs_4_
+ ret
+END(xor_regs_4_)
+EXPORT_SYMBOL(xor_regs_4_)
+
+ENTRY(xor_regs_5_)
+ vsetvli a6, a0, e8, m8, ta, ma
+ vle8.v v0, (a1)
+ vle8.v v8, (a2)
+ sub a0, a0, a6
+ vxor.vv v0, v0, v8
+ vle8.v v16, (a3)
+ add a2, a2, a6
+ vxor.vv v0, v0, v16
+ vle8.v v24, (a4)
+ add a3, a3, a6
+ vxor.vv v0, v0, v24
+ vle8.v v8, (a5)
+ add a4, a4, a6
+ vxor.vv v16, v0, v8
+ add a5, a5, a6
+ vse8.v v16, (a1)
+ add a1, a1, a6
+ bnez a0, xor_regs_5_
+ ret
+END(xor_regs_5_)
+EXPORT_SYMBOL(xor_regs_5_)
--
2.17.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 10+ messages in thread* [v3, 3/5] riscv: sched: defer restoring Vector context for user
2023-10-19 15:45 [v3, 0/5] riscv: support kernel-mode Vector Andy Chiu
2023-10-19 15:45 ` [v3, 1/5] riscv: Add support for kernel mode vector Andy Chiu
2023-10-19 15:45 ` [v3, 2/5] riscv: Add vector extension XOR implementation Andy Chiu
@ 2023-10-19 15:45 ` Andy Chiu
2023-10-24 11:24 ` Conor Dooley
2023-10-19 15:45 ` [v3, 4/5] riscv: vector: do not pass task_struct into riscv_v_vstate_{save,restore}() Andy Chiu
2023-10-19 15:45 ` [v3, 5/5] riscv: vector: allow kernel-mode Vector with preemption Andy Chiu
4 siblings, 1 reply; 10+ messages in thread
From: Andy Chiu @ 2023-10-19 15:45 UTC (permalink / raw)
To: linux-riscv, palmer
Cc: paul.walmsley, greentime.hu, guoren, bjorn, peterz, tglx,
Andy Chiu, Albert Ou, Oleg Nesterov, Guo Ren,
Björn Töpel, Jisheng Zhang, Conor Dooley, Vincent Chen,
Heiko Stuebner, Andrew Bresticker, Mathis Salmen
User will use its Vector registers only after the kernel really returns
to the userspace. So we can delay restoring Vector registers as long as
we are still running in kernel mode. So, add a thread flag to indicate
the need to restore Vector and do the restore at the last
arch-specific exit-to-user hook. This saves the context-restoring cost
when we switch over multiple processes that run V in kernel mode. For
example, if the kernel performs a context switch from A->B->C, and
returns to C's userspace, then there is no need to restore B's
V-registers.
Besides, this also prevents us from repeatedly restoring V context when
executing kernel-mode Vector multiple times for the upcoming kernel-mode
Vector patches.
The cost of this is that we must disable preemption and mark vector as
busy during vstate_{save,restore}. Because then the V context will not
get restored back immediately when a trap-causing context switch happens
in the middle of vstate_{save,restore}.
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
---
Changelog v3:
- Guard {get,put}_cpu_vector_context between vstate_* operation and
explain it in the commit msg.
- Drop R-b from Björn and A-b from Conor.
Changelog v2:
- rename and add comment for the new thread flag (Conor)
---
arch/riscv/include/asm/entry-common.h | 17 +++++++++++++++++
arch/riscv/include/asm/thread_info.h | 2 ++
arch/riscv/include/asm/vector.h | 11 ++++++++++-
arch/riscv/kernel/kernel_mode_vector.c | 2 +-
arch/riscv/kernel/process.c | 2 ++
arch/riscv/kernel/ptrace.c | 5 ++++-
arch/riscv/kernel/signal.c | 5 ++++-
arch/riscv/kernel/vector.c | 2 +-
8 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 6e4dee49d84b..8d64f1c18169 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -4,6 +4,23 @@
#define _ASM_RISCV_ENTRY_COMMON_H
#include <asm/stacktrace.h>
+#include <asm/thread_info.h>
+#include <asm/vector.h>
+
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ if (ti_work & _TIF_RISCV_V_DEFER_RESTORE) {
+ clear_thread_flag(TIF_RISCV_V_DEFER_RESTORE);
+ /*
+ * We are already called with irq disabled, so go without
+ * keepping track of vector_context_busy.
+ */
+ riscv_v_vstate_restore(current, regs);
+ }
+}
+
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
void handle_page_fault(struct pt_regs *regs);
void handle_break(struct pt_regs *regs);
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 1833beb00489..b182f2d03e25 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -93,12 +93,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
#define TIF_32BIT 11 /* compat-mode 32bit process */
+#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returing to user */
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 8b8ece690ea1..2f11c6f3ad96 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -188,6 +188,15 @@ static inline void riscv_v_vstate_restore(struct task_struct *task,
}
}
+static inline void riscv_v_vstate_set_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) != SR_VS_OFF) {
+ set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
+ riscv_v_vstate_on(regs);
+ }
+}
+
static inline void __switch_to_vector(struct task_struct *prev,
struct task_struct *next)
{
@@ -195,7 +204,7 @@ static inline void __switch_to_vector(struct task_struct *prev,
regs = task_pt_regs(prev);
riscv_v_vstate_save(prev, regs);
- riscv_v_vstate_restore(next, task_pt_regs(next));
+ riscv_v_vstate_set_restore(next, task_pt_regs(next));
}
void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
index 74936e108771..fa01dc62256f 100644
--- a/arch/riscv/kernel/kernel_mode_vector.c
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -90,7 +90,7 @@ void kernel_vector_end(void)
if (WARN_ON(!has_vector()))
return;
- riscv_v_vstate_restore(current, task_pt_regs(current));
+ riscv_v_vstate_set_restore(current, task_pt_regs(current));
riscv_v_disable();
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e32d737e039f..ec89e7edb6fd 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -153,6 +153,7 @@ void flush_thread(void)
riscv_v_vstate_off(task_pt_regs(current));
kfree(current->thread.vstate.datap);
memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+ clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
}
@@ -169,6 +170,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
*dst = *src;
/* clear entire V context, including datap for a new task */
memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+ clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
return 0;
}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 2afe460de16a..7b93bcbdf9fa 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -99,8 +99,11 @@ static int riscv_vr_get(struct task_struct *target,
* Ensure the vector registers have been saved to the memory before
* copying them to membuf.
*/
- if (target == current)
+ if (target == current) {
+ get_cpu_vector_context();
riscv_v_vstate_save(current, task_pt_regs(current));
+ put_cpu_vector_context();
+ }
ptrace_vstate.vstart = vstate->vstart;
ptrace_vstate.vl = vstate->vl;
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 180d951d3624..d31d2c74d31f 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -86,7 +86,10 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
/* datap is designed to be 16 byte aligned for better performance */
WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16)));
+ get_cpu_vector_context();
riscv_v_vstate_save(current, regs);
+ put_cpu_vector_context();
+
/* Copy everything of vstate but datap. */
err = __copy_to_user(&state->v_state, &current->thread.vstate,
offsetof(struct __riscv_v_ext_state, datap));
@@ -134,7 +137,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
if (unlikely(err))
return err;
- riscv_v_vstate_restore(current, regs);
+ riscv_v_vstate_set_restore(current, regs);
return err;
}
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 8d92fb6c522c..9d583b760db4 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -167,7 +167,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
return true;
}
riscv_v_vstate_on(regs);
- riscv_v_vstate_restore(current, regs);
+ riscv_v_vstate_set_restore(current, regs);
return true;
}
--
2.17.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [v3, 3/5] riscv: sched: defer restoring Vector context for user
2023-10-19 15:45 ` [v3, 3/5] riscv: sched: defer restoring Vector context for user Andy Chiu
@ 2023-10-24 11:24 ` Conor Dooley
0 siblings, 0 replies; 10+ messages in thread
From: Conor Dooley @ 2023-10-24 11:24 UTC (permalink / raw)
To: Andy Chiu
Cc: linux-riscv, palmer, paul.walmsley, greentime.hu, guoren, bjorn,
peterz, tglx, Albert Ou, Oleg Nesterov, Guo Ren,
Björn Töpel, Jisheng Zhang, Conor Dooley, Vincent Chen,
Heiko Stuebner, Andrew Bresticker, Mathis Salmen
[-- Attachment #1.1: Type: text/plain, Size: 8801 bytes --]
Hey Andy,
On Thu, Oct 19, 2023 at 03:45:50PM +0000, Andy Chiu wrote:
> User will use its Vector registers only after the kernel really returns
> to the userspace. So we can delay restoring Vector registers as long as
> we are still running in kernel mode. So, add a thread flag to indicate
> the need of restoring Vector and do the restore at the last
> arch-specific exit-to-user hook. This saves the context restoring cost
> when we switch over multiple processes that run V in kernel mode. For
> example, if the kernel performs a context switch from A->B->C, and
> returns to C's userspace, then there is no need to restore B's
> V-register.
>
> Besides, this also prevents us from repeatedly restoring V context when
> executing kernel-mode Vector multiple times for the upcoming kernel-mode
> Vector patches.
This comment now seems misplaced, as this patch has moved after adding
kernel mode vector in the series.
> The cost of this is that we must disable preemption and mark vector as
> busy during vstate_{save,restore}. Because then the V context will not
> get restored back immediately when a trap-causing context switch happens
> in the middle of vstate_{save,restore}.
>
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
> Changelog v3:
> - Guard {get,put}_cpu_vector_context between vstate_* operation and
> explain it in the commit msg.
> - Drop R-b from Björn and A-b from Conor.
You can keep mine,
Acked-by: Conor Dooley <conor.dooley@microchip.com>
> Changelog v2:
> - rename and add comment for the new thread flag (Conor)
> ---
> arch/riscv/include/asm/entry-common.h | 17 +++++++++++++++++
> arch/riscv/include/asm/thread_info.h | 2 ++
> arch/riscv/include/asm/vector.h | 11 ++++++++++-
> arch/riscv/kernel/kernel_mode_vector.c | 2 +-
> arch/riscv/kernel/process.c | 2 ++
> arch/riscv/kernel/ptrace.c | 5 ++++-
> arch/riscv/kernel/signal.c | 5 ++++-
> arch/riscv/kernel/vector.c | 2 +-
> 8 files changed, 41 insertions(+), 5 deletions(-)
>
> diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
> index 6e4dee49d84b..8d64f1c18169 100644
> --- a/arch/riscv/include/asm/entry-common.h
> +++ b/arch/riscv/include/asm/entry-common.h
> @@ -4,6 +4,23 @@
> #define _ASM_RISCV_ENTRY_COMMON_H
>
> #include <asm/stacktrace.h>
> +#include <asm/thread_info.h>
> +#include <asm/vector.h>
> +
> +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
> + unsigned long ti_work)
> +{
> + if (ti_work & _TIF_RISCV_V_DEFER_RESTORE) {
> + clear_thread_flag(TIF_RISCV_V_DEFER_RESTORE);
> + /*
> + * We are already called with irq disabled, so go without
> + * keepping track of vector_context_busy.
nit: s/keepping/keeping/
Cheers,
Conor.
> + */
> + riscv_v_vstate_restore(current, regs);
> + }
> +}
> +
> +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
>
> void handle_page_fault(struct pt_regs *regs);
> void handle_break(struct pt_regs *regs);
> diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> index 1833beb00489..b182f2d03e25 100644
> --- a/arch/riscv/include/asm/thread_info.h
> +++ b/arch/riscv/include/asm/thread_info.h
> @@ -93,12 +93,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
> #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
> #define TIF_32BIT 11 /* compat-mode 32bit process */
> +#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returing to user */
>
> #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
> #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
> #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
> #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
> #define _TIF_UPROBE (1 << TIF_UPROBE)
> +#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
>
> #define _TIF_WORK_MASK \
> (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
> index 8b8ece690ea1..2f11c6f3ad96 100644
> --- a/arch/riscv/include/asm/vector.h
> +++ b/arch/riscv/include/asm/vector.h
> @@ -188,6 +188,15 @@ static inline void riscv_v_vstate_restore(struct task_struct *task,
> }
> }
>
> +static inline void riscv_v_vstate_set_restore(struct task_struct *task,
> + struct pt_regs *regs)
> +{
> + if ((regs->status & SR_VS) != SR_VS_OFF) {
> + set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE);
> + riscv_v_vstate_on(regs);
> + }
> +}
> +
> static inline void __switch_to_vector(struct task_struct *prev,
> struct task_struct *next)
> {
> @@ -195,7 +204,7 @@ static inline void __switch_to_vector(struct task_struct *prev,
>
> regs = task_pt_regs(prev);
> riscv_v_vstate_save(prev, regs);
> - riscv_v_vstate_restore(next, task_pt_regs(next));
> + riscv_v_vstate_set_restore(next, task_pt_regs(next));
> }
>
> void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
> diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
> index 74936e108771..fa01dc62256f 100644
> --- a/arch/riscv/kernel/kernel_mode_vector.c
> +++ b/arch/riscv/kernel/kernel_mode_vector.c
> @@ -90,7 +90,7 @@ void kernel_vector_end(void)
> if (WARN_ON(!has_vector()))
> return;
>
> - riscv_v_vstate_restore(current, task_pt_regs(current));
> + riscv_v_vstate_set_restore(current, task_pt_regs(current));
>
> riscv_v_disable();
>
> diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
> index e32d737e039f..ec89e7edb6fd 100644
> --- a/arch/riscv/kernel/process.c
> +++ b/arch/riscv/kernel/process.c
> @@ -153,6 +153,7 @@ void flush_thread(void)
> riscv_v_vstate_off(task_pt_regs(current));
> kfree(current->thread.vstate.datap);
> memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
> + clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
> #endif
> }
>
> @@ -169,6 +170,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
> *dst = *src;
> /* clear entire V context, including datap for a new task */
> memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
> + clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
>
> return 0;
> }
> diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
> index 2afe460de16a..7b93bcbdf9fa 100644
> --- a/arch/riscv/kernel/ptrace.c
> +++ b/arch/riscv/kernel/ptrace.c
> @@ -99,8 +99,11 @@ static int riscv_vr_get(struct task_struct *target,
> * Ensure the vector registers have been saved to the memory before
> * copying them to membuf.
> */
> - if (target == current)
> + if (target == current) {
> + get_cpu_vector_context();
> riscv_v_vstate_save(current, task_pt_regs(current));
> + put_cpu_vector_context();
> + }
>
> ptrace_vstate.vstart = vstate->vstart;
> ptrace_vstate.vl = vstate->vl;
> diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
> index 180d951d3624..d31d2c74d31f 100644
> --- a/arch/riscv/kernel/signal.c
> +++ b/arch/riscv/kernel/signal.c
> @@ -86,7 +86,10 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
> /* datap is designed to be 16 byte aligned for better performance */
> WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16)));
>
> + get_cpu_vector_context();
> riscv_v_vstate_save(current, regs);
> + put_cpu_vector_context();
> +
> /* Copy everything of vstate but datap. */
> err = __copy_to_user(&state->v_state, ¤t->thread.vstate,
> offsetof(struct __riscv_v_ext_state, datap));
> @@ -134,7 +137,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
> if (unlikely(err))
> return err;
>
> - riscv_v_vstate_restore(current, regs);
> + riscv_v_vstate_set_restore(current, regs);
>
> return err;
> }
> diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
> index 8d92fb6c522c..9d583b760db4 100644
> --- a/arch/riscv/kernel/vector.c
> +++ b/arch/riscv/kernel/vector.c
> @@ -167,7 +167,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs)
> return true;
> }
> riscv_v_vstate_on(regs);
> - riscv_v_vstate_restore(current, regs);
> + riscv_v_vstate_set_restore(current, regs);
> return true;
> }
>
> --
> 2.17.1
>
>
> _______________________________________________
> linux-riscv mailing list
> linux-riscv@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-riscv
[-- Attachment #1.2: signature.asc --]
[-- Type: application/pgp-signature, Size: 228 bytes --]
[-- Attachment #2: Type: text/plain, Size: 161 bytes --]
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 10+ messages in thread
* [v3, 4/5] riscv: vector: do not pass task_struct into riscv_v_vstate_{save,restore}()
2023-10-19 15:45 [v3, 0/5] riscv: support kernel-mode Vector Andy Chiu
` (2 preceding siblings ...)
2023-10-19 15:45 ` [v3, 3/5] riscv: sched: defer restoring Vector context for user Andy Chiu
@ 2023-10-19 15:45 ` Andy Chiu
2023-10-24 11:26 ` Conor Dooley
2023-10-19 15:45 ` [v3, 5/5] riscv: vector: allow kernel-mode Vector with preemption Andy Chiu
4 siblings, 1 reply; 10+ messages in thread
From: Andy Chiu @ 2023-10-19 15:45 UTC (permalink / raw)
To: linux-riscv, palmer
Cc: paul.walmsley, greentime.hu, guoren, bjorn, peterz, tglx,
Andy Chiu, Albert Ou, Oleg Nesterov, Guo Ren, Jisheng Zhang,
Björn Töpel, Vincent Chen, Heiko Stuebner, Conor Dooley,
Andrew Bresticker, Mathis Salmen
riscv_v_vstate_{save,restore}() can operate only on the knowledge of
struct __riscv_v_ext_state, and struct pt_regs. Let the caller decide
which should be passed into the function. Meanwhile, the kernel-mode
Vector is going to introduce another vstate, so this also makes functions
potentially able to be reused.
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
---
Changelog v3:
- save V context after get_cpu_vector_context
Changelog v2:
- fix build fail that get caught on this patch (Conor)
---
arch/riscv/include/asm/entry-common.h | 2 +-
arch/riscv/include/asm/vector.h | 14 +++++---------
arch/riscv/kernel/kernel_mode_vector.c | 2 +-
arch/riscv/kernel/ptrace.c | 2 +-
arch/riscv/kernel/signal.c | 2 +-
5 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 8d64f1c18169..fd78361bf301 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -16,7 +16,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
* We are already called with irq disabled, so go without
* keepping track of vector_context_busy.
*/
- riscv_v_vstate_restore(current, regs);
+ riscv_v_vstate_restore(¤t->thread.vstate, regs);
}
}
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 2f11c6f3ad96..d356eac8c0b4 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -166,23 +166,19 @@ static inline void riscv_v_vstate_discard(struct pt_regs *regs)
__riscv_v_vstate_dirty(regs);
}
-static inline void riscv_v_vstate_save(struct task_struct *task,
+static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
if ((regs->status & SR_VS) == SR_VS_DIRTY) {
- struct __riscv_v_ext_state *vstate = &task->thread.vstate;
-
__riscv_v_vstate_save(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
}
-static inline void riscv_v_vstate_restore(struct task_struct *task,
+static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate,
struct pt_regs *regs)
{
if ((regs->status & SR_VS) != SR_VS_OFF) {
- struct __riscv_v_ext_state *vstate = &task->thread.vstate;
-
__riscv_v_vstate_restore(vstate, vstate->datap);
__riscv_v_vstate_clean(regs);
}
@@ -203,7 +199,7 @@ static inline void __switch_to_vector(struct task_struct *prev,
struct pt_regs *regs;
regs = task_pt_regs(prev);
- riscv_v_vstate_save(prev, regs);
+ riscv_v_vstate_save(&prev->thread.vstate, regs);
riscv_v_vstate_set_restore(next, task_pt_regs(next));
}
@@ -221,8 +217,8 @@ static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#define riscv_v_vsize (0)
#define riscv_v_vstate_discard(regs) do {} while (0)
-#define riscv_v_vstate_save(task, regs) do {} while (0)
-#define riscv_v_vstate_restore(task, regs) do {} while (0)
+#define riscv_v_vstate_save(vstate, regs) do {} while (0)
+#define riscv_v_vstate_restore(vstate, regs) do {} while (0)
#define __switch_to_vector(__prev, __next) do {} while (0)
#define riscv_v_vstate_off(regs) do {} while (0)
#define riscv_v_vstate_on(regs) do {} while (0)
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
index fa01dc62256f..2344817f8640 100644
--- a/arch/riscv/kernel/kernel_mode_vector.c
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -70,7 +70,7 @@ void kernel_vector_begin(void)
get_cpu_vector_context();
- riscv_v_vstate_save(current, task_pt_regs(current));
+ riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
riscv_v_enable();
}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 7b93bcbdf9fa..e8515aa9d80b 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -101,7 +101,7 @@ static int riscv_vr_get(struct task_struct *target,
*/
if (target == current) {
get_cpu_vector_context();
- riscv_v_vstate_save(current, task_pt_regs(current));
+ riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
put_cpu_vector_context();
}
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index d31d2c74d31f..13cb00cbcd58 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -87,7 +87,7 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16)));
get_cpu_vector_context();
- riscv_v_vstate_save(current, regs);
+ riscv_v_vstate_save(¤t->thread.vstate, regs);
put_cpu_vector_context();
/* Copy everything of vstate but datap. */
--
2.17.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [v3, 4/5] riscv: vector: do not pass task_struct into riscv_v_vstate_{save,restore}()
2023-10-19 15:45 ` [v3, 4/5] riscv: vector: do not pass task_struct into riscv_v_vstate_{save,restore}() Andy Chiu
@ 2023-10-24 11:26 ` Conor Dooley
0 siblings, 0 replies; 10+ messages in thread
From: Conor Dooley @ 2023-10-24 11:26 UTC (permalink / raw)
To: Andy Chiu
Cc: linux-riscv, palmer, paul.walmsley, greentime.hu, guoren, bjorn,
peterz, tglx, Albert Ou, Oleg Nesterov, Guo Ren, Jisheng Zhang,
Björn Töpel, Vincent Chen, Heiko Stuebner, Conor Dooley,
Andrew Bresticker, Mathis Salmen
[-- Attachment #1.1: Type: text/plain, Size: 667 bytes --]
On Thu, Oct 19, 2023 at 03:45:51PM +0000, Andy Chiu wrote:
> riscv_v_vstate_{save,restore}() can operate only on the knowledge of
> struct __riscv_v_ext_state, and struct pt_regs. Let the caller decide
> which should be passed into the function. Meanwhile, the kernel-mode
> Vector is going to introduce another vstate, so this also makes functions
> potentially able to be reused.
>
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
> Changelog v3:
> - save V context after get_cpu_vector_context
> Changelog v2:
> - fix build fail that get caught on this patch (Conor)
Acked-by: Conor Dooley <conor.dooley@microchip.com>
Cheers,
Conor.
[-- Attachment #1.2: signature.asc --]
[-- Type: application/pgp-signature, Size: 228 bytes --]
[-- Attachment #2: Type: text/plain, Size: 161 bytes --]
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 10+ messages in thread
* [v3, 5/5] riscv: vector: allow kernel-mode Vector with preemption
2023-10-19 15:45 [v3, 0/5] riscv: support kernel-mode Vector Andy Chiu
` (3 preceding siblings ...)
2023-10-19 15:45 ` [v3, 4/5] riscv: vector: do not pass task_struct into riscv_v_vstate_{save,restore}() Andy Chiu
@ 2023-10-19 15:45 ` Andy Chiu
2023-10-20 7:02 ` Andy Chiu
4 siblings, 1 reply; 10+ messages in thread
From: Andy Chiu @ 2023-10-19 15:45 UTC (permalink / raw)
To: linux-riscv, palmer
Cc: paul.walmsley, greentime.hu, guoren, bjorn, peterz, tglx,
Andy Chiu, Albert Ou, Heiko Stuebner, Vincent Chen, Conor Dooley,
Charlie Jenkins, Guo Ren, Jisheng Zhang, Björn Töpel,
Andrew Jones, Ley Foon Tan, Sia Jee Heng, Han-Kuan Chen,
Andrew Bresticker, Fangrui Song, Nick Knight
Add kernel_vstate to keep track of kernel-mode Vector registers when
trap introduced context switch happens. Also, provide trap_pt_regs to
let context save/restore routine reference status.VS at which the trap
takes place. The thread flag TIF_RISCV_V_KERNEL_MODE indicates whether
a task is running in kernel-mode Vector with preemption 'ON'. So context
switch routines know and would save V-regs to kernel_vstate and restore
V-regs immediately from kernel_vstate if the bit is set.
Apart from a task's preemption status, the capability of
running preemptive kernel-mode Vector is as well controlled by the
RISCV_V_VSTATE_CTRL_PREEMPTIBLE mask in the task's
thread.vstate_ctrl. This bit is masked whenever a trap takes place in
kernel mode while executing preemptive Vector code.
Also, provide a config CONFIG_RISCV_ISA_V_PREEMPTIVE to give users an
option to disable preemptible kernel-mode Vector at build time. Users
with constrained memory may want to disable this config as preemptible
kernel-mode Vector needs extra space for tracking per thread's
kernel-mode V context. Or, users might as well want to disable it if all
kernel-mode Vector code is time sensitive and cannot tolerate context
switch overhead.
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
---
Changelog v3:
- Guard vstate_save with {get,set}_cpu_vector_context
- Add comments on preventions of nesting V contexts
- remove warnings in context switch when trap's reg is not present (Conor)
- refactor code (Björn)
Changelog v2:
- fix build fail when compiling without RISCV_ISA_V (Conor)
- 's/TIF_RISCV_V_KMV/TIF_RISCV_V_KERNEL_MODE' and add comment (Conor)
- merge Kconfig patch into this one (Conor).
- 's/CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV/CONFIG_RISCV_ISA_V_PREEMPTIVE/'
(Conor)
- fix some typos (Conor)
- enclose assembly with RISCV_ISA_V_PREEMPTIVE.
- change riscv_v_vstate_ctrl_config_kmv() to
kernel_vector_allow_preemption() for better understanding. (Conor)
- 's/riscv_v_kmv_preempitble/kernel_vector_preemptible/'
---
arch/riscv/Kconfig | 10 +++++
arch/riscv/include/asm/processor.h | 2 +
arch/riscv/include/asm/simd.h | 9 +++-
arch/riscv/include/asm/thread_info.h | 4 ++
arch/riscv/include/asm/vector.h | 25 +++++++++--
arch/riscv/kernel/asm-offsets.c | 2 +
arch/riscv/kernel/entry.S | 49 ++++++++++++++++++++++
arch/riscv/kernel/kernel_mode_vector.c | 57 ++++++++++++++++++++++++--
arch/riscv/kernel/process.c | 8 +++-
arch/riscv/kernel/vector.c | 3 +-
10 files changed, 159 insertions(+), 10 deletions(-)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d607ab0f7c6d..dc51164b8fd4 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -520,6 +520,16 @@ config RISCV_ISA_V_DEFAULT_ENABLE
If you don't know what to do here, say Y.
+config RISCV_ISA_V_PREEMPTIVE
+ bool "Run kernel-mode Vector with kernel preemption"
+ depends on PREEMPTION
+ depends on RISCV_ISA_V
+ default y
+ help
+ Ordinarily the kernel disables preemption before running in-kernel
+ Vector code. This config frees the kernel from disabling preemption
+ by adding memory on demand for tracking kernel's V-context.
+
config TOOLCHAIN_HAS_ZBB
bool
default y
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3e23e1786d05..f9b85e37e624 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -82,6 +82,8 @@ struct thread_struct {
unsigned long bad_cause;
unsigned long vstate_ctrl;
struct __riscv_v_ext_state vstate;
+ struct pt_regs *trap_pt_regs;
+ struct __riscv_v_ext_state kernel_vstate;
};
/* Whitelist the fstate from the task_struct for hardened usercopy */
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
index 0c5ba555b460..93d9015b4751 100644
--- a/arch/riscv/include/asm/simd.h
+++ b/arch/riscv/include/asm/simd.h
@@ -12,6 +12,7 @@
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>
+#include <linux/thread_info.h>
#ifdef CONFIG_RISCV_ISA_V
@@ -33,8 +34,14 @@ static __must_check inline bool may_use_simd(void)
* cannot change under our feet -- if it's set we cannot be
* migrated, and if it's clear we cannot be migrated to a CPU
* where it is set.
+ *
+ * The TIF_RISCV_V_KERNEL_MODE check here prevent us from nesting a
+ * non-preemptible V context on top of a preemptible one. For example,
+ * executing V in a softirq context is prevented if the core is
+ * interrupted during the execution of preemptible V.
*/
- return !in_hardirq() && !in_nmi() && !this_cpu_read(vector_context_busy);
+ return !in_hardirq() && !in_nmi() && !this_cpu_read(vector_context_busy) &&
+ !test_thread_flag(TIF_RISCV_V_KERNEL_MODE);
}
#else /* ! CONFIG_RISCV_ISA_V */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index b182f2d03e25..8797d520e8ef 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -94,6 +94,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
#define TIF_32BIT 11 /* compat-mode 32bit process */
#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returing to user */
+#define TIF_RISCV_V_KERNEL_MODE 13 /* kernel-mode Vector run with preemption-on */
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -101,9 +102,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
+#define _TIF_RISCV_V_KERNEL_MODE (1 << TIF_RISCV_V_KERNEL_MODE)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
+#define RISCV_V_VSTATE_CTRL_PREEMPTIBLE 0x20
+
#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index d356eac8c0b4..27bb49e97af8 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -198,9 +198,22 @@ static inline void __switch_to_vector(struct task_struct *prev,
{
struct pt_regs *regs;
- regs = task_pt_regs(prev);
- riscv_v_vstate_save(&prev->thread.vstate, regs);
- riscv_v_vstate_set_restore(next, task_pt_regs(next));
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) &&
+ test_tsk_thread_flag(prev, TIF_RISCV_V_KERNEL_MODE)) {
+ regs = prev->thread.trap_pt_regs;
+ riscv_v_vstate_save(&prev->thread.kernel_vstate, regs);
+ } else {
+ regs = task_pt_regs(prev);
+ riscv_v_vstate_save(&prev->thread.vstate, regs);
+ }
+
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) &&
+ test_tsk_thread_flag(next, TIF_RISCV_V_KERNEL_MODE)) {
+ regs = next->thread.trap_pt_regs;
+ riscv_v_vstate_restore(&next->thread.kernel_vstate, regs);
+ } else {
+ riscv_v_vstate_set_restore(next, task_pt_regs(next));
+ }
}
void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
@@ -225,4 +238,10 @@ static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#endif /* CONFIG_RISCV_ISA_V */
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+void kernel_vector_allow_preemption(void);
+#else
+#define kernel_vector_allow_preemption() do {} while (0)
+#endif
+
#endif /* ! __ASM_RISCV_VECTOR_H */
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index d6a75aac1d27..4b062f7741b2 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -38,6 +38,8 @@ void asm_offsets(void)
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+ OFFSET(TASK_THREAD_TRAP_REGP, task_struct, thread.trap_pt_regs);
+ OFFSET(TASK_THREAD_VSTATE_CTRL, task_struct, thread.vstate_ctrl);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 143a2bb3e697..ec8baada608f 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -66,6 +66,33 @@ _save_context:
REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp)
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ /*
+ * Record the register set at the frame where in-kernel V registers are
+ * last alive.
+ */
+ REG_L s0, TASK_TI_FLAGS(tp)
+ li s1, 1 << TIF_RISCV_V_KERNEL_MODE
+ and s0, s0, s1
+ beqz s0, 1f
+ li s0, TASK_THREAD_TRAP_REGP
+ add s0, s0, tp
+ REG_L s1, (s0)
+ bnez s1, 1f
+ REG_S sp, (s0)
+ li s0, TASK_THREAD_VSTATE_CTRL
+ add s0, s0, tp
+ REG_L s1, (s0)
+ /*
+ * Nesting preemptible Vector context is prevented by unsetting
+ * RISCV_V_VSTATE_CTRL_PREEMPTIBLE here.
+ */
+ li s2, ~RISCV_V_VSTATE_CTRL_PREEMPTIBLE
+ and s1, s1, s2
+ REG_S s1, (s0)
+1:
+#endif
+
/*
* Set the scratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel
@@ -129,6 +156,28 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
*/
csrw CSR_SCRATCH, tp
1:
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+ /*
+ * Clear tracking of the trap registers when we return to the frame
+ * that uses kernel mode Vector.
+ */
+ REG_L s0, TASK_TI_FLAGS(tp)
+ li s1, 1 << TIF_RISCV_V_KERNEL_MODE
+ and s0, s0, s1
+ beqz s0, 1f
+ li s0, TASK_THREAD_TRAP_REGP
+ add s0, s0, tp
+ REG_L s1, (s0)
+ bne s1, sp, 1f
+ REG_S x0, (s0)
+ li s0, TASK_THREAD_VSTATE_CTRL
+ add s0, s0, tp
+ REG_L s1, (s0)
+ ori s1, s1, RISCV_V_VSTATE_CTRL_PREEMPTIBLE
+ REG_S s1, (s0)
+1:
+#endif
+
REG_L a0, PT_STATUS(sp)
/*
* The current load reservation is effectively part of the processor's
diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
index 2344817f8640..6203990476b3 100644
--- a/arch/riscv/kernel/kernel_mode_vector.c
+++ b/arch/riscv/kernel/kernel_mode_vector.c
@@ -10,6 +10,7 @@
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <asm/vector.h>
#include <asm/switch_to.h>
@@ -48,6 +49,50 @@ void put_cpu_vector_context(void)
preempt_enable();
}
+#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
+void kernel_vector_allow_preemption(void)
+{
+ current->thread.vstate_ctrl |= RISCV_V_VSTATE_CTRL_PREEMPTIBLE;
+}
+
+static bool kernel_vector_preemptible(void)
+{
+ return !!(current->thread.vstate_ctrl & RISCV_V_VSTATE_CTRL_PREEMPTIBLE);
+}
+
+static int riscv_v_start_kernel_context(void)
+{
+ struct __riscv_v_ext_state *vstate;
+
+ if (!kernel_vector_preemptible())
+ return -EBUSY;
+
+ vstate = ¤t->thread.kernel_vstate;
+ if (!vstate->datap) {
+ vstate->datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
+ if (!vstate->datap)
+ return -ENOMEM;
+ }
+
+ get_cpu_vector_context();
+ riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
+ put_cpu_vector_context();
+
+ current->thread.trap_pt_regs = NULL;
+ WARN_ON(test_and_set_thread_flag(TIF_RISCV_V_KERNEL_MODE));
+ return 0;
+}
+
+static void riscv_v_stop_kernel_context(void)
+{
+ WARN_ON(!test_and_clear_thread_flag(TIF_RISCV_V_KERNEL_MODE));
+ current->thread.trap_pt_regs = NULL;
+}
+#else
+#define riscv_v_start_kernel_context() (0)
+#define riscv_v_stop_kernel_context() do {} while (0)
+#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
+
/*
* kernel_vector_begin(): obtain the CPU vector registers for use by the calling
* context
@@ -68,9 +113,10 @@ void kernel_vector_begin(void)
BUG_ON(!may_use_simd());
- get_cpu_vector_context();
-
- riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
+ if (!preemptible() || riscv_v_start_kernel_context()) {
+ get_cpu_vector_context();
+ riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
+ }
riscv_v_enable();
}
@@ -94,6 +140,9 @@ void kernel_vector_end(void)
riscv_v_disable();
- put_cpu_vector_context();
+ if (!test_thread_flag(TIF_RISCV_V_KERNEL_MODE))
+ put_cpu_vector_context();
+ else
+ riscv_v_stop_kernel_context();
}
EXPORT_SYMBOL_GPL(kernel_vector_end);
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index ec89e7edb6fd..18cb37c305ab 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -160,8 +160,11 @@ void flush_thread(void)
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
- if (has_vector())
+ if (has_vector()) {
kfree(tsk->thread.vstate.datap);
+ if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE))
+ kfree(tsk->thread.kernel_vstate.datap);
+ }
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -170,7 +173,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
*dst = *src;
/* clear entire V context, including datap for a new task */
memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
+ memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
+ clear_tsk_thread_flag(dst, TIF_RISCV_V_KERNEL_MODE);
return 0;
}
@@ -205,6 +210,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->a0 = 0; /* Return value of fork() */
p->thread.s[0] = 0;
}
+ kernel_vector_allow_preemption();
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
return 0;
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 9d583b760db4..42f227077ee5 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -122,7 +122,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt);
if (inherit)
ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
- tsk->thread.vstate_ctrl = ctrl;
+ tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK;
+ tsk->thread.vstate_ctrl |= ctrl;
}
bool riscv_v_vstate_ctrl_user_allowed(void)
--
2.17.1
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [v3, 5/5] riscv: vector: allow kernel-mode Vector with preemption
2023-10-19 15:45 ` [v3, 5/5] riscv: vector: allow kernel-mode Vector with preemption Andy Chiu
@ 2023-10-20 7:02 ` Andy Chiu
0 siblings, 0 replies; 10+ messages in thread
From: Andy Chiu @ 2023-10-20 7:02 UTC (permalink / raw)
To: linux-riscv, palmer
Cc: paul.walmsley, greentime.hu, guoren, bjorn, peterz, tglx,
Albert Ou, Heiko Stuebner, Vincent Chen, Conor Dooley,
Charlie Jenkins, Guo Ren, Jisheng Zhang, Björn Töpel,
Andrew Jones, Ley Foon Tan, Sia Jee Heng, Han-Kuan Chen,
Andrew Bresticker, Fangrui Song, Nick Knight, Jerry Shih
On Thu, Oct 19, 2023 at 11:46 PM Andy Chiu <andy.chiu@sifive.com> wrote:
>
> Add kernel_vstate to keep track of kernel-mode Vector registers when
> trap introduced context switch happens. Also, provide trap_pt_regs to
> let context save/restore routine reference status.VS at which the trap
> takes place. The thread flag TIF_RISCV_V_KERNEL_MODE indicates whether
> a task is running in kernel-mode Vector with preemption 'ON'. So context
> switch routines know and would save V-regs to kernel_vstate and restore
> V-regs immediately from kernel_vstate if the bit is set.
>
> Apart from a task's preemption status, the capability of
> running preemptive kernel-mode Vector is as well controlled by the
> RISCV_V_VSTATE_CTRL_PREEMPTIBLE mask in the task's
> thread.vstate_ctrl. This bit is masked whenever a trap takes place in
> kernel mode while executing preemptive Vector code.
>
> Also, provide a config CONFIG_RISCV_ISA_V_PREEMPTIVE to give users an
> option to disable preemptible kernel-mode Vector at build time. Users
> with constrained memory may want to disable this config as preemptible
> kernel-mode Vector needs extra space for tracking per thread's
> kernel-mode V context. Or, users might as well want to disable it if all
> kernel-mode Vector code is time sensitive and cannot tolerate context
> switch overhead.
>
> Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
> ---
> Changelog v3:
> - Guard vstate_save with {get,set}_cpu_vector_context
> - Add comments on preventions of nesting V contexts
> - remove warnings in context switch when trap's reg is not present (Conor)
> - refactor code (Björn)
> Changelog v2:
> - fix build fail when compiling without RISCV_ISA_V (Conor)
> - 's/TIF_RISCV_V_KMV/TIF_RISCV_V_KERNEL_MODE' and add comment (Conor)
> - merge Kconfig patch into this one (Conor).
> - 's/CONFIG_RISCV_ISA_V_PREEMPTIVE_KMV/CONFIG_RISCV_ISA_V_PREEMPTIVE/'
> (Conor)
> - fix some typos (Conor)
> - enclose assembly with RISCV_ISA_V_PREEMPTIVE.
> - change riscv_v_vstate_ctrl_config_kmv() to
> kernel_vector_allow_preemption() for better understanding. (Conor)
> - 's/riscv_v_kmv_preempitble/kernel_vector_preemptible/'
> ---
> arch/riscv/Kconfig | 10 +++++
> arch/riscv/include/asm/processor.h | 2 +
> arch/riscv/include/asm/simd.h | 9 +++-
> arch/riscv/include/asm/thread_info.h | 4 ++
> arch/riscv/include/asm/vector.h | 25 +++++++++--
> arch/riscv/kernel/asm-offsets.c | 2 +
> arch/riscv/kernel/entry.S | 49 ++++++++++++++++++++++
> arch/riscv/kernel/kernel_mode_vector.c | 57 ++++++++++++++++++++++++--
> arch/riscv/kernel/process.c | 8 +++-
> arch/riscv/kernel/vector.c | 3 +-
> 10 files changed, 159 insertions(+), 10 deletions(-)
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index d607ab0f7c6d..dc51164b8fd4 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -520,6 +520,16 @@ config RISCV_ISA_V_DEFAULT_ENABLE
>
> If you don't know what to do here, say Y.
>
> +config RISCV_ISA_V_PREEMPTIVE
> + bool "Run kernel-mode Vector with kernel preemption"
> + depends on PREEMPTION
> + depends on RISCV_ISA_V
> + default y
> + help
> + Ordinarily the kernel disables preemption before running in-kernel
> + Vector code. This config frees the kernel from disabling preemption
> + by adding memory on demand for tracking kernel's V-context.
> +
> config TOOLCHAIN_HAS_ZBB
> bool
> default y
> diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
> index 3e23e1786d05..f9b85e37e624 100644
> --- a/arch/riscv/include/asm/processor.h
> +++ b/arch/riscv/include/asm/processor.h
> @@ -82,6 +82,8 @@ struct thread_struct {
> unsigned long bad_cause;
> unsigned long vstate_ctrl;
> struct __riscv_v_ext_state vstate;
> + struct pt_regs *trap_pt_regs;
> + struct __riscv_v_ext_state kernel_vstate;
> };
>
> /* Whitelist the fstate from the task_struct for hardened usercopy */
> diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
> index 0c5ba555b460..93d9015b4751 100644
> --- a/arch/riscv/include/asm/simd.h
> +++ b/arch/riscv/include/asm/simd.h
> @@ -12,6 +12,7 @@
> #include <linux/percpu.h>
> #include <linux/preempt.h>
> #include <linux/types.h>
> +#include <linux/thread_info.h>
>
> #ifdef CONFIG_RISCV_ISA_V
>
> @@ -33,8 +34,14 @@ static __must_check inline bool may_use_simd(void)
> * cannot change under our feet -- if it's set we cannot be
> * migrated, and if it's clear we cannot be migrated to a CPU
> * where it is set.
> + *
> + * The TIF_RISCV_V_KERNEL_MODE check here prevent us from nesting a
> + * non-preemptible V context on top of a preemptible one. For example,
> + * executing V in a softirq context is prevented if the core is
> + * interrupted during the execution of preemptible V.
> */
> - return !in_hardirq() && !in_nmi() && !this_cpu_read(vector_context_busy);
> + return !in_hardirq() && !in_nmi() && !this_cpu_read(vector_context_busy) &&
> + !test_thread_flag(TIF_RISCV_V_KERNEL_MODE);
> }
>
> #else /* ! CONFIG_RISCV_ISA_V */
> diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> index b182f2d03e25..8797d520e8ef 100644
> --- a/arch/riscv/include/asm/thread_info.h
> +++ b/arch/riscv/include/asm/thread_info.h
> @@ -94,6 +94,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
> #define TIF_32BIT 11 /* compat-mode 32bit process */
> #define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returing to user */
> +#define TIF_RISCV_V_KERNEL_MODE 13 /* kernel-mode Vector run with preemption-on */
>
> #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
> #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
> @@ -101,9 +102,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
> #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
> #define _TIF_UPROBE (1 << TIF_UPROBE)
> #define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
> +#define _TIF_RISCV_V_KERNEL_MODE (1 << TIF_RISCV_V_KERNEL_MODE)
>
> #define _TIF_WORK_MASK \
> (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
> _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
>
> +#define RISCV_V_VSTATE_CTRL_PREEMPTIBLE 0x20
> +
> #endif /* _ASM_RISCV_THREAD_INFO_H */
> diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
> index d356eac8c0b4..27bb49e97af8 100644
> --- a/arch/riscv/include/asm/vector.h
> +++ b/arch/riscv/include/asm/vector.h
> @@ -198,9 +198,22 @@ static inline void __switch_to_vector(struct task_struct *prev,
> {
> struct pt_regs *regs;
>
> - regs = task_pt_regs(prev);
> - riscv_v_vstate_save(&prev->thread.vstate, regs);
> - riscv_v_vstate_set_restore(next, task_pt_regs(next));
> + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) &&
> + test_tsk_thread_flag(prev, TIF_RISCV_V_KERNEL_MODE)) {
> + regs = prev->thread.trap_pt_regs;
> + riscv_v_vstate_save(&prev->thread.kernel_vstate, regs);
> + } else {
> + regs = task_pt_regs(prev);
> + riscv_v_vstate_save(&prev->thread.vstate, regs);
> + }
> +
> + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) &&
> + test_tsk_thread_flag(next, TIF_RISCV_V_KERNEL_MODE)) {
> + regs = next->thread.trap_pt_regs;
> + riscv_v_vstate_restore(&next->thread.kernel_vstate, regs);
> + } else {
> + riscv_v_vstate_set_restore(next, task_pt_regs(next));
> + }
> }
>
> void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
> @@ -225,4 +238,10 @@ static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
>
> #endif /* CONFIG_RISCV_ISA_V */
>
> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
> +void kernel_vector_allow_preemption(void);
> +#else
> +#define kernel_vector_allow_preemption() do {} while (0)
> +#endif
> +
> #endif /* ! __ASM_RISCV_VECTOR_H */
> diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
> index d6a75aac1d27..4b062f7741b2 100644
> --- a/arch/riscv/kernel/asm-offsets.c
> +++ b/arch/riscv/kernel/asm-offsets.c
> @@ -38,6 +38,8 @@ void asm_offsets(void)
> OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
> OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
> OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
> + OFFSET(TASK_THREAD_TRAP_REGP, task_struct, thread.trap_pt_regs);
> + OFFSET(TASK_THREAD_VSTATE_CTRL, task_struct, thread.vstate_ctrl);
>
> OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
> OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
> diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
> index 143a2bb3e697..ec8baada608f 100644
> --- a/arch/riscv/kernel/entry.S
> +++ b/arch/riscv/kernel/entry.S
> @@ -66,6 +66,33 @@ _save_context:
> REG_S s4, PT_CAUSE(sp)
> REG_S s5, PT_TP(sp)
>
> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
> + /*
> + * Record the register set at the frame where in-kernel V registers are
> + * last alive.
> + */
> + REG_L s0, TASK_TI_FLAGS(tp)
> + li s1, 1 << TIF_RISCV_V_KERNEL_MODE
> + and s0, s0, s1
> + beqz s0, 1f
> + li s0, TASK_THREAD_TRAP_REGP
> + add s0, s0, tp
> + REG_L s1, (s0)
> + bnez s1, 1f
> + REG_S sp, (s0)
> + li s0, TASK_THREAD_VSTATE_CTRL
> + add s0, s0, tp
> + REG_L s1, (s0)
> + /*
> + * Nesting preemptible Vector context is prevented by unsetting
> + * RISCV_V_VSTATE_CTRL_PREEMPTIBLE here.
> + */
> + li s2, ~RISCV_V_VSTATE_CTRL_PREEMPTIBLE
> + and s1, s1, s2
> + REG_S s1, (s0)
> +1:
> +#endif
> +
> /*
> * Set the scratch register to 0, so that if a recursive exception
> * occurs, the exception vector knows it came from the kernel
> @@ -129,6 +156,28 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
> */
> csrw CSR_SCRATCH, tp
> 1:
> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
> + /*
> + * Clear tracking of the trap registers when we return to the frame
> + * that uses kernel mode Vector.
> + */
> + REG_L s0, TASK_TI_FLAGS(tp)
> + li s1, 1 << TIF_RISCV_V_KERNEL_MODE
> + and s0, s0, s1
> + beqz s0, 1f
> + li s0, TASK_THREAD_TRAP_REGP
> + add s0, s0, tp
> + REG_L s1, (s0)
> + bne s1, sp, 1f
> + REG_S x0, (s0)
> + li s0, TASK_THREAD_VSTATE_CTRL
> + add s0, s0, tp
> + REG_L s1, (s0)
> + ori s1, s1, RISCV_V_VSTATE_CTRL_PREEMPTIBLE
> + REG_S s1, (s0)
> +1:
> +#endif
> +
> REG_L a0, PT_STATUS(sp)
> /*
> * The current load reservation is effectively part of the processor's
> diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c
> index 2344817f8640..6203990476b3 100644
> --- a/arch/riscv/kernel/kernel_mode_vector.c
> +++ b/arch/riscv/kernel/kernel_mode_vector.c
> @@ -10,6 +10,7 @@
> #include <linux/percpu.h>
> #include <linux/preempt.h>
> #include <linux/types.h>
> +#include <linux/slab.h>
>
> #include <asm/vector.h>
> #include <asm/switch_to.h>
> @@ -48,6 +49,50 @@ void put_cpu_vector_context(void)
> preempt_enable();
> }
>
> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
> +void kernel_vector_allow_preemption(void)
> +{
> + current->thread.vstate_ctrl |= RISCV_V_VSTATE_CTRL_PREEMPTIBLE;
> +}
> +
> +static bool kernel_vector_preemptible(void)
> +{
> + return !!(current->thread.vstate_ctrl & RISCV_V_VSTATE_CTRL_PREEMPTIBLE);
> +}
> +
> +static int riscv_v_start_kernel_context(void)
> +{
> + struct __riscv_v_ext_state *vstate;
> +
> + if (!kernel_vector_preemptible())
> + return -EBUSY;
> +
> + vstate = ¤t->thread.kernel_vstate;
> + if (!vstate->datap) {
> + vstate->datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
> + if (!vstate->datap)
> + return -ENOMEM;
> + }
> +
> + get_cpu_vector_context();
> + riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
> + put_cpu_vector_context();
> +
> + current->thread.trap_pt_regs = NULL;
> + WARN_ON(test_and_set_thread_flag(TIF_RISCV_V_KERNEL_MODE));
> + return 0;
> +}
> +
> +static void riscv_v_stop_kernel_context(void)
> +{
> + WARN_ON(!test_and_clear_thread_flag(TIF_RISCV_V_KERNEL_MODE));
> + current->thread.trap_pt_regs = NULL;
> +}
> +#else
> +#define riscv_v_start_kernel_context() (0)
Must return a non-zero value here, or it breaks !PREEMPT kernels. I
will send out a v4 with this fixed like:
"#define riscv_v_start_kernel_context() (-ENOENT)"
> +#define riscv_v_stop_kernel_context() do {} while (0)
> +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */
> +
> /*
> * kernel_vector_begin(): obtain the CPU vector registers for use by the calling
> * context
> @@ -68,9 +113,10 @@ void kernel_vector_begin(void)
>
> BUG_ON(!may_use_simd());
>
> - get_cpu_vector_context();
> -
> - riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
> + if (!preemptible() || riscv_v_start_kernel_context()) {
> + get_cpu_vector_context();
> + riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current));
> + }
>
> riscv_v_enable();
> }
> @@ -94,6 +140,9 @@ void kernel_vector_end(void)
>
> riscv_v_disable();
>
> - put_cpu_vector_context();
> + if (!test_thread_flag(TIF_RISCV_V_KERNEL_MODE))
> + put_cpu_vector_context();
> + else
> + riscv_v_stop_kernel_context();
> }
> EXPORT_SYMBOL_GPL(kernel_vector_end);
> diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
> index ec89e7edb6fd..18cb37c305ab 100644
> --- a/arch/riscv/kernel/process.c
> +++ b/arch/riscv/kernel/process.c
> @@ -160,8 +160,11 @@ void flush_thread(void)
> void arch_release_task_struct(struct task_struct *tsk)
> {
> /* Free the vector context of datap. */
> - if (has_vector())
> + if (has_vector()) {
> kfree(tsk->thread.vstate.datap);
> + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE))
> + kfree(tsk->thread.kernel_vstate.datap);
> + }
> }
>
> int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
> @@ -170,7 +173,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
> *dst = *src;
> /* clear entire V context, including datap for a new task */
> memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
> + memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
> clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);
> + clear_tsk_thread_flag(dst, TIF_RISCV_V_KERNEL_MODE);
>
> return 0;
> }
> @@ -205,6 +210,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
> childregs->a0 = 0; /* Return value of fork() */
> p->thread.s[0] = 0;
> }
> + kernel_vector_allow_preemption();
> p->thread.ra = (unsigned long)ret_from_fork;
> p->thread.sp = (unsigned long)childregs; /* kernel sp */
> return 0;
> diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
> index 9d583b760db4..42f227077ee5 100644
> --- a/arch/riscv/kernel/vector.c
> +++ b/arch/riscv/kernel/vector.c
> @@ -122,7 +122,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt,
> ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt);
> if (inherit)
> ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT;
> - tsk->thread.vstate_ctrl = ctrl;
> + tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK;
> + tsk->thread.vstate_ctrl |= ctrl;
> }
>
> bool riscv_v_vstate_ctrl_user_allowed(void)
> --
> 2.17.1
>
Sorry for disrupting the list, and thanks to Jerry for the finding.
Andy
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
^ permalink raw reply [flat|nested] 10+ messages in thread