* [PATCH v2 1/2] powerpc/code-patching: Test patch_instructions() during boot
From: Benjamin Gray @ 2024-03-25  5:28 UTC
  To: linuxppc-dev; +Cc: Benjamin Gray

patch_instructions() introduces new behaviour with a couple of
variations. Test each case of

  * a repeated 32-bit instruction,
  * a repeated 64-bit instruction (ppc64), and
  * a copied sequence of instructions

both when the patch fits within a single page and when it crosses a
page boundary.
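
For reference, a minimal sketch of the interface these tests exercise;
the prototype is inferred from the calls in the test below, and the
wrapper function is illustrative only, not part of this patch:

  int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);

  static int example_patch(u32 *dst, u32 *seq, size_t nbytes)
  {
          u32 nop = PPC_RAW_NOP();
          int err;

          /* repeat_instr = true: fill nbytes with copies of the one instruction */
          err = patch_instructions(dst, &nop, nbytes, true);
          if (err)
                  return err;

          /* repeat_instr = false: copy the nbytes-long sequence verbatim */
          return patch_instructions(dst, seq, nbytes, false);
  }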

Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>

---

v1: https://lore.kernel.org/all/20240315025736.404867-1-bgray@linux.ibm.com/

v2: * Shrink the code array to reduce frame size. It still
      crosses a page, and 32 vs 256 words is unlikely to
      make a difference in test coverage otherwise.
---
 arch/powerpc/lib/test-code-patching.c | 92 +++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)

diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c
index c44823292f73..f76030087f98 100644
--- a/arch/powerpc/lib/test-code-patching.c
+++ b/arch/powerpc/lib/test-code-patching.c
@@ -347,6 +347,97 @@ static void __init test_prefixed_patching(void)
 	check(!memcmp(iptr, expected, sizeof(expected)));
 }
 
+static void __init test_multi_instruction_patching(void)
+{
+	u32 code[32];
+	void *buf;
+	u32 *addr32;
+	u64 *addr64;
+	ppc_inst_t inst64 = ppc_inst_prefix(OP_PREFIX << 26 | 3UL << 24, PPC_RAW_TRAP());
+	u32 inst32 = PPC_RAW_NOP();
+
+	buf = vzalloc(PAGE_SIZE * 8);
+	check(buf);
+	if (!buf)
+		return;
+
+	/* Test single page 32-bit repeated instruction */
+	addr32 = buf + PAGE_SIZE;
+	check(!patch_instructions(addr32 + 1, &inst32, 12, true));
+
+	check(addr32[0] == 0);
+	check(addr32[1] == inst32);
+	check(addr32[2] == inst32);
+	check(addr32[3] == inst32);
+	check(addr32[4] == 0);
+
+	/* Test single page 64-bit repeated instruction */
+	if (IS_ENABLED(CONFIG_PPC64)) {
+		check(ppc_inst_prefixed(inst64));
+
+		addr64 = buf + PAGE_SIZE * 2;
+		ppc_inst_write(code, inst64);
+		check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true));
+
+		check(addr64[0] == 0);
+		check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64));
+		check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64));
+		check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64));
+		check(addr64[4] == 0);
+	}
+
+	/* Test single page memcpy */
+	addr32 = buf + PAGE_SIZE * 3;
+
+	for (int i = 0; i < ARRAY_SIZE(code); i++)
+		code[i] = i + 1;
+
+	check(!patch_instructions(addr32 + 1, code, sizeof(code), false));
+
+	check(addr32[0] == 0);
+	check(!memcmp(&addr32[1], code, sizeof(code)));
+	check(addr32[ARRAY_SIZE(code) + 1] == 0);
+
+	/* Test multipage 32-bit repeated instruction */
+	addr32 = buf + PAGE_SIZE * 4 - 8;
+	check(!patch_instructions(addr32 + 1, &inst32, 12, true));
+
+	check(addr32[0] == 0);
+	check(addr32[1] == inst32);
+	check(addr32[2] == inst32);
+	check(addr32[3] == inst32);
+	check(addr32[4] == 0);
+
+	/* Test multipage 64-bit repeated instruction */
+	if (IS_ENABLED(CONFIG_PPC64)) {
+		check(ppc_inst_prefixed(inst64));
+
+		addr64 = buf + PAGE_SIZE * 5 - 8;
+		ppc_inst_write(code, inst64);
+		check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true));
+
+		check(addr64[0] == 0);
+		check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64));
+		check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64));
+		check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64));
+		check(addr64[4] == 0);
+	}
+
+	/* Test multipage memcpy */
+	addr32 = buf + PAGE_SIZE * 6 - 12;
+
+	for (int i = 0; i < ARRAY_SIZE(code); i++)
+		code[i] = i + 1;
+
+	check(!patch_instructions(addr32 + 1, code, sizeof(code), false));
+
+	check(addr32[0] == 0);
+	check(!memcmp(&addr32[1], code, sizeof(code)));
+	check(addr32[ARRAY_SIZE(code) + 1] == 0);
+
+	vfree(buf);
+}
+
 static int __init test_code_patching(void)
 {
 	pr_info("Running code patching self-tests ...\n");
@@ -356,6 +447,7 @@ static int __init test_code_patching(void)
 	test_create_function_call();
 	test_translate_branch();
 	test_prefixed_patching();
+	test_multi_instruction_patching();
 
 	return 0;
 }
-- 
2.44.0



* [PATCH v2 2/2] powerpc/code-patching: Use dedicated memory routines for patching
From: Benjamin Gray @ 2024-03-25  5:28 UTC
  To: linuxppc-dev; +Cc: Benjamin Gray

The page set up as a writable alias for patching may be in quadrant 0
(userspace) when the temporary mm path is used, which causes sanitiser
failures. Sanitiser failures also occur on the non-mm path because the
plain memset family is instrumented, and KASAN treats the patching
window as poisoned.

Introduce locally defined patch_* variants of memset that perform an
uninstrumented lower-level set and detect write errors, as the original
single-instruction patching variant does.

copy_to_user() is not correct here, as the PTE makes this a proper
kernel page (the EAA is privileged access only, RW). It just happens to
be in quadrant 0 because that is the hardware's mechanism for
translating with the current PID rather than PID 0. Importantly, it
would be incorrect to allow user page accesses.

Now that the patching memsets are used, also propagate any failure up
to the caller, as the single-instruction patch variant does.
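
As a rough illustration of the resulting error contract from a caller's
point of view (hypothetical caller and variable names, not part of this
patch):

  int err = patch_instructions(dst, code, len, false);

  /* faults hit while writing to the patching window now surface here */
  if (err)
          pr_warn("instruction patching failed: %d\n", err);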

Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>

---

v2: * Fix typo in EAA (from EEA)
    * Fix references to quadrant number (0, not 1)
    * Use copy_to_kernel_nofault() over custom memcpy
    * Drop custom memcpy optimisation patch
---
 arch/powerpc/lib/code-patching.c | 31 +++++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c6ab46156cda..df64343b9214 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -372,9 +372,32 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
 }
 NOKPROBE_SYMBOL(patch_instruction);
 
+static int patch_memset64(u64 *addr, u64 val, size_t count)
+{
+	for (u64 *end = addr + count; addr < end; addr++)
+		__put_kernel_nofault(addr, &val, u64, failed);
+
+	return 0;
+
+failed:
+	return -EPERM;
+}
+
+static int patch_memset32(u32 *addr, u32 val, size_t count)
+{
+	for (u32 *end = addr + count; addr < end; addr++)
+		__put_kernel_nofault(addr, &val, u32, failed);
+
+	return 0;
+
+failed:
+	return -EPERM;
+}
+
 static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
 {
 	unsigned long start = (unsigned long)patch_addr;
+	int err;
 
 	/* Repeat instruction */
 	if (repeat_instr) {
@@ -383,19 +406,19 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
 		if (ppc_inst_prefixed(instr)) {
 			u64 val = ppc_inst_as_ulong(instr);
 
-			memset64((u64 *)patch_addr, val, len / 8);
+			err = patch_memset64((u64 *)patch_addr, val, len / 8);
 		} else {
 			u32 val = ppc_inst_val(instr);
 
-			memset32(patch_addr, val, len / 4);
+			err = patch_memset32(patch_addr, val, len / 4);
 		}
 	} else {
-		memcpy(patch_addr, code, len);
+		err = copy_to_kernel_nofault(patch_addr, code, len);
 	}
 
 	smp_wmb();	/* smp write barrier */
 	flush_icache_range(start, start + len);
-	return 0;
+	return err;
 }
 
 /*
-- 
2.44.0



* Re: [PATCH v2 1/2] powerpc/code-patching: Test patch_instructions() during boot
From: Michael Ellerman @ 2024-05-08 13:39 UTC
  To: linuxppc-dev, Benjamin Gray

On Mon, 25 Mar 2024 16:28:14 +1100, Benjamin Gray wrote:
> patch_instructions() introduces new behaviour with a couple of
> variations. Test each case of
> 
>   * a repeated 32-bit instruction,
>   * a repeated 64-bit instruction (ppc64), and
>   * a copied sequence of instructions
> 
> [...]

Applied to powerpc/next.

[1/2] powerpc/code-patching: Test patch_instructions() during boot
      https://git.kernel.org/powerpc/c/c5ef5e35844ad30503c49802b9d6a6c818fca886
[2/2] powerpc/code-patching: Use dedicated memory routines for patching
      https://git.kernel.org/powerpc/c/c3710ee7cd695dc1b0b4b8cfbf464e313467f970

cheers

