* [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c
@ 2008-06-24 1:32 Michael Ellerman
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev
We currently have a few routines for patching code in asm/system.h, because
they didn't fit anywhere else. I'd like to clean them up a little and add
some more, so first move them into a dedicated C file - they don't need to
be inlined.
While we're moving the code, drop create_function_call(): its intended
caller was never merged, and it will be replaced in future with something
different.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
This series is also in my git tree:
http://git.kernel.org/?p=linux/kernel/git/mpe/linux-2.6.git
arch/powerpc/kernel/crash_dump.c | 1 +
arch/powerpc/lib/Makefile | 2 +
arch/powerpc/lib/code-patching.c | 33 ++++++++++++++++++++
arch/powerpc/platforms/86xx/mpc86xx_smp.c | 1 +
arch/powerpc/platforms/powermac/smp.c | 1 +
include/asm-powerpc/code-patching.h | 25 +++++++++++++++
include/asm-powerpc/system.h | 48 -----------------------------
7 files changed, 63 insertions(+), 48 deletions(-)
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 9ee3c52..35b9a66 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -14,6 +14,7 @@
#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/lmb.h>
+#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index c71d37d..305c7df 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -24,3 +24,5 @@ obj-$(CONFIG_SMP) += locks.o
endif
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
+
+obj-y += code-patching.o
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
new file mode 100644
index 0000000..7afae88
--- /dev/null
+++ b/arch/powerpc/lib/code-patching.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <asm/code-patching.h>
+
+
+void create_instruction(unsigned long addr, unsigned int instr)
+{
+ unsigned int *p;
+ p = (unsigned int *)addr;
+ *p = instr;
+ asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
+}
+
+void create_branch(unsigned long addr, unsigned long target, int flags)
+{
+ unsigned int instruction;
+
+ if (! (flags & BRANCH_ABSOLUTE))
+ target = target - addr;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
+
+ create_instruction(addr, instruction);
+}
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index ba55b0f..63f5585 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/delay.h>
+#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index cb2d894..bf202f7 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -36,6 +36,7 @@
#include <asm/ptrace.h>
#include <asm/atomic.h>
+#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
new file mode 100644
index 0000000..0b91fdf
--- /dev/null
+++ b/include/asm-powerpc/code-patching.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_POWERPC_CODE_PATCHING_H
+#define _ASM_POWERPC_CODE_PATCHING_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Flags for create_branch:
+ * "b" == create_branch(addr, target, 0);
+ * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK 0x1
+#define BRANCH_ABSOLUTE 0x2
+
+extern void create_branch(unsigned long addr, unsigned long target, int flags);
+extern void create_instruction(unsigned long addr, unsigned int instr);
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index df781ad..d141e48 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -519,54 +519,6 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-static inline void create_instruction(unsigned long addr, unsigned int instr)
-{
- unsigned int *p;
- p = (unsigned int *)addr;
- *p = instr;
- asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
-}
-
-/* Flags for create_branch:
- * "b" == create_branch(addr, target, 0);
- * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
- * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
- * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
- */
-#define BRANCH_SET_LINK 0x1
-#define BRANCH_ABSOLUTE 0x2
-
-static inline void create_branch(unsigned long addr,
- unsigned long target, int flags)
-{
- unsigned int instruction;
-
- if (! (flags & BRANCH_ABSOLUTE))
- target = target - addr;
-
- /* Mask out the flags and target, so they don't step on each other. */
- instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
-
- create_instruction(addr, instruction);
-}
-
-static inline void create_function_call(unsigned long addr, void * func)
-{
- unsigned long func_addr;
-
-#ifdef CONFIG_PPC64
- /*
- * On PPC64 the function pointer actually points to the function's
- * descriptor. The first entry in the descriptor is the address
- * of the function text.
- */
- func_addr = *(unsigned long *)func;
-#else
- func_addr = (unsigned long)func;
-#endif
- create_branch(addr, func_addr, BRANCH_SET_LINK);
-}
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif
--
1.5.5
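
To make the interface that has just been moved concrete, here is a minimal
sketch of a caller, assuming the signatures as they stand after this patch
(the address is hypothetical, purely for illustration; later patches in the
series rename these routines and change their calling convention):

	#include <asm/code-patching.h>

	static void example_patch_site(void)
	{
		/* Hypothetical kernel text address to patch. */
		unsigned long site = 0xc000000000100000UL;

		/* Write a "bl" to a target 64 bytes away; the routine
		 * also flushes the icache so the CPU sees the change. */
		create_branch(site, site + 64, BRANCH_SET_LINK);

		/* Or store an arbitrary instruction, here the nop. */
		create_instruction(site, 0x60000000);
	}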
* [PATCH 02/14] powerpc: Allow create_branch() to return errors
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

Currently create_branch() creates a branch instruction for you, and patches
it into the call site. In some circumstances it would be nice to be able to
create the instruction and patch it later, and also some code might want
to check for errors in the branch creation before doing the patching. A
future patch will change create_branch() to check for errors.

For callers that don't care, replace create_branch() with patch_branch(),
which just creates the branch and patches it directly.

While we're touching all the callers, change to using unsigned int *, as
this seems to match usage better. That allows (and requires) us to remove
the volatile in the definition of vector in powermac/smp.c and
mpc86xx_smp.c, which is correct because now that we're passing vector as
an unsigned int * the compiler knows that its value might change across
the patch_branch() call.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 arch/powerpc/kernel/crash_dump.c          |    6 ++++--
 arch/powerpc/lib/code-patching.c          |   20 ++++++++++++--------
 arch/powerpc/platforms/86xx/mpc86xx_smp.c |    5 ++---
 arch/powerpc/platforms/powermac/smp.c     |    5 ++---
 include/asm-powerpc/code-patching.h       |    6 ++++--
 5 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 35b9a66..2664854 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -34,6 +34,8 @@ void __init reserve_kdump_trampoline(void)
 
 static void __init create_trampoline(unsigned long addr)
 {
+	unsigned int *p = (unsigned int *)addr;
+
 	/* The maximum range of a single instruction branch, is the current
 	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
 	 * need to branch to current address + 32 MB. So we insert a nop at
@@ -42,8 +44,8 @@ static void __init create_trampoline(unsigned long addr)
 	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
 	 * two instructions it doesn't require any registers.
 	 */
-	create_instruction(addr, 0x60000000); /* nop */
-	create_branch(addr + 4, addr + PHYSICAL_START, 0);
+	patch_instruction(p, 0x60000000); /* nop */
+	patch_branch(++p, addr + PHYSICAL_START, 0);
 }
 
 void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 7afae88..638dde3 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -11,23 +11,27 @@
 #include <asm/code-patching.h>
 
 
-void create_instruction(unsigned long addr, unsigned int instr)
+void patch_instruction(unsigned int *addr, unsigned int instr)
 {
-	unsigned int *p;
-	p  = (unsigned int *)addr;
-	*p = instr;
-	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
+	*addr = instr;
+	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
 }
 
-void create_branch(unsigned long addr, unsigned long target, int flags)
+void patch_branch(unsigned int *addr, unsigned long target, int flags)
+{
+	patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+unsigned int create_branch(const unsigned int *addr,
+			   unsigned long target, int flags)
 {
 	unsigned int instruction;
 
 	if (! (flags & BRANCH_ABSOLUTE))
-		target = target - addr;
+		target = target - (unsigned long)addr;
 
 	/* Mask out the flags and target, so they don't step on each other. */
 	instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
 
-	create_instruction(addr, instruction);
+	return instruction;
 }
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 63f5585..835f2dc 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -57,8 +57,7 @@ smp_86xx_kick_cpu(int nr)
 	unsigned int save_vector;
 	unsigned long target, flags;
 	int n = 0;
-	volatile unsigned int *vector
-		 = (volatile unsigned int *)(KERNELBASE + 0x100);
+	unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
 
 	if (nr < 0 || nr >= NR_CPUS)
 		return;
@@ -72,7 +71,7 @@ smp_86xx_kick_cpu(int nr)
 
 	/* Setup fake reset vector to call __secondary_start_mpc86xx. */
 	target = (unsigned long) __secondary_start_mpc86xx;
-	create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
+	patch_branch(vector, target, BRANCH_SET_LINK);
 
 	/* Kick that CPU */
 	smp_86xx_release_core(nr);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index bf202f7..4ae3d00 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -787,8 +787,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 {
 	unsigned int save_vector;
 	unsigned long target, flags;
-	volatile unsigned int *vector
-		 = ((volatile unsigned int *)(KERNELBASE+0x100));
+	unsigned int *vector = (unsigned int *)(KERNELBASE+0x100);
 
 	if (nr < 0 || nr > 3)
 		return;
@@ -805,7 +804,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 	 *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
 	 */
 	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
-	create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
+	patch_branch(vector, target, BRANCH_SET_LINK);
 
 	/* Put some life in our friend */
 	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index 0b91fdf..fdb187c 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -19,7 +19,9 @@
 #define BRANCH_SET_LINK	0x1
 #define BRANCH_ABSOLUTE	0x2
 
-extern void create_branch(unsigned long addr, unsigned long target, int flags);
-extern void create_instruction(unsigned long addr, unsigned int instr);
+unsigned int create_branch(const unsigned int *addr,
+			   unsigned long target, int flags);
+void patch_branch(unsigned int *addr, unsigned long target, int flags);
+void patch_instruction(unsigned int *addr, unsigned int instr);
 
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
-- 
1.5.5
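
The new split is easiest to see in a short sketch: the branch can now be
built, inspected, and only then committed to memory (illustrative only;
error checking of the returned instruction arrives in the next patch):

	#include <asm/code-patching.h>

	static void example_two_step_patch(unsigned int *site,
					   unsigned long target)
	{
		unsigned int instr;

		/* Build the branch without touching memory... */
		instr = create_branch(site, target, BRANCH_SET_LINK);

		/* ...a caller could inspect instr here... */

		/* ...then write it out and flush the caches. */
		patch_instruction(site, instr);

		/* Callers that don't care use the one-shot helper. */
		patch_branch(site, target, BRANCH_SET_LINK);
	}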
* Re: [PATCH 02/14] powerpc: Allow create_branch() to return errors
From: Kumar Gala @ 2008-06-24 13:20 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> Currently create_branch() creates a branch instruction for you, and
> patches it into the call site. In some circumstances it would be nice
> to be able to create the instruction and patch it later, and also some
> code might want to check for errors in the branch creation before doing
> the patching. A future patch will change create_branch() to check for
> errors.
>
> For callers that don't care, replace create_branch() with
> patch_branch(), which just creates the branch and patches it directly.
>
> While we're touching all the callers, change to using unsigned int *,
> as this seems to match usage better. That allows (and requires) us to
> remove the volatile in the definition of vector in powermac/smp.c and
> mpc86xx_smp.c, which is correct because now that we're passing vector
> as an unsigned int * the compiler knows that its value might change
> across the patch_branch() call.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>

Acked-by: Kumar Gala <galak@kernel.crashing.org>

- k
* Re: [PATCH 02/14] powerpc: Allow create_branch() to return errors
From: Jon Loeliger @ 2008-06-24 13:26 UTC
To: Michael Ellerman
Cc: linuxppc-dev

Michael Ellerman wrote:

> Currently create_branch() creates a branch instruction for you, and
> patches it into the call site. In some circumstances it would be nice
> to be able to create the instruction and patch it later, and also some
> code might want to check for errors in the branch creation before doing
> the patching. A future patch will change create_branch() to check for
> errors.
>
> For callers that don't care, replace create_branch() with
> patch_branch(), which just creates the branch and patches it directly.
>
> While we're touching all the callers, change to using unsigned int *,
> as this seems to match usage better. That allows (and requires) us to
> remove the volatile in the definition of vector in powermac/smp.c and
> mpc86xx_smp.c, which is correct because now that we're passing vector
> as an unsigned int * the compiler knows that its value might change
> across the patch_branch() call.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
> ---

86xx bits...

Acked-by: Jon Loeliger <jdl@freescale.com>
* [PATCH 03/14] powerpc: Make create_branch() return errors if the branch target is too large
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

If you pass create_branch() a target which is more than 32 MB - 4 after,
or more than 32 MB before, the branch site, then it's impossible to create
an immediate branch. The current code doesn't check, which will lead to us
creating a branch to somewhere else - which is bad.

For code that cares to check, we return 0, which is easy to check for, and
for code that doesn't, at least we'll be creating an illegal instruction,
rather than a branch to some random address.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 arch/powerpc/lib/code-patching.c |   10 ++++++++--
 1 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 638dde3..430f4c1 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -26,12 +26,18 @@ unsigned int create_branch(const unsigned int *addr,
 			   unsigned long target, int flags)
 {
 	unsigned int instruction;
+	long offset;
 
+	offset = target;
 	if (! (flags & BRANCH_ABSOLUTE))
-		target = target - (unsigned long)addr;
+		offset = offset - (unsigned long)addr;
+
+	/* Check we can represent the target in the instruction format */
+	if (offset < -0x2000000 || offset > 0x1fffffc || offset & 0x3)
+		return 0;
 
 	/* Mask out the flags and target, so they don't step on each other. */
-	instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
+	instruction = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC);
 
 	return instruction;
 }
-- 
1.5.5
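
A sketch of how a caller can use the new error return; since 0 is never a
valid branch instruction (its opcode field would be zero), it is an
unambiguous failure value:

	#include <asm/code-patching.h>

	static int example_checked_patch(unsigned int *site,
					 unsigned long target)
	{
		unsigned int instr = create_branch(site, target, 0);

		/* 0 means the target is out of range for an immediate
		 * branch (beyond +/- 32 MB) or not word aligned. */
		if (!instr)
			return -1;

		patch_instruction(site, instr);
		return 0;
	}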
* Re: [PATCH 03/14] powerpc: Make create_branch() return errors if the branch target is too large
From: Kumar Gala @ 2008-06-24 13:21 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> If you pass create_branch() a target which is more than 32 MB - 4
> after, or more than 32 MB before, the branch site, then it's impossible
> to create an immediate branch. The current code doesn't check, which
> will lead to us creating a branch to somewhere else - which is bad.
>
> For code that cares to check, we return 0, which is easy to check for,
> and for code that doesn't, at least we'll be creating an illegal
> instruction, rather than a branch to some random address.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>

Acked-by: Kumar Gala <galak@kernel.crashing.org>

- k
* [PATCH 04/14] powerpc: Add ppc_function_entry() which gets the entry point for a function
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

Because function pointers point to different things on 32-bit vs 64-bit,
add a macro that deals with dereferencing the OPD on 64-bit. The soon to
be merged ftrace wants this, as well as other code I am working on.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 include/asm-powerpc/code-patching.h |   16 ++++++++++++++++
 1 files changed, 16 insertions(+), 0 deletions(-)

diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index fdb187c..a45a7ff 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -10,6 +10,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <asm/types.h>
+
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
  * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
@@ -24,4 +26,18 @@ unsigned int create_branch(const unsigned int *addr,
 void patch_branch(unsigned int *addr, unsigned long target, int flags);
 void patch_instruction(unsigned int *addr, unsigned int instr);
 
+static inline unsigned long ppc_function_entry(void *func)
+{
+#ifdef CONFIG_PPC64
+	/*
+	 * On PPC64 the function pointer actually points to the function's
+	 * descriptor. The first entry in the descriptor is the address
+	 * of the function text.
+	 */
+	return ((func_descr_t *)func)->entry;
+#else
+	return (unsigned long)func;
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
-- 
1.5.5
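
A short sketch of why this helper matters when patching branches to C
functions (my_handler is a hypothetical function, used only for
illustration):

	#include <asm/code-patching.h>

	extern void my_handler(void);	/* hypothetical target */

	static void example_branch_to_function(unsigned int *site)
	{
		/* On 64-bit this dereferences the function descriptor
		 * to get the text address; on 32-bit the pointer value
		 * already is the entry point. */
		unsigned long entry = ppc_function_entry(my_handler);

		patch_branch(site, entry, BRANCH_SET_LINK);
	}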
* Re: [PATCH 04/14] powerpc: Add ppc_function_entry() which gets the entry point for a function
From: Kumar Gala @ 2008-06-24 13:39 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> Because function pointers point to different things on 32-bit vs
> 64-bit, add a macro that deals with dereferencing the OPD on 64-bit.
> The soon to be merged ftrace wants this, as well as other code I am
> working on.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>

Acked-by: Kumar Gala <galak@kernel.crashing.org>

- k
* [PATCH 05/14] powerpc: Add new code patching routines
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

This commit adds some new routines for patching code; they will be used
in a following commit.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 arch/powerpc/lib/code-patching.c    |  107 +++++++++++++++++++++++++++++++++
 include/asm-powerpc/code-patching.h |    8 +++
 2 files changed, 115 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 430f4c1..27957c4 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -41,3 +41,110 @@ unsigned int create_branch(const unsigned int *addr,
 
 	return instruction;
 }
+
+unsigned int create_cond_branch(const unsigned int *addr,
+				unsigned long target, int flags)
+{
+	unsigned int instruction;
+	long offset;
+
+	offset = target;
+	if (! (flags & BRANCH_ABSOLUTE))
+		offset = offset - (unsigned long)addr;
+
+	/* Check we can represent the target in the instruction format */
+	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+		return 0;
+
+	/* Mask out the flags and target, so they don't step on each other. */
+	instruction = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC);
+
+	return instruction;
+}
+
+static unsigned int branch_opcode(unsigned int instr)
+{
+	return (instr >> 26) & 0x3F;
+}
+
+static int instr_is_branch_iform(unsigned int instr)
+{
+	return branch_opcode(instr) == 18;
+}
+
+static int instr_is_branch_bform(unsigned int instr)
+{
+	return branch_opcode(instr) == 16;
+}
+
+int instr_is_relative_branch(unsigned int instr)
+{
+	if (instr & BRANCH_ABSOLUTE)
+		return 0;
+
+	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
+}
+
+static unsigned long branch_iform_target(const unsigned int *instr)
+{
+	signed long imm;
+
+	imm = *instr & 0x3FFFFFC;
+
+	/* If the top bit of the immediate value is set this is negative */
+	if (imm & 0x2000000)
+		imm -= 0x4000000;
+
+	if ((*instr & BRANCH_ABSOLUTE) == 0)
+		imm += (unsigned long)instr;
+
+	return (unsigned long)imm;
+}
+
+static unsigned long branch_bform_target(const unsigned int *instr)
+{
+	signed long imm;
+
+	imm = *instr & 0xFFFC;
+
+	/* If the top bit of the immediate value is set this is negative */
+	if (imm & 0x8000)
+		imm -= 0x10000;
+
+	if ((*instr & BRANCH_ABSOLUTE) == 0)
+		imm += (unsigned long)instr;
+
+	return (unsigned long)imm;
+}
+
+unsigned long branch_target(const unsigned int *instr)
+{
+	if (instr_is_branch_iform(*instr))
+		return branch_iform_target(instr);
+	else if (instr_is_branch_bform(*instr))
+		return branch_bform_target(instr);
+
+	return 0;
+}
+
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
+{
+	if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
+		return branch_target(instr) == addr;
+
+	return 0;
+}
+
+unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
+{
+	unsigned long target;
+
+	target = branch_target(src);
+
+	if (instr_is_branch_iform(*src))
+		return create_branch(dest, target, *src);
+	else if (instr_is_branch_bform(*src))
+		return create_cond_branch(dest, target, *src);
+
+	return 0;
+}
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index a45a7ff..40ad46b 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -23,9 +23,17 @@
 
 unsigned int create_branch(const unsigned int *addr,
 			   unsigned long target, int flags);
+unsigned int create_cond_branch(const unsigned int *addr,
+				unsigned long target, int flags);
 void patch_branch(unsigned int *addr, unsigned long target, int flags);
 void patch_instruction(unsigned int *addr, unsigned int instr);
 
+int instr_is_relative_branch(unsigned int instr);
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
+unsigned long branch_target(const unsigned int *instr);
+unsigned int translate_branch(const unsigned int *dest,
+			      const unsigned int *src);
+
 static inline unsigned long ppc_function_entry(void *func)
 {
 #ifdef CONFIG_PPC64
-- 
1.5.5
* Re: [PATCH 05/14] powerpc: Add new code patching routines
From: Kumar Gala @ 2008-06-24 13:48 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> This commit adds some new routines for patching code; they will be
> used in a following commit.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
> ---
>  arch/powerpc/lib/code-patching.c    |  107 +++++++++++++++++++++++++++++++++
>  include/asm-powerpc/code-patching.h |    8 +++
>  2 files changed, 115 insertions(+), 0 deletions(-)
>
> diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> index 430f4c1..27957c4 100644
> --- a/arch/powerpc/lib/code-patching.c
> +++ b/arch/powerpc/lib/code-patching.c
> @@ -41,3 +41,110 @@ unsigned int create_branch(const unsigned int *addr,
>
>  	return instruction;
>  }
> +
> +unsigned int create_cond_branch(const unsigned int *addr,
> +				unsigned long target, int flags)
> +{

it would be nice to have some idea what flags is supposed to be.

> +	unsigned int instruction;
> +	long offset;
> +
> +	offset = target;
> +	if (! (flags & BRANCH_ABSOLUTE))
> +		offset = offset - (unsigned long)addr;
> +
> +	/* Check we can represent the target in the instruction format */
> +	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
> +		return 0;
> +
> +	/* Mask out the flags and target, so they don't step on each other. */
> +	instruction = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC);
> +
> +	return instruction;
> +}

[snip]

> +unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
> +{

I'm not sure I get what this function is trying to do.

> +	unsigned long target;
> +
> +	target = branch_target(src);
> +
> +	if (instr_is_branch_iform(*src))
> +		return create_branch(dest, target, *src);
> +	else if (instr_is_branch_bform(*src))
> +		return create_cond_branch(dest, target, *src);
> +
> +	return 0;
> +}
* Re: [PATCH 05/14] powerpc: Add new code patching routines
From: Michael Ellerman @ 2008-06-26  3:54 UTC
To: Kumar Gala
Cc: linuxppc-dev

On Tue, 2008-06-24 at 08:48 -0500, Kumar Gala wrote:
> On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:
>
> > +unsigned int create_cond_branch(const unsigned int *addr,
> > +				unsigned long target, int flags)
> > +{
>
> it would be nice to have some idea what flags is supposed to be.

Yeah, this routine is a bit of a kludge; it's really only written for
translate_branch(). As it is, flags just takes any of the bits in a
conditional branch that aren't the opcode or target. To fully synthesise
a conditional branch by hand you'd probably want another routine which
constructs the BO & BI fields for you.

> > +unsigned int translate_branch(const unsigned int *dest,
> > +			      const unsigned int *src)
> > +{
>
> I'm not sure I get what this function is trying to do.

It takes a relative branch at src and returns a new branch of the same
type that could be placed at dest, and would jump to the same target as
the original instruction. I guess I should add some doco :)

cheers

-- 
Michael Ellerman
OzLabs, IBM Australia Development Lab

wwweb: http://michael.ellerman.id.au
phone: +61 2 6212 1183 (tie line 70 21183)

We do not inherit the earth from our ancestors,
we borrow it from our children. - S.M.A.R.T Person
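
Putting that explanation into code, a minimal sketch of relocating a
relative branch with translate_branch(), assuming the routines as added
in this patch:

	#include <asm/code-patching.h>

	static void example_move_branch(unsigned int *dest,
					unsigned int *src)
	{
		/* Build a branch, of the same I- or B-form as *src,
		 * that still reaches src's target when placed at dest. */
		unsigned int instr = translate_branch(dest, src);

		/* 0 means the original target can't be reached from
		 * dest with the same instruction form. */
		if (instr)
			patch_instruction(dest, instr);
	}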
* [PATCH 06/14] powerpc: Add tests of the code patching routines
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

Add tests of the existing code patching routines, as well as the new
routines added in the last commit. The self-tests are run late in boot
when CONFIG_CODE_PATCHING_SELFTEST=y, which depends on DEBUG_KERNEL=y.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 arch/powerpc/Kconfig.debug       |    5 +
 arch/powerpc/lib/code-patching.c |  298 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 303 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a7d24e6..dc58939 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -57,6 +57,11 @@ config KGDB
 	  debugger.  See <http://kgdb.sourceforge.net/> for more information.
 	  Unless you are intending to debug the kernel, say N here.
 
+config CODE_PATCHING_SELFTEST
+	bool "Run self-tests of the code-patching code."
+	depends on DEBUG_KERNEL
+	default n
+
 choice
 	prompt "Serial Port"
 	depends on KGDB
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 27957c4..0559fe0 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -8,6 +8,9 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <asm/page.h>
 #include <asm/code-patching.h>
 
 
@@ -148,3 +151,298 @@ unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
 
 	return 0;
 }
+
+
+#ifdef CONFIG_CODE_PATCHING_SELFTEST
+
+static void __init test_trampoline(void)
+{
+	asm ("nop;\n");
+}
+
+#define check(x)	\
+	if (!(x)) printk("code-patching: test failed at line %d\n", __LINE__);
+
+static void __init test_branch_iform(void)
+{
+	unsigned int instr;
+	unsigned long addr;
+
+	addr = (unsigned long)&instr;
+
+	/* The simplest case, branch to self, no flags */
+	check(instr_is_branch_iform(0x48000000));
+	/* All bits of target set, and flags */
+	check(instr_is_branch_iform(0x4bffffff));
+	/* High bit of opcode set, which is wrong */
+	check(!instr_is_branch_iform(0xcbffffff));
+	/* Middle bits of opcode set, which is wrong */
+	check(!instr_is_branch_iform(0x7bffffff));
+
+	/* Simplest case, branch to self with link */
+	check(instr_is_branch_iform(0x48000001));
+	/* All bits of targets set */
+	check(instr_is_branch_iform(0x4bfffffd));
+	/* Some bits of targets set */
+	check(instr_is_branch_iform(0x4bff00fd));
+	/* Must be a valid branch to start with */
+	check(!instr_is_branch_iform(0x7bfffffd));
+
+	/* Absolute branch to 0x100 */
+	instr = 0x48000103;
+	check(instr_is_branch_to_addr(&instr, 0x100));
+	/* Absolute branch to 0x420fc */
+	instr = 0x480420ff;
+	check(instr_is_branch_to_addr(&instr, 0x420fc));
+	/* Maximum positive relative branch, + 20MB - 4B */
+	instr = 0x49fffffc;
+	check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
+	/* Smallest negative relative branch, - 4B */
+	instr = 0x4bfffffc;
+	check(instr_is_branch_to_addr(&instr, addr - 4));
+	/* Largest negative relative branch, - 32 MB */
+	instr = 0x4a000000;
+	check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+
+	/* Branch to self, with link */
+	instr = create_branch(&instr, addr, BRANCH_SET_LINK);
+	check(instr_is_branch_to_addr(&instr, addr));
+
+	/* Branch to self - 0x100, with link */
+	instr = create_branch(&instr, addr - 0x100, BRANCH_SET_LINK);
+	check(instr_is_branch_to_addr(&instr, addr - 0x100));
+
+	/* Branch to self + 0x100, no link */
+	instr = create_branch(&instr, addr + 0x100, 0);
+	check(instr_is_branch_to_addr(&instr, addr + 0x100));
+
+	/* Maximum relative negative offset, - 32 MB */
+	instr = create_branch(&instr, addr - 0x2000000, BRANCH_SET_LINK);
+	check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+
+	/* Out of range relative negative offset, - 32 MB + 4 */
+	instr = create_branch(&instr, addr - 0x2000004, BRANCH_SET_LINK);
+	check(instr == 0);
+
+	/* Out of range relative positive offset, + 32 MB */
+	instr = create_branch(&instr, addr + 0x2000000, BRANCH_SET_LINK);
+	check(instr == 0);
+
+	/* Unaligned target */
+	instr = create_branch(&instr, addr + 3, BRANCH_SET_LINK);
+	check(instr == 0);
+
+	/* Check flags are masked correctly */
+	instr = create_branch(&instr, addr, 0xFFFFFFFC);
+	check(instr_is_branch_to_addr(&instr, addr));
+	check(instr == 0x48000000);
+}
+
+static void __init test_create_function_call(void)
+{
+	unsigned int *iptr;
+	unsigned long dest;
+
+	/* Check we can create a function call */
+	iptr = (unsigned int *)ppc_function_entry(test_trampoline);
+	dest = ppc_function_entry(test_create_function_call);
+	patch_instruction(iptr, create_branch(iptr, dest, BRANCH_SET_LINK));
+	check(instr_is_branch_to_addr(iptr, dest));
+}
+
+static void __init test_branch_bform(void)
+{
+	unsigned long addr;
+	unsigned int *iptr, instr, flags;
+
+	iptr = &instr;
+	addr = (unsigned long)iptr;
+
+	/* The simplest case, branch to self, no flags */
+	check(instr_is_branch_bform(0x40000000));
+	/* All bits of target set, and flags */
+	check(instr_is_branch_bform(0x43ffffff));
+	/* High bit of opcode set, which is wrong */
+	check(!instr_is_branch_bform(0xc3ffffff));
+	/* Middle bits of opcode set, which is wrong */
+	check(!instr_is_branch_bform(0x7bffffff));
+
+	/* Absolute conditional branch to 0x100 */
+	instr = 0x43ff0103;
+	check(instr_is_branch_to_addr(&instr, 0x100));
+	/* Absolute conditional branch to 0x20fc */
+	instr = 0x43ff20ff;
+	check(instr_is_branch_to_addr(&instr, 0x20fc));
+	/* Maximum positive relative conditional branch, + 32 KB - 4B */
+	instr = 0x43ff7ffc;
+	check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
+	/* Smallest negative relative conditional branch, - 4B */
+	instr = 0x43fffffc;
+	check(instr_is_branch_to_addr(&instr, addr - 4));
+	/* Largest negative relative conditional branch, - 32 KB */
+	instr = 0x43ff8000;
+	check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+
+	/* All condition code bits set & link */
+	flags = 0x3ff000 | BRANCH_SET_LINK;
+
+	/* Branch to self */
+	instr = create_cond_branch(iptr, addr, flags);
+	check(instr_is_branch_to_addr(&instr, addr));
+
+	/* Branch to self - 0x100 */
+	instr = create_cond_branch(iptr, addr - 0x100, flags);
+	check(instr_is_branch_to_addr(&instr, addr - 0x100));
+
+	/* Branch to self + 0x100 */
+	instr = create_cond_branch(iptr, addr + 0x100, flags);
+	check(instr_is_branch_to_addr(&instr, addr + 0x100));
+
+	/* Maximum relative negative offset, - 32 KB */
+	instr = create_cond_branch(iptr, addr - 0x8000, flags);
+	check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+
+	/* Out of range relative negative offset, - 32 KB + 4 */
+	instr = create_cond_branch(iptr, addr - 0x8004, flags);
+	check(instr == 0);
+
+	/* Out of range relative positive offset, + 32 KB */
+	instr = create_cond_branch(iptr, addr + 0x8000, flags);
+	check(instr == 0);
+
+	/* Unaligned target */
+	instr = create_cond_branch(iptr, addr + 3, flags);
+	check(instr == 0);
+
+	/* Check flags are masked correctly */
+	instr = create_cond_branch(iptr, addr, 0xFFFFFFFC);
+	check(instr_is_branch_to_addr(&instr, addr));
+	check(instr == 0x43FF0000);
+}
+
+static void __init test_translate_branch(void)
+{
+	unsigned long addr;
+	unsigned int *p, *q;
+	void *buf;
+
+	buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
+	check(buf);
+	if (!buf)
+		return;
+
+	/* Simple case, branch to self moved a little */
+	p = buf;
+	addr = (unsigned long)p;
+	patch_branch(p, addr, 0);
+	check(instr_is_branch_to_addr(p, addr));
+	q = p + 1;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(q, addr));
+
+	/* Maximum negative case, move b . to addr + 32 MB */
+	p = buf;
+	addr = (unsigned long)p;
+	patch_branch(p, addr, 0);
+	q = buf + 0x2000000;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+	check(*q == 0x4a000000);
+
+	/* Maximum positive case, move x to x - 32 MB + 4 */
+	p = buf + 0x2000000;
+	addr = (unsigned long)p;
+	patch_branch(p, addr, 0);
+	q = buf + 4;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+	check(*q == 0x49fffffc);
+
+	/* Jump to x + 16 MB moved to x + 20 MB */
+	p = buf;
+	addr = 0x1000000 + (unsigned long)buf;
+	patch_branch(p, addr, BRANCH_SET_LINK);
+	q = buf + 0x1400000;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+
+	/* Jump to x + 16 MB moved to x - 16 MB + 4 */
+	p = buf + 0x1000000;
+	addr = 0x2000000 + (unsigned long)buf;
+	patch_branch(p, addr, 0);
+	q = buf + 4;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+
+
+	/* Conditional branch tests */
+
+	/* Simple case, branch to self moved a little */
+	p = buf;
+	addr = (unsigned long)p;
+	patch_instruction(p, create_cond_branch(p, addr, 0));
+	check(instr_is_branch_to_addr(p, addr));
+	q = p + 1;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(q, addr));
+
+	/* Maximum negative case, move b . to addr + 32 KB */
+	p = buf;
+	addr = (unsigned long)p;
+	patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
+	q = buf + 0x8000;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+	check(*q == 0x43ff8000);
+
+	/* Maximum positive case, move x to x - 32 KB + 4 */
+	p = buf + 0x8000;
+	addr = (unsigned long)p;
+	patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
+	q = buf + 4;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+	check(*q == 0x43ff7ffc);
+
+	/* Jump to x + 12 KB moved to x + 20 KB */
+	p = buf;
+	addr = 0x3000 + (unsigned long)buf;
+	patch_instruction(p, create_cond_branch(p, addr, BRANCH_SET_LINK));
+	q = buf + 0x5000;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+
+	/* Jump to x + 8 KB moved to x - 8 KB + 4 */
+	p = buf + 0x2000;
+	addr = 0x4000 + (unsigned long)buf;
+	patch_instruction(p, create_cond_branch(p, addr, 0));
+	q = buf + 4;
+	patch_instruction(q, translate_branch(q, p));
+	check(instr_is_branch_to_addr(p, addr));
+	check(instr_is_branch_to_addr(q, addr));
+
+	/* Free the buffer we were using */
+	vfree(buf);
+}
+
+static int __init test_code_patching(void)
+{
+	printk(KERN_DEBUG "Running code patching self-tests ...\n");
+
+	test_branch_iform();
+	test_branch_bform();
+	test_create_function_call();
+	test_translate_branch();
+
+	return 0;
+}
+late_initcall(test_code_patching);
+
+#endif /* CONFIG_CODE_PATCHING_SELFTEST */
-- 
1.5.5
* Re: [PATCH 06/14] powerpc: Add tests of the code patching routines
From: Kumar Gala @ 2008-06-24 13:48 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> Add tests of the existing code patching routines, as well as the new
> routines added in the last commit. The self-tests are run late in boot
> when CONFIG_CODE_PATCHING_SELFTEST=y, which depends on DEBUG_KERNEL=y.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>

Acked-by: Kumar Gala <galak@kernel.crashing.org>

- k
* [PATCH 07/14] powerpc: Add PPC_NOP_INSTR, a hash define for the preferred nop instruction
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

A bunch of code has hard-coded the value for a "nop" instruction; it
would be nice to have a #define for it.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 arch/powerpc/kernel/cputable.c      |    3 ++-
 arch/powerpc/kernel/crash_dump.c    |    2 +-
 arch/powerpc/kernel/module_64.c     |    3 ++-
 include/asm-powerpc/code-patching.h |    2 ++
 4 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e44d553..887e190 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 
 #include <asm/oprofile_impl.h>
+#include <asm/code-patching.h>
 #include <asm/cputable.h>
 #include <asm/prom.h>		/* for PTRRELOC on ARCH=ppc */
 
@@ -1613,7 +1614,7 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 		pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
 
 		for (p = pstart; p < pend; p++) {
-			*p = 0x60000000u;
+			*p = PPC_NOP_INSTR;
 			asm volatile ("dcbst 0, %0" : : "r" (p));
 		}
 		asm volatile ("sync" : : : "memory");
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 2664854..e0debcc 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -44,7 +44,7 @@ static void __init create_trampoline(unsigned long addr)
 	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
 	 * two instructions it doesn't require any registers.
 	 */
-	patch_instruction(p, 0x60000000); /* nop */
+	patch_instruction(p, PPC_NOP_INSTR);
 	patch_branch(++p, addr + PHYSICAL_START, 0);
 }
 
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 3a82b02..d5e569a 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -24,6 +24,7 @@
 #include <asm/module.h>
 #include <asm/uaccess.h>
 #include <asm/firmware.h>
+#include <asm/code-patching.h>
 #include <linux/sort.h>
 
 #include "setup.h"
@@ -346,7 +347,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
    restore r2. */
 static int restore_r2(u32 *instruction, struct module *me)
 {
-	if (*instruction != 0x60000000) {
+	if (*instruction != PPC_NOP_INSTR) {
 		printk("%s: Expect noop after relocate, got %08x\n",
 		       me->name, *instruction);
 		return 0;
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index 40ad46b..ef3a5d1 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -12,6 +12,8 @@
 
 #include <asm/types.h>
 
+#define PPC_NOP_INSTR	0x60000000
+
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
  * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
-- 
1.5.5
* Re: [PATCH 07/14] powerpc: Add PPC_NOP_INSTR, a hash define for the preferred nop instruction
From: Kumar Gala @ 2008-06-24 13:48 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> A bunch of code has hard-coded the value for a "nop" instruction; it
> would be nice to have a #define for it.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>

Acked-by: Kumar Gala <galak@kernel.crashing.org>

- k
* [PATCH 08/14] powerpc: Split out do_feature_fixups() from cputable.c
From: Michael Ellerman @ 2008-06-24  1:32 UTC
To: linuxppc-dev

The logic to patch CPU feature sections lives in cputable.c, but these
days it's used for CPU features as well as firmware features. Move it
into its own file for neatness and as preparation for some additions.

While we're moving the code, we pull the loop body logic into a separate
routine, and remove a comment which doesn't apply anymore.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
---
 arch/powerpc/kernel/cputable.c    |   36 -----------------------
 arch/powerpc/lib/Makefile         |    1 +
 arch/powerpc/lib/feature-fixups.c |   56 +++++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 887e190..11943f0 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 
 #include <asm/oprofile_impl.h>
-#include <asm/code-patching.h>
 #include <asm/cputable.h>
 #include <asm/prom.h>		/* for PTRRELOC on ARCH=ppc */
 
@@ -1588,38 +1587,3 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 	BUG();
 	return NULL;
 }
-
-void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
-{
-	struct fixup_entry {
-		unsigned long	mask;
-		unsigned long	value;
-		long		start_off;
-		long		end_off;
-	} *fcur, *fend;
-
-	fcur = fixup_start;
-	fend = fixup_end;
-
-	for (; fcur < fend; fcur++) {
-		unsigned int *pstart, *pend, *p;
-
-		if ((value & fcur->mask) == fcur->value)
-			continue;
-
-		/* These PTRRELOCs will disappear once the new scheme for
-		 * modules and vdso is implemented
-		 */
-		pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
-		pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
-
-		for (p = pstart; p < pend; p++) {
-			*p = PPC_NOP_INSTR;
-			asm volatile ("dcbst 0, %0" : : "r" (p));
-		}
-		asm volatile ("sync" : : : "memory");
-		for (p = pstart; p < pend; p++)
-			asm volatile ("icbi 0,%0" : : "r" (p));
-		asm volatile ("sync; isync" : : : "memory");
-	}
-}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 305c7df..4afcf3a 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -26,3 +26,4 @@ endif
 obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 
 obj-y			+= code-patching.o
+obj-y			+= feature-fixups.o
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
new file mode 100644
index 0000000..f6fd5d2
--- /dev/null
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -0,0 +1,56 @@
+/*
+ *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  Modifications for ppc64:
+ *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ *  Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <asm/cputable.h>
+#include <asm/code-patching.h>
+
+
+struct fixup_entry {
+	unsigned long	mask;
+	unsigned long	value;
+	long		start_off;
+	long		end_off;
+};
+
+static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+	unsigned int *pstart, *pend, *p;
+
+	if ((value & fcur->mask) == fcur->value)
+		return;
+
+	pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+	pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+	for (p = pstart; p < pend; p++) {
+		*p = PPC_NOP_INSTR;
+		asm volatile ("dcbst 0, %0" : : "r" (p));
+	}
+	asm volatile ("sync" : : : "memory");
+	for (p = pstart; p < pend; p++)
+		asm volatile ("icbi 0,%0" : : "r" (p));
+	asm volatile ("sync; isync" : : : "memory");
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+	struct fixup_entry *fcur, *fend;
+
+	fcur = fixup_start;
+	fend = fixup_end;
+
+	for (; fcur < fend; fcur++)
+		patch_feature_section(value, fcur);
+}
-- 
1.5.5
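
For reference, a sketch of how the now-separated routine is driven; this
mirrors how the kernel's setup code invokes it over the linker-provided
fixup section bounds (shown here under that assumption):

	#include <asm/cputable.h>

	extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;

	static void example_apply_cpu_fixups(unsigned long features)
	{
		/* Nops out every feature section whose condition
		 * (features & mask) == value does not hold. */
		do_feature_fixups(features, &__start___ftr_fixup,
				  &__stop___ftr_fixup);
	}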
* Re: [PATCH 08/14] powerpc: Split out do_feature_fixups() from cputable.c
From: Kumar Gala @ 2008-06-24 13:48 UTC
To: Michael Ellerman
Cc: linuxppc-dev

On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote:

> The logic to patch CPU feature sections lives in cputable.c, but these
> days it's used for CPU features as well as firmware features. Move it
> into its own file for neatness and as preparation for some additions.
>
> While we're moving the code, we pull the loop body logic into a
> separate routine, and remove a comment which doesn't apply anymore.
>
> Signed-off-by: Michael Ellerman <michael@ellerman.id.au>

Acked-by: Kumar Gala <galak@kernel.crashing.org>

- k
* [PATCH 09/14] powerpc: Consolidate CPU and firmware feature fixup macros 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (6 preceding siblings ...) 2008-06-24 1:32 ` [PATCH 08/14] powerpc: Split out do_feature_fixups() from cputable.c Michael Ellerman @ 2008-06-24 1:32 ` Michael Ellerman 2008-06-24 13:48 ` Kumar Gala 2008-06-24 1:32 ` [PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit Michael Ellerman ` (5 subsequent siblings) 13 siblings, 1 reply; 29+ messages in thread From: Michael Ellerman @ 2008-06-24 1:32 UTC (permalink / raw) To: linuxppc-dev The CPU and firmware feature fixup macros are currently spread across three files, firmware.h, cputable.h and asm-compat.h. Consolidate them into their own file, feature-fixups.h Signed-off-by: Michael Ellerman <michael@ellerman.id.au> --- include/asm-powerpc/asm-compat.h | 51 ----------------- include/asm-powerpc/cputable.h | 14 +---- include/asm-powerpc/feature-fixups.h | 102 ++++++++++++++++++++++++++++++++++ include/asm-powerpc/firmware.h | 13 +---- 4 files changed, 104 insertions(+), 76 deletions(-) diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h index c19e736..8ec2e1d 100644 --- a/include/asm-powerpc/asm-compat.h +++ b/include/asm-powerpc/asm-compat.h @@ -15,57 +15,6 @@ #endif -/* - * Feature section common macros - * - * Note that the entries now contain offsets between the table entry - * and the code rather than absolute code pointers in order to be - * useable with the vdso shared library. There is also an assumption - * that values will be negative, that is, the fixup table has to be - * located after the code it fixes up. - */ -#ifdef CONFIG_PPC64 -#ifdef __powerpc64__ -/* 64 bits kernel, 64 bits code */ -#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ -99: \ - .section sect,"a"; \ - .align 3; \ -98: \ - .llong msk; \ - .llong val; \ - .llong label##b-98b; \ - .llong 99b-98b; \ - .previous -#else /* __powerpc64__ */ -/* 64 bits kernel, 32 bits code (ie. 
vdso32) */ -#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ -99: \ - .section sect,"a"; \ - .align 3; \ -98: \ - .llong msk; \ - .llong val; \ - .long 0xffffffff; \ - .long label##b-98b; \ - .long 0xffffffff; \ - .long 99b-98b; \ - .previous -#endif /* !__powerpc64__ */ -#else /* CONFIG_PPC64 */ -/* 32 bits kernel, 32 bits code */ -#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ -99: \ - .section sect,"a"; \ - .align 2; \ -98: \ - .long msk; \ - .long val; \ - .long label##b-98b; \ - .long 99b-98b; \ - .previous -#endif /* !CONFIG_PPC64 */ - #ifdef __powerpc64__ /* operations for longs and pointers */ diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 1e79673..388b7de 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -2,6 +2,7 @@ #define __ASM_POWERPC_CPUTABLE_H #include <asm/asm-compat.h> +#include <asm/feature-fixups.h> #define PPC_FEATURE_32 0x80000000 #define PPC_FEATURE_64 0x40000000 @@ -477,18 +478,5 @@ static inline int cpu_has_feature(unsigned long feature) #endif /* !__ASSEMBLY__ */ -#ifdef __ASSEMBLY__ - -#define BEGIN_FTR_SECTION_NESTED(label) label: -#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97) -#define END_FTR_SECTION_NESTED(msk, val, label) \ - MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) -#define END_FTR_SECTION(msk, val) \ - END_FTR_SECTION_NESTED(msk, val, 97) - -#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) -#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) -#endif /* __ASSEMBLY__ */ - #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_CPUTABLE_H */ diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h new file mode 100644 index 0000000..8597212 --- /dev/null +++ b/include/asm-powerpc/feature-fixups.h @@ -0,0 +1,102 @@ +#ifndef __ASM_POWERPC_FEATURE_FIXUPS_H +#define __ASM_POWERPC_FEATURE_FIXUPS_H + +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifdef __ASSEMBLY__ + +/* + * Feature section common macros + * + * Note that the entries now contain offsets between the table entry + * and the code rather than absolute code pointers in order to be + * useable with the vdso shared library. There is also an assumption + * that values will be negative, that is, the fixup table has to be + * located after the code it fixes up. + */ +#ifdef CONFIG_PPC64 + +#ifdef __powerpc64__ + +/* 64 bits kernel, 64 bits code */ +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +99: \ + .section sect,"a"; \ + .align 3; \ +98: \ + .llong msk; \ + .llong val; \ + .llong label##b-98b; \ + .llong 99b-98b; \ + .previous + +#else /* __powerpc64__ */ + +/* 64 bits kernel, 32 bits code (ie. 
vdso32) */ +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +99: \ + .section sect,"a"; \ + .align 3; \ +98: \ + .llong msk; \ + .llong val; \ + .long 0xffffffff; \ + .long label##b-98b; \ + .long 0xffffffff; \ + .long 99b-98b; \ + .previous + +#endif /* !__powerpc64__ */ + +#else /* CONFIG_PPC64 */ + +/* 32 bits kernel, 32 bits code */ +#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ +99: \ + .section sect,"a"; \ + .align 2; \ +98: \ + .long msk; \ + .long val; \ + .long label##b-98b; \ + .long 99b-98b; \ + .previous + +#endif /* !CONFIG_PPC64 */ + + +/* CPU feature dependent sections */ +#define BEGIN_FTR_SECTION_NESTED(label) label: +#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97) + +#define END_FTR_SECTION_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) + +#define END_FTR_SECTION(msk, val) \ + END_FTR_SECTION_NESTED(msk, val, 97) + +#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) +#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) + + +/* Firmware feature dependent sections */ +#define BEGIN_FW_FTR_SECTION_NESTED(label) label: +#define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97) + +#define END_FW_FTR_SECTION_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) + +#define END_FW_FTR_SECTION(msk, val) \ + END_FW_FTR_SECTION_NESTED(msk, val, 97) + +#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) +#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h index 1e41bd1..ef32899 100644 --- a/include/asm-powerpc/firmware.h +++ b/include/asm-powerpc/firmware.h @@ -15,6 +15,7 @@ #ifdef __KERNEL__ #include <asm/asm-compat.h> +#include <asm/feature-fixups.h> /* firmware feature bitmask values */ #define FIRMWARE_MAX_FEATURES 63 @@ -125,18 +126,6 @@ extern int fwnmi_active; extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup; -#else /* __ASSEMBLY__ */ - -#define BEGIN_FW_FTR_SECTION_NESTED(label) label: -#define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97) -#define END_FW_FTR_SECTION_NESTED(msk, val, label) \ - MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) -#define END_FW_FTR_SECTION(msk, val) \ - END_FW_FTR_SECTION_NESTED(msk, val, 97) - -#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) -#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_FIRMWARE_H */ -- 1.5.5 ^ permalink raw reply related [flat|nested] 29+ messages in thread
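A note for readers: the macros collected above emit fixed-size records into the __ftr_fixup (or __fw_ftr_fixup) section, which the boot-time fixup pass walks. Below is a minimal C sketch of the 64-bit kernel / 64-bit code record and of the walk, following the do_feature_fixups() logic split out in patch 08; the struct and function names are illustrative, not from the patch.

struct ftr_fixup_entry {
	unsigned long mask;	/* msk (.llong) */
	unsigned long value;	/* val (.llong) */
	long start_off;		/* label##b-98b: start of guarded code */
	long end_off;		/* 99b-98b: end of guarded code */
};

static void walk_fixups(unsigned long cur_features,
			struct ftr_fixup_entry *start,
			struct ftr_fixup_entry *end)
{
	struct ftr_fixup_entry *e;

	for (e = start; e < end; e++) {
		if ((cur_features & e->mask) == e->value)
			continue;	/* feature state matches: keep the code */
		/* Otherwise nop out the instructions between
		 * (char *)e + e->start_off and (char *)e + e->end_off.
		 * Both offsets are negative, which is why the table must
		 * be located after the code it fixes up. */
	}
}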
* Re: [PATCH 09/14] powerpc: Consolidate CPU and firmware feature fixup macros 2008-06-24 1:32 ` [PATCH 09/14] powerpc: Consolidate CPU and firmware feature fixup macros Michael Ellerman @ 2008-06-24 13:48 ` Kumar Gala 0 siblings, 0 replies; 29+ messages in thread From: Kumar Gala @ 2008-06-24 13:48 UTC (permalink / raw) To: Michael Ellerman; +Cc: linuxppc-dev On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote: > The CPU and firmware feature fixup macros are currently spread across > three files, firmware.h, cputable.h and asm-compat.h. Consolidate them > into their own file, feature-fixups.h > > Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Acked-by: Kumar Gala <galak@kernel.crashing.org> - k ^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (7 preceding siblings ...) 2008-06-24 1:32 ` [PATCH 09/14] powerpc: Consolidate CPU and firmware feature fixup macros Michael Ellerman @ 2008-06-24 1:32 ` Michael Ellerman 2008-06-24 13:48 ` Kumar Gala 2008-06-24 1:32 ` [PATCH 11/14] powerpc: Introduce infrastructure for feature sections with alternatives Michael Ellerman ` (4 subsequent siblings) 13 siblings, 1 reply; 29+ messages in thread From: Michael Ellerman @ 2008-06-24 1:32 UTC (permalink / raw) To: linuxppc-dev Currentl we have three versions of MAKE_FTR_SECTION_ENTRY(), the macro that generates a feature section entry. There is a 64bit version, a 32bit version and a version for 32bit code built with a 64bit kernel. Rather than triplicating (?) the MAKE_FTR_SECTION_ENTRY() logic, we can move the 64bit/32bit differences into separate macros, and then only have one version of MAKE_FTR_SECTION_ENTRY(). Signed-off-by: Michael Ellerman <michael@ellerman.id.au> --- include/asm-powerpc/feature-fixups.h | 54 ++++++++------------------------- 1 files changed, 13 insertions(+), 41 deletions(-) diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h index 8597212..35f9278 100644 --- a/include/asm-powerpc/feature-fixups.h +++ b/include/asm-powerpc/feature-fixups.h @@ -19,55 +19,27 @@ * that values will be negative, that is, the fixup table has to be * located after the code it fixes up. */ -#ifdef CONFIG_PPC64 - -#ifdef __powerpc64__ - -/* 64 bits kernel, 64 bits code */ -#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ -99: \ - .section sect,"a"; \ - .align 3; \ -98: \ - .llong msk; \ - .llong val; \ - .llong label##b-98b; \ - .llong 99b-98b; \ - .previous - -#else /* __powerpc64__ */ - +#if defined(CONFIG_PPC64) && !defined(__powerpc64__) /* 64 bits kernel, 32 bits code (ie. vdso32) */ +#define FTR_ENTRY_LONG .llong +#define FTR_ENTRY_OFFSET .long 0xffffffff; .long +#else +/* 64 bit kernel 64 bit code, or 32 bit kernel 32 bit code */ +#define FTR_ENTRY_LONG PPC_LONG +#define FTR_ENTRY_OFFSET PPC_LONG +#endif + #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ 99: \ .section sect,"a"; \ .align 3; \ 98: \ - .llong msk; \ - .llong val; \ - .long 0xffffffff; \ - .long label##b-98b; \ - .long 0xffffffff; \ - .long 99b-98b; \ - .previous - -#endif /* !__powerpc64__ */ - -#else /* CONFIG_PPC64 */ - -/* 32 bits kernel, 32 bits code */ -#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ -99: \ - .section sect,"a"; \ - .align 2; \ -98: \ - .long msk; \ - .long val; \ - .long label##b-98b; \ - .long 99b-98b; \ + FTR_ENTRY_LONG msk; \ + FTR_ENTRY_LONG val; \ + FTR_ENTRY_OFFSET label##b-98b; \ + FTR_ENTRY_OFFSET 99b-98b; \ .previous -#endif /* !CONFIG_PPC64 */ /* CPU feature dependent sections */ -- 1.5.5 ^ permalink raw reply related [flat|nested] 29+ messages in thread
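A note on why the vdso32 variant of FTR_ENTRY_OFFSET above is safe: the header comment's stated assumption is that the offsets are negative, and a negative 32-bit value widened to 64 bits carries all-ones in its high word, so on this big-endian layout ".long 0xffffffff; .long off" produces the same eight bytes as ".llong off". A standalone C sketch of that arithmetic (the offset value is made up for illustration):

#include <stdio.h>

int main(void)
{
	long long off = -0x98;	/* e.g. a label##b-98b table offset */
	unsigned int hi = (unsigned long long)off >> 32;
	unsigned int lo = (unsigned int)off;

	/* Prints "ffffffff ffffff68": the high word is exactly the
	 * 0xffffffff constant that FTR_ENTRY_OFFSET hardcodes. */
	printf("%08x %08x\n", hi, lo);
	return 0;
}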
* Re: [PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit 2008-06-24 1:32 ` [PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit Michael Ellerman @ 2008-06-24 13:48 ` Kumar Gala 2008-06-26 3:47 ` Michael Ellerman 0 siblings, 1 reply; 29+ messages in thread From: Kumar Gala @ 2008-06-24 13:48 UTC (permalink / raw) To: Michael Ellerman; +Cc: linuxppc-dev On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote: > Currentl we have three versions of MAKE_FTR_SECTION_ENTRY(), the > macro that > generates a feature section entry. There is a 64bit version, a 32bit > version > and a version for 32bit code built with a 64bit kernel. typo in 'currentl' > > > Rather than triplicating (?) the MAKE_FTR_SECTION_ENTRY() logic, we > can > move the 64bit/32bit differences into separate macros, and then only > have > one version of MAKE_FTR_SECTION_ENTRY(). > > Signed-off-by: Michael Ellerman <michael@ellerman.id.au> > --- > include/asm-powerpc/feature-fixups.h | 54 +++++++ +------------------------- > 1 files changed, 13 insertions(+), 41 deletions(-) Acked-by: Kumar Gala <galak@kernel.crashing.org> - k ^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit 2008-06-24 13:48 ` Kumar Gala @ 2008-06-26 3:47 ` Michael Ellerman 0 siblings, 0 replies; 29+ messages in thread From: Michael Ellerman @ 2008-06-26 3:47 UTC (permalink / raw) To: Kumar Gala; +Cc: linuxppc-dev On Tue, 2008-06-24 at 08:48 -0500, Kumar Gala wrote: > On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote: > > > Currentl we have three versions of MAKE_FTR_SECTION_ENTRY(), the > > macro that > > generates a feature section entry. There is a 64bit version, a 32bit > > version > > and a version for 32bit code built with a 64bit kernel. > > typo in 'currentl' Damn it, I thought I fixed that one, or perhaps it was the "currently" in the other patch. Perhaps paulus can fix it for me when he commits :D cheers -- Michael Ellerman OzLabs, IBM Australia Development Lab wwweb: http://michael.ellerman.id.au phone: +61 2 6212 1183 (tie line 70 21183) We do not inherit the earth from our ancestors, we borrow it from our children. - S.M.A.R.T Person ^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 11/14] powerpc: Introduce infrastructure for feature sections with alternatives 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (8 preceding siblings ...) 2008-06-24 1:32 ` [PATCH 10/14] powerpc: Consolidate feature fixup macros for 64/32 bit Michael Ellerman @ 2008-06-24 1:32 ` Michael Ellerman 2008-06-24 1:33 ` [PATCH 12/14] powerpc: Add logic to patch alternative feature sections Michael Ellerman ` (3 subsequent siblings) 13 siblings, 0 replies; 29+ messages in thread From: Michael Ellerman @ 2008-06-24 1:32 UTC (permalink / raw) To: linuxppc-dev The current feature section logic only supports nop'ing out code, which means that if you want to choose at runtime between instruction sequences, one or both cases will have to execute the nop'ed out contents of the other section, eg: BEGIN_FTR_SECTION or 1,1,1 END_FTR_SECTION_IFSET(FOO) BEGIN_FTR_SECTION or 2,2,2 END_FTR_SECTION_IFCLR(FOO) and the resulting code will be either, or 1,1,1 nop or, nop or 2,2,2 For small code segments this is fine, but for larger code blocks and in performance critical code segments, it would be nice to avoid the nops. This commit starts to implement logic to allow the following: BEGIN_FTR_SECTION or 1,1,1 FTR_SECTION_ELSE or 2,2,2 ALT_FTR_SECTION_END_IFSET(FOO) and the resulting code will be: or 1,1,1 or, or 2,2,2 We achieve this by extending the existing FTR macros. The current feature section semantic just becomes a special case, ie. if the else case is empty we nop out the default case. The key limitation is that the size of the else case must be less than or equal to the size of the default case. If the else case is smaller the remainder of the section is nop'ed. We let the linker put the else case code in with the rest of the text, so that relative branches from the else case are more likely to link; this has the disadvantage that we can't free the unused else cases. This commit introduces the required macro and linker script changes, but does not enable the patching of the alternative sections. We also need to update two hand-made section entries in reg.h and timex.h Signed-off-by: Michael Ellerman <michael@ellerman.id.au> --- arch/powerpc/kernel/vdso32/vdso32.lds.S | 2 +- arch/powerpc/kernel/vdso64/vdso64.lds.S | 2 +- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/powerpc/lib/feature-fixups.c | 2 + include/asm-powerpc/feature-fixups.h | 68 +++++++++++++++++++++++++------ include/asm-powerpc/reg.h | 2 + include/asm-powerpc/timex.h | 2 + 7 files changed, 64 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 9352ab5..2717935 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -24,7 +24,7 @@ SECTIONS . = ALIGN(16); .text : { - *(.text .stub .text.* .gnu.linkonce.t.*) + *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) } PROVIDE(__etext = .); PROVIDE(_etext = .); diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index 7d6585f..e608d1b 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -24,7 +24,7 @@ SECTIONS . 
= ALIGN(16); .text : { - *(.text .stub .text.* .gnu.linkonce.t.*) + *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) *(.sfpr .glink) } :text PROVIDE(__etext = .); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0c3000b..3c07811 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -35,7 +35,7 @@ SECTIONS ALIGN_FUNCTION(); *(.text.head) _text = .; - *(.text .fixup .text.init.refok .exit.text.refok) + *(.text .fixup .text.init.refok .exit.text.refok __ftr_alt_*) SCHED_TEXT LOCK_TEXT KPROBES_TEXT diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index f6fd5d2..973d547 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -22,6 +22,8 @@ struct fixup_entry { unsigned long value; long start_off; long end_off; + long alt_start_off; + long alt_end_off; }; static void patch_feature_section(unsigned long value, struct fixup_entry *fcur) diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h index 35f9278..ab30129 100644 --- a/include/asm-powerpc/feature-fixups.h +++ b/include/asm-powerpc/feature-fixups.h @@ -29,24 +29,35 @@ #define FTR_ENTRY_OFFSET PPC_LONG #endif +#define START_FTR_SECTION(label) label##1: + +#define FTR_SECTION_ELSE_NESTED(label) \ +label##2: \ + .pushsection __ftr_alt_##label,"a"; \ + .align 2; \ +label##3: + #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ -99: \ - .section sect,"a"; \ +label##4: \ + .popsection; \ + .pushsection sect,"a"; \ .align 3; \ -98: \ +label##5: \ FTR_ENTRY_LONG msk; \ FTR_ENTRY_LONG val; \ - FTR_ENTRY_OFFSET label##b-98b; \ - FTR_ENTRY_OFFSET 99b-98b; \ - .previous - + FTR_ENTRY_OFFSET label##1b-label##5b; \ + FTR_ENTRY_OFFSET label##2b-label##5b; \ + FTR_ENTRY_OFFSET label##3b-label##5b; \ + FTR_ENTRY_OFFSET label##4b-label##5b; \ + .popsection; /* CPU feature dependent sections */ -#define BEGIN_FTR_SECTION_NESTED(label) label: -#define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97) +#define BEGIN_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_FTR_SECTION START_FTR_SECTION(97) -#define END_FTR_SECTION_NESTED(msk, val, label) \ +#define END_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) #define END_FTR_SECTION(msk, val) \ @@ -55,12 +66,27 @@ #define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) #define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0) +/* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */ +#define FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97) +#define ALT_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) +#define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_FTR_SECTION_END(msk, val) \ + ALT_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_FTR_SECTION_END_IFSET(msk) \ + ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_FTR_SECTION_END_IFCLR(msk) \ + ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97) /* Firmware feature dependent sections */ -#define BEGIN_FW_FTR_SECTION_NESTED(label) label: -#define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97) +#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label) +#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97) -#define END_FW_FTR_SECTION_NESTED(msk, val, 
label) \ +#define END_FW_FTR_SECTION_NESTED(msk, val, label) \ + FTR_SECTION_ELSE_NESTED(label) \ MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) #define END_FW_FTR_SECTION(msk, val) \ @@ -69,6 +95,22 @@ #define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) #define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0) +/* Firmware feature sections with alternatives */ +#define FW_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label) +#define FW_FTR_SECTION_ELSE FTR_SECTION_ELSE_NESTED(97) +#define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label) \ + MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) +#define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label) \ + ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label) +#define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label) \ + ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label) +#define ALT_FW_FTR_SECTION_END(msk, val) \ + ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97) +#define ALT_FW_FTR_SECTION_END_IFSET(msk) \ + ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97) +#define ALT_FW_FTR_SECTION_END_IFCLR(msk) \ + ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97) + #endif /* __ASSEMBLY__ */ #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */ diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index edc0cfd..f8d7563 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h @@ -732,6 +732,8 @@ " .llong %1\n" \ " .llong 97b-98b\n" \ " .llong 99b-98b\n" \ + " .llong 0\n" \ + " .llong 0\n" \ ".previous" \ : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) #else diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h index 92dedde..c55e14f 100644 --- a/include/asm-powerpc/timex.h +++ b/include/asm-powerpc/timex.h @@ -38,6 +38,8 @@ static inline cycles_t get_cycles(void) " .long 0\n" " .long 97b-98b\n" " .long 99b-98b\n" + " .long 0\n" + " .long 0\n" ".previous" : "=r" (ret) : "i" (CPU_FTR_601)); return ret; -- 1.5.5 ^ permalink raw reply related [flat|nested] 29+ messages in thread
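To keep the five generated labels straight: label##1 opens the default section, label##2 ends it, label##3 and label##4 bracket the else code in its __ftr_alt_* section, and label##5 marks the table record itself. In C terms each record now carries six fields, mirroring the struct fixup_entry extension in feature-fixups.c above (the comments here are interpretive):

struct fixup_entry {
	unsigned long mask;	/* FTR_ENTRY_LONG msk */
	unsigned long value;	/* FTR_ENTRY_LONG val */
	long start_off;		/* label##1b-label##5b: default code start */
	long end_off;		/* label##2b-label##5b: default code end */
	long alt_start_off;	/* label##3b-label##5b: else code start */
	long alt_end_off;	/* label##4b-label##5b: else code end */
};

The two zero words appended to the hand-made entries in reg.h and timex.h are these alt offsets; alt_start_off equal to alt_end_off denotes an empty else case, which preserves the old nop-out semantics.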
* [PATCH 12/14] powerpc: Add logic to patch alternative feature sections 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (9 preceding siblings ...) 2008-06-24 1:32 ` [PATCH 11/14] powerpc: Introduce infrastructure for feature sections with alternatives Michael Ellerman @ 2008-06-24 1:33 ` Michael Ellerman 2008-06-24 13:47 ` Kumar Gala 2008-06-24 1:33 ` [PATCH 13/14] powerpc: Add self-tests of the feature fixup code Michael Ellerman ` (2 subsequent siblings) 13 siblings, 1 reply; 29+ messages in thread From: Michael Ellerman @ 2008-06-24 1:33 UTC (permalink / raw) To: linuxppc-dev This patch adds the logic to patch alternative sections. This is fairly straightforward, except for branches. Relative branches that jump from inside the else section to outside of it need to be translated as they're moved; otherwise they will jump to the wrong location. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> --- arch/powerpc/lib/feature-fixups.c | 79 ++++++++++++++++++++++++++++++------- 1 files changed, 65 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 973d547..174c1eb 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -26,24 +26,66 @@ struct fixup_entry { long alt_end_off; }; -static void patch_feature_section(unsigned long value, struct fixup_entry *fcur) +static unsigned int *calc_addr(struct fixup_entry *fcur, long offset) { - unsigned int *pstart, *pend, *p; + /* + * We store the offset to the code as a negative offset from + * the start of the alt_entry, to support the VDSO. This + * routine converts that back into an actual address. + */ + return (unsigned int *)((unsigned long)fcur + offset); +} + +static int patch_alt_instruction(unsigned int *src, unsigned int *dest, + unsigned int *alt_start, unsigned int *alt_end) +{ + unsigned int instr; + + instr = *src; + + if (instr_is_relative_branch(*src)) { + unsigned int *target = (unsigned int *)branch_target(src); + + /* Branch within the section doesn't need translating */ + if (target < alt_start || target >= alt_end) { + instr = translate_branch(dest, src); + if (!instr) + return 1; + } + } + + patch_instruction(dest, instr); + + return 0; +} + +static int patch_feature_section(unsigned long value, struct fixup_entry *fcur) +{ + unsigned int *start, *end, *alt_start, *alt_end, *src, *dest; + + start = calc_addr(fcur, fcur->start_off); + end = calc_addr(fcur, fcur->end_off); + alt_start = calc_addr(fcur, fcur->alt_start_off); + alt_end = calc_addr(fcur, fcur->alt_end_off); + + if ((alt_end - alt_start) > (end - start)) + return 1; if ((value & fcur->mask) == fcur->value) - return; + return 0; - pstart = ((unsigned int *)fcur) + (fcur->start_off / 4); - pend = ((unsigned int *)fcur) + (fcur->end_off / 4); + src = alt_start; + dest = start; - for (p = pstart; p < pend; p++) { - *p = PPC_NOP_INSTR; - asm volatile ("dcbst 0, %0" : : "r" (p)); + for (; src < alt_end; src++, dest++) { + if (patch_alt_instruction(src, dest, alt_start, alt_end)) + return 1; } - asm volatile ("sync" : : : "memory"); - for (p = pstart; p < pend; p++) - asm volatile ("icbi 0,%0" : : "r" (p)); - asm volatile ("sync; isync" : : : "memory"); + + for (; dest < end; dest++) + patch_instruction(dest, PPC_NOP_INSTR); + + return 0; } void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) @@ -53,6 +95,15 @@ void do_feature_fixups(unsigned long value, void 
*fixup_start, void *fixup_end) fcur = fixup_start; fend = fixup_end; - for (; fcur < fend; fcur++) - patch_feature_section(value, fcur); + for (; fcur < fend; fcur++) { + if (patch_feature_section(value, fcur)) { + __WARN(); + printk("Unable to patch feature section at %p - %p" \ + " with %p - %p\n", + calc_addr(fcur, fcur->start_off), + calc_addr(fcur, fcur->end_off), + calc_addr(fcur, fcur->alt_start_off), + calc_addr(fcur, fcur->alt_end_off)); + } + } } -- 1.5.5 ^ permalink raw reply related [flat|nested] 29+ messages in thread
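Why escaping branches must be translated: a relative branch encodes its target as an offset from its own address, so copying the instruction to a new address changes where it lands unless the offset is recomputed. A standalone C sketch of the arithmetic, reusing the branch encoding from create_branch() in patch 01; the addresses are made up and this is not the kernel's translate_branch():

#include <stdio.h>

/* Unconditional relative "b": opcode 0x48000000 plus a word-aligned
 * signed displacement in the low 26 bits (reach is +/-32MB). */
static unsigned int make_rel_branch(unsigned long addr, unsigned long target)
{
	return 0x48000000 | ((target - addr) & 0x03FFFFFC);
}

int main(void)
{
	unsigned long src = 0xc0000000;		/* inside the else section */
	unsigned long dest = 0xc0001000;	/* where the insn is copied to */
	unsigned long target = 0xc0000100;	/* outside the else section */

	/* Same absolute target, two different encodings: */
	printf("at src:  0x%08x\n", make_rel_branch(src, target));	/* 0x48000100 */
	printf("at dest: 0x%08x\n", make_rel_branch(dest, target));	/* 0x4bfff100 */
	return 0;
}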
* Re: [PATCH 12/14] powerpc: Add logic to patch alternative feature sections 2008-06-24 1:33 ` [PATCH 12/14] powerpc: Add logic to patch alternative feature sections Michael Ellerman @ 2008-06-24 13:47 ` Kumar Gala 2008-06-26 3:45 ` Michael Ellerman 0 siblings, 1 reply; 29+ messages in thread From: Kumar Gala @ 2008-06-24 13:47 UTC (permalink / raw) To: Michael Ellerman; +Cc: linuxppc-dev On Jun 23, 2008, at 8:33 PM, Michael Ellerman wrote: > This patch adds the logic to patch alternative sections. This is > fairly > straightforward, except for branches. Relative branches that jump > from > inside the else section to outside of it need to be translated as > they're > moved; otherwise they will jump to the wrong location. this should really be part of the 'Introduce infrastructure for feature sections with alternatives' patch. The two are really coupled (one doesn't make sense w/o the other). - k ^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH 12/14] powerpc: Add logic to patch alternative feature sections 2008-06-24 13:47 ` Kumar Gala @ 2008-06-26 3:45 ` Michael Ellerman 0 siblings, 0 replies; 29+ messages in thread From: Michael Ellerman @ 2008-06-26 3:45 UTC (permalink / raw) To: Kumar Gala; +Cc: linuxppc-dev On Tue, 2008-06-24 at 08:47 -0500, Kumar Gala wrote: > On Jun 23, 2008, at 8:33 PM, Michael Ellerman wrote: > > > This patch adds the logic to patch alternative sections. This is > > fairly > > straightforward, except for branches. Relative branches that jump > > from > > inside the else section to outside of it need to be translated as > > they're > > moved; otherwise they will jump to the wrong location. > > this should really be part of the 'Introduce infrastructure for > feature sections with alternatives' patch. The two are really coupled > (one doesn't make sense w/o the other). I guess, although I did implement it this way, in two stages - it just means the ALT_ macros are non-functional for one commit. I can respin if you really want. cheers -- Michael Ellerman OzLabs, IBM Australia Development Lab wwweb: http://michael.ellerman.id.au phone: +61 2 6212 1183 (tie line 70 21183) We do not inherit the earth from our ancestors, we borrow it from our children. - S.M.A.R.T Person ^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 13/14] powerpc: Add self-tests of the feature fixup code 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (10 preceding siblings ...) 2008-06-24 1:33 ` [PATCH 12/14] powerpc: Add logic to patch alternative feature sections Michael Ellerman @ 2008-06-24 1:33 ` Michael Ellerman 2008-06-24 1:33 ` [PATCH 14/14] powerpc: Use an alternative feature section in entry_64.S Michael Ellerman 2008-06-24 13:23 ` [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Kumar Gala 13 siblings, 0 replies; 29+ messages in thread From: Michael Ellerman @ 2008-06-24 1:33 UTC (permalink / raw) To: linuxppc-dev This commit adds tests of the feature fixup code; they are run during boot if CONFIG_FTR_FIXUP_SELFTEST=y. Some of the tests manually invoke the patching routines to check their behaviour, and others use the macros and so are patched during the normal patching done during boot. Because we have two sets of macros with different names, we use a macro to generate the test of the macros, very niiiice. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> --- arch/powerpc/Kconfig.debug | 5 + arch/powerpc/lib/Makefile | 1 + arch/powerpc/lib/feature-fixups-test.S | 727 ++++++++++++++++++++++++++++++++ arch/powerpc/lib/feature-fixups.c | 206 +++++++++ 4 files changed, 939 insertions(+), 0 deletions(-) diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index dc58939..2840ab6 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -62,6 +62,11 @@ config CODE_PATCHING_SELFTEST depends on DEBUG_KERNEL default n +config FTR_FIXUP_SELFTEST + bool "Run self-tests of the feature-fixup code." + depends on DEBUG_KERNEL + default n + choice prompt "Serial Port" depends on KGDB diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 4afcf3a..9469f99 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -27,3 +27,4 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o obj-y += code-patching.o obj-y += feature-fixups.o +obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S new file mode 100644 index 0000000..10d038b --- /dev/null +++ b/arch/powerpc/lib/feature-fixups-test.S @@ -0,0 +1,727 @@ +/* + * Copyright 2008 Michael Ellerman, IBM Corporation. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <asm/feature-fixups.h> +#include <asm/ppc_asm.h> + + .text + +#define globl(x) \ + .globl x; \ +x: + +globl(ftr_fixup_test1) + or 1,1,1 + or 2,2,2 /* fixup will nop out this instruction */ + or 3,3,3 + +globl(end_ftr_fixup_test1) + +globl(ftr_fixup_test1_orig) + or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test1_expected) + or 1,1,1 + nop + or 3,3,3 + +globl(ftr_fixup_test2) + or 1,1,1 + or 2,2,2 /* fixup will replace this with ftr_fixup_test2_alt */ + or 3,3,3 + +globl(end_ftr_fixup_test2) + +globl(ftr_fixup_test2_orig) + or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test2_alt) + or 31,31,31 + +globl(ftr_fixup_test2_expected) + or 1,1,1 + or 31,31,31 + or 3,3,3 + +globl(ftr_fixup_test3) + or 1,1,1 + or 2,2,2 /* fixup will fail to replace this */ + or 3,3,3 + +globl(end_ftr_fixup_test3) + +globl(ftr_fixup_test3_orig) + or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test3_alt) + or 31,31,31 + or 31,31,31 + +globl(ftr_fixup_test4) + or 1,1,1 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 3,3,3 + +globl(end_ftr_fixup_test4) + +globl(ftr_fixup_test4_expected) + or 1,1,1 + or 31,31,31 + or 31,31,31 + nop + nop + or 3,3,3 + +globl(ftr_fixup_test4_orig) + or 1,1,1 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test4_alt) + or 31,31,31 + or 31,31,31 + + +globl(ftr_fixup_test5) + or 1,1,1 +BEGIN_FTR_SECTION + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 +FTR_SECTION_ELSE +2: b 3f +3: or 5,5,5 + beq 3b + b 1f + or 6,6,6 + b 2b +1: bdnz 3b +ALT_FTR_SECTION_END(0, 1) + or 1,1,1 + +globl(end_ftr_fixup_test5) + +globl(ftr_fixup_test5_expected) + or 1,1,1 +2: b 3f +3: or 5,5,5 + beq 3b + b 1f + or 6,6,6 + b 2b +1: bdnz 3b + or 1,1,1 + +globl(ftr_fixup_test6) +1: or 1,1,1 +BEGIN_FTR_SECTION + or 5,5,5 +2: cmpdi r3,0 + beq 4f + blt 2b + b 1b + b 4f +FTR_SECTION_ELSE +2: or 2,2,2 + cmpdi r3,1 + beq 3f + blt 2b + b 3f + b 1b +ALT_FTR_SECTION_END(0, 1) +3: or 1,1,1 + or 2,2,2 +4: or 3,3,3 + +globl(end_ftr_fixup_test6) + +globl(ftr_fixup_test6_expected) +1: or 1,1,1 +2: or 2,2,2 + cmpdi r3,1 + beq 3f + blt 2b + b 3f + b 1b +2: or 1,1,1 + or 2,2,2 +3: or 3,3,3 + + +#define MAKE_MACRO_TEST(TYPE) \ +globl(ftr_fixup_test_ ##TYPE##_macros) \ + or 1,1,1; \ + /* Basic test, this section should all be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic test, this section should NOT be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, inner section should be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(80) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 80) \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, whole section should be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(80) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 0, 80) \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, none should be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(80) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 0, 80) \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, default 
case should be taken */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 5,5,5; \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, else case should be taken */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ + or 31,31,31; \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt with smaller else case, should be padded with nops */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in default case */ \ + /* Default case should be taken, with nop'ed inner section */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 95) \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 2,2,2; \ + or 2,2,2; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 5,5,5; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, else taken & nop */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 5,5,5; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, else taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, all nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner else taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ 
+##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner else taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else can have large else case */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ + or 5,5,5; \ + or 5,5,5; \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; + +#define MAKE_MACRO_TEST_EXPECTED(TYPE) \ +globl(ftr_fixup_test_ ##TYPE##_macros_expected) \ + or 1,1,1; \ + /* Basic test, this section should all be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + nop; \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic test, this section should NOT be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, inner section should be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(80) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 80) */ \ + or 2,2,2; \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, whole section should be nop'ed */ \ + /* NB. 
inner section is not nop'ed, but then entire outer is */ \ +/* BEGIN_##TYPE##_SECTION */ \ + nop; \ + nop; \ +/* BEGIN_##TYPE##_SECTION_NESTED(80) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 0, 80) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, none should be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(80) */ \ + or 3,3,3; \ + or 3,3,3; \ +/* END_##TYPE##_SECTION_NESTED(0, 0, 80) */ \ + or 2,2,2; \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, default case should be taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, else case should be taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ + or 31,31,31; \ + or 31,31,31; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt with smaller else case, should be padded with nops */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ + nop; \ + nop; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in default case */ \ + /* Default case should be taken, with nop'ed inner section */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 3,3,3; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */ \ + or 3,3,3; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 5,5,5; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 3,3,3; */ \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, else taken & nop */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 5,5,5; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */ \ + or 5,5,5; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + or 1,1,1; \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */ \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, else taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + or 5,5,5; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, all nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + nop; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + 
nop; \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */ \ + nop; \ +/* END_##TYPE##_SECTION(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + or 1,1,1; \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */ \ + or 2,2,2; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 31,31,31; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + /* or 1,1,1; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */ \ + /* or 31,31,31; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner else taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + or 5,5,5; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + or 2,2,2; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 31,31,31; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + /* or 1,1,1; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */ \ + /* or 31,31,31; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 2,2,2; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + /* or 2,2,2; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + or 5,5,5; \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + /* or 1,1,1; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */ \ + or 31,31,31; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner else taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 2,2,2; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + /* or 2,2,2; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + or 1,1,1; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) */ \ + or 31,31,31; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else can have large else case */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ +/* ##TYPE##_SECTION_ELSE */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) */ \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; + +MAKE_MACRO_TEST(FTR); +MAKE_MACRO_TEST_EXPECTED(FTR); + +#ifdef CONFIG_PPC64 +MAKE_MACRO_TEST(FW_FTR); +MAKE_MACRO_TEST_EXPECTED(FW_FTR); +#endif diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 174c1eb..48e1ed8 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -13,6 +13,8 @@ */ #include 
<linux/kernel.h> +#include <linux/string.h> +#include <linux/init.h> #include <asm/cputable.h> #include <asm/code-patching.h> @@ -107,3 +109,207 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } } + +#ifdef CONFIG_FTR_FIXUP_SELFTEST + +#define check(x) \ + if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__); + +/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */ +static struct fixup_entry fixup; + +static long calc_offset(struct fixup_entry *entry, unsigned int *p) +{ + return (unsigned long)p - (unsigned long)entry; +} + +void test_basic_patching(void) +{ + extern unsigned int ftr_fixup_test1; + extern unsigned int end_ftr_fixup_test1; + extern unsigned int ftr_fixup_test1_orig; + extern unsigned int ftr_fixup_test1_expected; + int size = &end_ftr_fixup_test1 - &ftr_fixup_test1; + + fixup.value = fixup.mask = 8; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2); + fixup.alt_start_off = fixup.alt_end_off = 0; + + /* Sanity check */ + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + + /* Check we don't patch if the value matches */ + patch_feature_section(8, &fixup); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + + /* Check we do patch if the value doesn't match */ + patch_feature_section(0, &fixup); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); + + /* Check we do patch if the mask doesn't match */ + memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + patch_feature_section(~8, &fixup); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); +} + +static void test_alternative_patching(void) +{ + extern unsigned int ftr_fixup_test2; + extern unsigned int end_ftr_fixup_test2; + extern unsigned int ftr_fixup_test2_orig; + extern unsigned int ftr_fixup_test2_alt; + extern unsigned int ftr_fixup_test2_expected; + int size = &end_ftr_fixup_test2 - &ftr_fixup_test2; + + fixup.value = fixup.mask = 0xF; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2); + fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt); + fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1); + + /* Sanity check */ + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + + /* Check we don't patch if the value matches */ + patch_feature_section(0xF, &fixup); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + + /* Check we do patch if the value doesn't match */ + patch_feature_section(0, &fixup); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); + + /* Check we do patch if the mask doesn't match */ + memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + patch_feature_section(~0xF, &fixup); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); +} + +static void test_alternative_case_too_big(void) +{ + extern unsigned int ftr_fixup_test3; + extern unsigned int end_ftr_fixup_test3; + extern unsigned int ftr_fixup_test3_orig; + extern unsigned int ftr_fixup_test3_alt; + int size = &end_ftr_fixup_test3 - &ftr_fixup_test3; + + fixup.value = fixup.mask = 0xC; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2); + 
fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt); + fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2); + + /* Sanity check */ + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + + /* Expect nothing to be patched, and the error returned to us */ + check(patch_feature_section(0xF, &fixup) == 1); + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(patch_feature_section(0, &fixup) == 1); + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(patch_feature_section(~0xF, &fixup) == 1); + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); +} + +static void test_alternative_case_too_small(void) +{ + extern unsigned int ftr_fixup_test4; + extern unsigned int end_ftr_fixup_test4; + extern unsigned int ftr_fixup_test4_orig; + extern unsigned int ftr_fixup_test4_alt; + extern unsigned int ftr_fixup_test4_expected; + int size = &end_ftr_fixup_test4 - &ftr_fixup_test4; + unsigned long flag; + + /* Check a high-bit flag */ + flag = 1UL << ((sizeof(unsigned long) - 1) * 8); + fixup.value = fixup.mask = flag; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5); + fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt); + fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2); + + /* Sanity check */ + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + + /* Check we don't patch if the value matches */ + patch_feature_section(flag, &fixup); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + + /* Check we do patch if the value doesn't match */ + patch_feature_section(0, &fixup); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); + + /* Check we do patch if the mask doesn't match */ + memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + patch_feature_section(~flag, &fixup); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); +} + +static void test_alternative_case_with_branch(void) +{ + extern unsigned int ftr_fixup_test5; + extern unsigned int end_ftr_fixup_test5; + extern unsigned int ftr_fixup_test5_expected; + int size = &end_ftr_fixup_test5 - &ftr_fixup_test5; + + check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0); +} + +static void test_alternative_case_with_external_branch(void) +{ + extern unsigned int ftr_fixup_test6; + extern unsigned int end_ftr_fixup_test6; + extern unsigned int ftr_fixup_test6_expected; + int size = &end_ftr_fixup_test6 - &ftr_fixup_test6; + + check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0); +} + +static void test_cpu_macros(void) +{ + extern void ftr_fixup_test_FTR_macros; + extern void ftr_fixup_test_FTR_macros_expected; + unsigned long size = &ftr_fixup_test_FTR_macros_expected - + &ftr_fixup_test_FTR_macros; + + /* The fixups have already been done for us during boot */ + check(memcmp(&ftr_fixup_test_FTR_macros, + &ftr_fixup_test_FTR_macros_expected, size) == 0); +} + +static void test_fw_macros(void) +{ +#ifdef CONFIG_PPC64 + extern void ftr_fixup_test_FW_FTR_macros; + extern void ftr_fixup_test_FW_FTR_macros_expected; + unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected - + &ftr_fixup_test_FW_FTR_macros; + + /* The fixups have already been done for us during boot */ + check(memcmp(&ftr_fixup_test_FW_FTR_macros, + &ftr_fixup_test_FW_FTR_macros_expected, size) == 0); +#endif +} 
+ +static int __init test_feature_fixups(void) +{ + printk(KERN_DEBUG "Running feature fixup self-tests ...\n"); + + test_basic_patching(); + test_alternative_patching(); + test_alternative_case_too_big(); + test_alternative_case_too_small(); + test_alternative_case_with_branch(); + test_alternative_case_with_external_branch(); + test_cpu_macros(); + test_fw_macros(); + + return 0; +} +late_initcall(test_feature_fixups); + +#endif /* CONFIG_FTR_FIXUP_SELFTEST */ -- 1.5.5 ^ permalink raw reply related [flat|nested] 29+ messages in thread
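One subtlety in the C tests above: the test instruction streams are exported as unsigned int symbols, so pointer arithmetic on them steps in 4-byte instruction units. For test 1 the hand-built entry therefore brackets exactly the middle instruction; an annotated re-quote, with interpretive comments:

	fixup.value = fixup.mask = 8;
	/* &ftr_fixup_test1 + 1 is the second instruction (or 2,2,2),
	 * and + 2 is one past it, so exactly one slot is bracketed */
	fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1);
	fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2);
	/* equal alt offsets mean a zero-length else case, so a
	 * mismatched value nops the slot: or 1,1,1 / nop / or 3,3,3 */
	fixup.alt_start_off = fixup.alt_end_off = 0;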
* [PATCH 14/14] powerpc: Use an alternative feature section in entry_64.S 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (11 preceding siblings ...) 2008-06-24 1:33 ` [PATCH 13/14] powerpc: Add self-tests of the feature fixup code Michael Ellerman @ 2008-06-24 1:33 ` Michael Ellerman 2008-06-24 13:23 ` [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Kumar Gala 13 siblings, 0 replies; 29+ messages in thread From: Michael Ellerman @ 2008-06-24 1:33 UTC (permalink / raw) To: linuxppc-dev Use an alternative feature section in _switch. There are three cases handled here: either we don't have an SLB, in which case we jump over the entire code section, or if we do, we either do or don't have 1TB segments. Boot tested on Power3, Power5 and Power5+. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> --- arch/powerpc/kernel/entry_64.S | 12 ++++++------ 1 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index c0db5b7..6ca3044 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -383,16 +383,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) ld r8,KSP(r4) /* new stack pointer */ BEGIN_FTR_SECTION - b 2f -END_FTR_SECTION_IFCLR(CPU_FTR_SLB) -BEGIN_FTR_SECTION + BEGIN_FTR_SECTION_NESTED(95) clrrdi r6,r8,28 /* get its ESID */ clrrdi r9,r1,28 /* get current sp ESID */ -END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) -BEGIN_FTR_SECTION + FTR_SECTION_ELSE_NESTED(95) clrrdi r6,r8,40 /* get its 1T ESID */ clrrdi r9,r1,40 /* get current sp 1T ESID */ -END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) + ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) +FTR_SECTION_ELSE + b 2f +ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) clrldi. r0,r6,2 /* is new ESID c00000000? */ cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ cror eq,4*cr1+eq,eq -- 1.5.5 ^ permalink raw reply related [flat|nested] 29+ messages in thread
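As a reading aid, once the boot-time fixups have run the patched stanza in _switch behaves like the C-shaped sketch below. The function and variable names are made up, and the real code works on raw registers; cpu_has_feature() is the cputable.h test that the fixups stand in for:

static void switch_esid_sketch(unsigned long cur_sp, unsigned long new_sp)
{
	unsigned long cur_esid, new_esid;

	if (cpu_has_feature(CPU_FTR_SLB)) {
		if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
			/* else case of nested section 95: clrrdi ...,40 */
			new_esid = new_sp & ~((1UL << 40) - 1);
			cur_esid = cur_sp & ~((1UL << 40) - 1);
		} else {
			/* default case of nested section 95: clrrdi ...,28 */
			new_esid = new_sp & ~((1UL << 28) - 1);
			cur_esid = cur_sp & ~((1UL << 28) - 1);
		}
		/* ... falls through to the ESID compare and SLB reload ... */
	} else {
		/* the "b 2f" else case: no SLB, skip all of the above */
	}
}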
* Re: [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c 2008-06-24 1:32 [PATCH 01/14] powerpc: Move code patching code into arch/powerpc/lib/code-patching.c Michael Ellerman ` (12 preceding siblings ...) 2008-06-24 1:33 ` [PATCH 14/14] powerpc: Use an alternative feature section in entry_64.S Michael Ellerman @ 2008-06-24 13:23 ` Kumar Gala 13 siblings, 0 replies; 29+ messages in thread From: Kumar Gala @ 2008-06-24 13:23 UTC (permalink / raw) To: Michael Ellerman; +Cc: linuxppc-dev On Jun 23, 2008, at 8:32 PM, Michael Ellerman wrote: > We currently have a few routines for patching code in asm/system.h, > because > they didn't fit anywhere else. I'd like to clean them up a little > and add > some more, so first move them into a dedicated C file - they don't > need to > be inlined. > > While we're moving the code, drop create_function_call(), it's > intended > caller never got merged and will be replaced in future with something > different. > > Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Acked-by: Kumar Gala <galak@kernel.crashing.org> - k ^ permalink raw reply [flat|nested] 29+ messages in thread