* [PATCH v4 1/2] powerpc32: memcpy: only use dcbz once cache is enabled
2015-09-16 10:04 [PATCH v4 0/2] powerpc32: memcpy/memset: only use dcbz once cache is enabled Christophe Leroy
@ 2015-09-16 10:04 ` Christophe Leroy
2015-09-17 5:13 ` [v4,1/2] " Michael Ellerman
2015-09-16 10:04 ` [PATCH v4 2/2] powerpc32: memset: " Christophe Leroy
1 sibling, 1 reply; 5+ messages in thread
From: Christophe Leroy @ 2015-09-16 10:04 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
scottwood, sojkam1
Cc: linux-kernel, linuxppc-dev
memcpy() uses the dcbz instruction to speed up copying by not wasting time
loading cache lines with data that will be overwritten.
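For background, dcbz establishes a whole cache line in the data cache and
zeroes it without first fetching its old contents from memory; when the
target is not cacheable (for instance while the data cache is still
disabled), it typically raises an alignment exception instead. A minimal,
hypothetical C sketch of clearing one line this way (zero_cache_line() is
not part of the patch; the pointer is assumed to be cache-line aligned):

	/* Zero one L1 cache line in place; p must be cache-line aligned
	 * and map cacheable memory, otherwise dcbz traps. */
	static inline void zero_cache_line(void *p)
	{
		asm volatile("dcbz 0,%0" : : "r"(p) : "memory");
	}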
Some platforms, like the mpc52xx, do not have the cache active at startup
and can therefore not use memcpy(). Although no part of the code
explicitly uses memcpy(), GCC makes calls to it.
This patch modifies memcpy() so that at startup, memcpy()
unconditionally jumps to generic_memcpy(), which doesn't use
the dcbz instruction.
Once the initial MMU is set up, machine_init() patches memcpy()
by replacing this unconditional jump with a NOP.
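For reference, the runtime patching amounts to a single call to the existing
patch_instruction() helper; a minimal sketch of the C side, mirroring the
setup_32.c hunk below (the wrapper function name is made up for illustration):

	#include <linux/init.h>
	#include <linux/string.h>	/* memcpy() prototype */
	#include <asm/code-patching.h>	/* patch_instruction() */
	#include <asm/ppc-opcode.h>	/* PPC_INST_NOP */

	/* memcpy() now starts with "b generic_memcpy"; once the caches are
	 * usable, turn that first instruction into a nop so later calls
	 * fall through into the dcbz-optimised body. */
	static void __init enable_cacheable_memcpy(void)
	{
		patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
	}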
Reported-by: Michal Sojka <sojkam1@fel.cvut.cz>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
Changes in v2:
Using feature-fixups instead of a hardcoded call to patch_instruction()
Handling of memset() added
Changes in v3:
No longer using feature-fixups
Handling of memset() removed
Changes in v4:
None
arch/powerpc/kernel/setup_32.c | 3 +++
arch/powerpc/lib/copy_32.S | 5 +++++
2 files changed, 8 insertions(+)
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 07831ed..362495f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,6 +39,7 @@
#include <asm/udbg.h>
#include <asm/mmu_context.h>
#include <asm/epapr_hcalls.h>
+#include <asm/code-patching.h>
#define DBG(fmt...)
@@ -122,6 +123,8 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
+ patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 2ef50c6..da5847d 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -128,6 +128,10 @@ _GLOBAL(memset)
* the destination area is cacheable.
* We only use this version if the source and dest don't overlap.
* -- paulus.
+ *
* During early init, the cache might not be active yet, so dcbz cannot be used.
* We therefore jump to generic_memcpy, which doesn't use dcbz. This jump is
* replaced by a nop once the cache is active; this is done in machine_init().
*/
_GLOBAL(memmove)
cmplw 0,r3,r4
@@ -135,6 +139,7 @@ _GLOBAL(memmove)
/* fall through */
_GLOBAL(memcpy)
+ b generic_memcpy
add r7,r3,r5 /* test if the src & dst overlap */
add r8,r4,r5
cmplw 0,r4,r7
--
2.1.0
* [PATCH v4 2/2] powerpc32: memset: only use dcbz once cache is enabled
2015-09-16 10:04 [PATCH v4 0/2] powerpc32: memcpy/memset: only use dcbz once cache is enabled Christophe Leroy
2015-09-16 10:04 ` [PATCH v4 1/2] powerpc32: memcpy: " Christophe Leroy
@ 2015-09-16 10:04 ` Christophe Leroy
2015-09-17 5:13 ` [v4,2/2] " Michael Ellerman
1 sibling, 1 reply; 5+ messages in thread
From: Christophe Leroy @ 2015-09-16 10:04 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
scottwood, sojkam1
Cc: linux-kernel, linuxppc-dev
memset() uses the dcbz instruction to speed up clearing by not wasting time
loading cache lines with data that will be overwritten.
Some platforms, like the mpc52xx, do not have the cache active at startup
and can therefore not use memset(). Although no part of the code
explicitly uses memset(), GCC may make calls to it.
This patch modifies memset() so that at startup, memset()
unconditionally skips the optimised block that uses the dcbz instruction.
Once the initial MMU is set up, machine_init() patches memset()
by replacing this unconditional jump with a NOP.
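Unlike the memcpy() case, the branch to skip sits in the middle of memset(),
so the patch gives that single instruction its own global label and the C
side takes its address through an extern declaration; a minimal sketch
mirroring the hunks below:

	/* memset_nocache_branch marks the "b 2f" inside memset() */
	extern unsigned int memset_nocache_branch;

	/* Once the caches are usable, let memset() use the dcbz block */
	patch_instruction(&memset_nocache_branch, PPC_INST_NOP);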
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
Changes in v2:
Was part of [v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
Changes in v3:
No longer using feature-fixups
Handling of memcpy() and memset() split into two patches
Changes in v4:
Skipping the optimised block in the middle of memset() instead of
branching to a newly implemented simple_memset()
arch/powerpc/kernel/setup_32.c | 3 +++
arch/powerpc/lib/copy_32.S | 6 ++++++
2 files changed, 9 insertions(+)
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 362495f..cdb8221 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -116,6 +116,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
* This is called very early on the boot process, after a minimal
* MMU environment has been set up but before MMU_init is called.
*/
+extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
+
notrace void __init machine_init(u64 dt_ptr)
{
lockdep_init();
@@ -124,6 +126,7 @@ notrace void __init machine_init(u64 dt_ptr)
udbg_early_init();
patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
+ patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index da5847d..c44df2d 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -73,6 +73,10 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1)
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
* area is cacheable. -- paulus
+ *
* During early init, the cache might not be active yet, so dcbz cannot be used.
* We therefore skip the optimised block that uses dcbz. This branch is
* replaced by a nop once the cache is active; this is done in machine_init().
*/
_GLOBAL(memset)
rlwimi r4,r4,8,16,23
@@ -88,6 +92,8 @@ _GLOBAL(memset)
subf r6,r0,r6
cmplwi 0,r4,0
bne 2f /* Use normal procedure if r4 is not zero */
+_GLOBAL(memset_nocache_branch)
+ b 2f /* Skip optimised block until cache is enabled */
clrlwi r7,r6,32-LG_CACHELINE_BYTES
add r8,r7,r5
--
2.1.0