linuxppc-dev.lists.ozlabs.org archive mirror
* [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
@ 2015-09-10  6:41 Christophe Leroy
  2015-09-10 22:05 ` Scott Wood
  0 siblings, 1 reply; 6+ messages in thread
From: Christophe Leroy @ 2015-09-10  6:41 UTC (permalink / raw)
  To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
	scottwood, sojkam1
  Cc: linux-kernel, linuxppc-dev

memcpy() and memset() use the dcbz instruction to speed up copies by not
wasting time loading cache lines with data that will be overwritten.
Some platforms like mpc52xx do not have the cache active at startup and
can therefore not use memcpy(). Although no part of the code
explicitly uses memcpy(), GCC generates calls to it.

This patch implements cache-related fixups.
At startup, the functions use code that does not rely on dcbz:
* For memcpy(), dcbz is replaced by dcbtst, which is harmless when the
cache is not enabled, and which helps a bit (although not as much
as dcbz) if the cache is already enabled.
* For memset(), it branches unconditionally to the alternative path
normally used only when setting a non-zero value. That path doesn't
use dcbz.

Once the initial MMU is set up, in machine_init() we call
do_feature_fixups() which replaces the temporary instructions with
the final ones.
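
For illustration, a rough sketch of the mechanism (simplified, field names
approximate, not the exact kernel code): each
BEGIN_CACHE_SECTION/CACHE_SECTION_ELSE/ALT_CACHE_SECTION_END triplet records
where the default and alternative instructions live, and do_feature_fixups()
walks those records:

struct fixup_entry {
	unsigned long	mask;	/* feature bits this entry tests */
	unsigned long	value;	/* value required to keep the default code */
	long		start_off, end_off;		/* default (temporary) code */
	long		alt_start_off, alt_end_off;	/* alternative code */
};

static void apply_fixups(unsigned long features,
			 struct fixup_entry *start, struct fixup_entry *end)
{
	struct fixup_entry *f;

	for (f = start; f < end; f++) {
		if ((features & f->mask) == f->value)
			continue;	/* keep the default instructions */
		/* otherwise patch the alternative instructions over the
		 * default ones, padding with nops if it is shorter */
	}
}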

Reported-by: Michal Sojka <sojkam1@fel.cvut.cz>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
changes in v2:
  Using feature-fixups instead of hardcoded call to patch_instruction()
  Handling of memset() added

 arch/powerpc/include/asm/cache.h          |  8 ++++++++
 arch/powerpc/include/asm/feature-fixups.h | 30 ++++++++++++++++++++++++++++++
 arch/powerpc/kernel/setup_32.c            |  3 +++
 arch/powerpc/kernel/vmlinux.lds.S         |  8 ++++++++
 arch/powerpc/lib/copy_32.S                | 16 ++++++++++++++++
 5 files changed, 65 insertions(+)

diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index a2de4f0..4d51010 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -48,6 +48,10 @@ static inline void logmpp(u64 x)
 
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
+#ifdef CONFIG_PPC32
+#define CACHE_NOW_ON	1
+#endif
+
 #if defined(__ASSEMBLY__)
 /*
  * For a snooping icache, we still need a dummy icbi to purge all the
@@ -64,6 +68,10 @@ static inline void logmpp(u64 x)
 #else
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
+#ifdef CONFIG_PPC32
+extern unsigned int __start___cache_fixup, __stop___cache_fixup;
+#endif
+
 #ifdef CONFIG_6xx
 extern long _get_L2CR(void);
 extern long _get_L3CR(void);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 9a67a38..7f351cd 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -184,4 +184,34 @@ label##3:					       	\
 	FTR_ENTRY_OFFSET label##1b-label##3b;		\
 	.popsection;
 
+/* Cache related sections */
+#define BEGIN_CACHE_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_CACHE_SECTION			START_FTR_SECTION(97)
+
+#define END_CACHE_SECTION_NESTED(msk, val, label)		\
+	FTR_SECTION_ELSE_NESTED(label)				\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __cache_fixup)
+
+#define END_CACHE_SECTION(msk, val)		\
+	END_CACHE_SECTION_NESTED(msk, val, 97)
+
+#define END_CACHE_SECTION_IFSET(msk)	END_CACHE_SECTION((msk), (msk))
+#define END_CACHE_SECTION_IFCLR(msk)	END_CACHE_SECTION((msk), 0)
+
+/* CACHE feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define CACHE_SECTION_ELSE_NESTED(label)	FTR_SECTION_ELSE_NESTED(label)
+#define CACHE_SECTION_ELSE	CACHE_SECTION_ELSE_NESTED(97)
+#define ALT_CACHE_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __cache_fixup)
+#define ALT_CACHE_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_CACHE_SECTION_END_NESTED(msk, msk, label)
+#define ALT_CACHE_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_CACHE_SECTION_END_NESTED(msk, 0, label)
+#define ALT_CACHE_SECTION_END(msk, val)	\
+	ALT_CACHE_SECTION_END_NESTED(msk, val, 97)
+#define ALT_CACHE_SECTION_END_IFSET(msk)	\
+	ALT_CACHE_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_CACHE_SECTION_END_IFCLR(msk)	\
+	ALT_CACHE_SECTION_END_NESTED_IFCLR(msk, 97)
+
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 07831ed..41d39da 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -122,6 +122,9 @@ notrace void __init machine_init(u64 dt_ptr)
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
 
+	do_feature_fixups(CACHE_NOW_ON, &__start___cache_fixup,
+			  &__stop___cache_fixup);
+
 	/* Do some early initialization based on the flat device tree */
 	early_init_devtree(__va(dt_ptr));
 
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 1db6851..3c7dcab 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -148,6 +148,14 @@ SECTIONS
 		__stop___fw_ftr_fixup = .;
 	}
 #endif
+#ifdef CONFIG_PPC32
+	. = ALIGN(8);
+	__cache_fixup : AT(ADDR(__cache_fixup) - LOAD_OFFSET) {
+		__start___cache_fixup = .;
+		*(__cache_fixup)
+		__stop___cache_fixup = .;
+	}
+#endif
 	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
 		INIT_RAM_FS
 	}
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 2ef50c6..c0b8d52 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -87,7 +87,11 @@ _GLOBAL(memset)
 	add	r5,r0,r5
 	subf	r6,r0,r6
 	cmplwi	0,r4,0
+BEGIN_CACHE_SECTION
+	b	2f	/* Use normal procedure until cache is active */
+CACHE_SECTION_ELSE
 	bne	2f	/* Use normal procedure if r4 is not zero */
+ALT_CACHE_SECTION_END_IFCLR(CACHE_NOW_ON)
 
 	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
 	add	r8,r7,r5
@@ -172,7 +176,19 @@ _GLOBAL(memcpy)
 	mtctr	r0
 	beq	63f
 53:
+	/*
+	 * During early init, cache might not be active yet, so dcbz cannot be
+	 * used. We use dcbtst instead of dcbz. If the cache is not active, it
+	 * acts just like a nop. If the cache is active, it at least prefetches
+	 * the line to be overwritten.
+	 * Will be replaced by dcbz at runtime in machine_init()
+	 */
+BEGIN_CACHE_SECTION
+	dcbtst	r11,r6
+CACHE_SECTION_ELSE
 	dcbz	r11,r6
+ALT_CACHE_SECTION_END_IFCLR(CACHE_NOW_ON)
+
 	COPY_16_BYTES
 #if L1_CACHE_BYTES >= 32
 	COPY_16_BYTES
-- 
2.1.0


* Re: [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
  2015-09-10  6:41 [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled Christophe Leroy
@ 2015-09-10 22:05 ` Scott Wood
  2015-09-11  1:24   ` Michael Ellerman
  0 siblings, 1 reply; 6+ messages in thread
From: Scott Wood @ 2015-09-10 22:05 UTC (permalink / raw)
  To: Christophe Leroy
  Cc: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, sojkam1,
	linux-kernel, linuxppc-dev

On Thu, 2015-09-10 at 08:41 +0200, Christophe Leroy wrote:
>  
> +/* Cache related sections */
> +#define BEGIN_CACHE_SECTION_NESTED(label)    START_FTR_SECTION(label)
> +#define BEGIN_CACHE_SECTION                  START_FTR_SECTION(97)
> +
> +#define END_CACHE_SECTION_NESTED(msk, val, label)            \
> +     FTR_SECTION_ELSE_NESTED(label)                          \
> +     MAKE_FTR_SECTION_ENTRY(msk, val, label, __cache_fixup)
> +
> +#define END_CACHE_SECTION(msk, val)          \
> +     END_CACHE_SECTION_NESTED(msk, val, 97)
> +
> +#define END_CACHE_SECTION_IFSET(msk) END_CACHE_SECTION((msk), (msk))
> +#define END_CACHE_SECTION_IFCLR(msk) END_CACHE_SECTION((msk), 0)
> +
> +/* CACHE feature sections with alternatives, use BEGIN_FTR_SECTION to start */
> +#define CACHE_SECTION_ELSE_NESTED(label)     FTR_SECTION_ELSE_NESTED(label)
> +#define CACHE_SECTION_ELSE   CACHE_SECTION_ELSE_NESTED(97)
> +#define ALT_CACHE_SECTION_END_NESTED(msk, val, label)        \
> +     MAKE_FTR_SECTION_ENTRY(msk, val, label, __cache_fixup)
> +#define ALT_CACHE_SECTION_END_NESTED_IFSET(msk, label)       \
> +     ALT_CACHE_SECTION_END_NESTED(msk, msk, label)
> +#define ALT_CACHE_SECTION_END_NESTED_IFCLR(msk, label)       \
> +     ALT_CACHE_SECTION_END_NESTED(msk, 0, label)
> +#define ALT_CACHE_SECTION_END(msk, val)      \
> +     ALT_CACHE_SECTION_END_NESTED(msk, val, 97)
> +#define ALT_CACHE_SECTION_END_IFSET(msk)     \
> +     ALT_CACHE_SECTION_END_NESTED_IFSET(msk, 97)
> +#define ALT_CACHE_SECTION_END_IFCLR(msk)     \
> +     ALT_CACHE_SECTION_END_NESTED_IFCLR(msk, 97)

I don't think this duplication is what Michael meant by "the normal cpu 
feature sections".  What else is going to use this very specific 
infrastructure?

-Scott


* Re: [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
  2015-09-10 22:05 ` Scott Wood
@ 2015-09-11  1:24   ` Michael Ellerman
  2015-09-12  9:57     ` christophe leroy
  0 siblings, 1 reply; 6+ messages in thread
From: Michael Ellerman @ 2015-09-11  1:24 UTC (permalink / raw)
  To: Scott Wood
  Cc: Christophe Leroy, Benjamin Herrenschmidt, Paul Mackerras, sojkam1,
	linux-kernel, linuxppc-dev

On Thu, 2015-09-10 at 17:05 -0500, Scott Wood wrote:
> On Thu, 2015-09-10 at 08:41 +0200, Christophe Leroy wrote:
> >  
> > +/* Cache related sections */
> > +#define BEGIN_CACHE_SECTION_NESTED(label)    START_FTR_SECTION(label)
> > +#define BEGIN_CACHE_SECTION                  START_FTR_SECTION(97)
> > +
> > +#define END_CACHE_SECTION_NESTED(msk, val, label)            \
> > +     FTR_SECTION_ELSE_NESTED(label)                          \
> > +     MAKE_FTR_SECTION_ENTRY(msk, val, label, __cache_fixup)
> > +
> > +#define END_CACHE_SECTION(msk, val)          \
> > +     END_CACHE_SECTION_NESTED(msk, val, 97)
> > +
> > +#define END_CACHE_SECTION_IFSET(msk) END_CACHE_SECTION((msk), (msk))
> > +#define END_CACHE_SECTION_IFCLR(msk) END_CACHE_SECTION((msk), 0)
> > +
> > +/* CACHE feature sections with alternatives, use BEGIN_FTR_SECTION to start */
> > +#define CACHE_SECTION_ELSE_NESTED(label)     FTR_SECTION_ELSE_NESTED(label)
> > +#define CACHE_SECTION_ELSE   CACHE_SECTION_ELSE_NESTED(97)
> > +#define ALT_CACHE_SECTION_END_NESTED(msk, val, label)        \
> > +     MAKE_FTR_SECTION_ENTRY(msk, val, label, __cache_fixup)
> > +#define ALT_CACHE_SECTION_END_NESTED_IFSET(msk, label)       \
> > +     ALT_CACHE_SECTION_END_NESTED(msk, msk, label)
> > +#define ALT_CACHE_SECTION_END_NESTED_IFCLR(msk, label)       \
> > +     ALT_CACHE_SECTION_END_NESTED(msk, 0, label)
> > +#define ALT_CACHE_SECTION_END(msk, val)      \
> > +     ALT_CACHE_SECTION_END_NESTED(msk, val, 97)
> > +#define ALT_CACHE_SECTION_END_IFSET(msk)     \
> > +     ALT_CACHE_SECTION_END_NESTED_IFSET(msk, 97)
> > +#define ALT_CACHE_SECTION_END_IFCLR(msk)     \
> > +     ALT_CACHE_SECTION_END_NESTED_IFCLR(msk, 97)
> 
> I don't think this duplication is what Michael meant by "the normal cpu 
> feature sections".  What else is going to use this very specific 
> infrastructure?

Yeah, sorry, I was hoping you could do it with the existing cpu feature
mechanism.

It looks like the timing doesn't work, ie. you need to patch this stuff in
machine_init(), which is later than the regular patching which gets done in
early_init().

This is one of the festering differences we have between the 32 and 64-bit
initialisation code, ie. on 64-bit we do the patching much later.


So I think the cleanest solution is to have memcpy branch to generic_memcpy by
default, and then patch that to a nop once you're up and running. Something
like:

diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index bb02e9f6944e..1c1a4e8866ad 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
 #include <asm/udbg.h>
 #include <asm/mmu_context.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/code-patching.h>
 
 #define DBG(fmt...)
 
@@ -119,6 +120,8 @@ notrace void __init machine_init(u64 dt_ptr)
        /* Do some early initialization based on the flat device tree */
        early_init_devtree(__va(dt_ptr));
 
+       patch_instruction((unsigned int *)&memcpy, 0x60000000);
+
        epapr_paravirt_early_init();
 
        early_init_mmu();
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 2ef50c629470..6446d2915e41 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -135,6 +135,7 @@ _GLOBAL(memmove)
        /* fall through */
 
 _GLOBAL(memcpy)
+       b       generic_memcpy
        add     r7,r3,r5                /* test if the src & dst overlap */
        add     r8,r4,r5
        cmplw   0,r4,r7
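
(For reference, 0x60000000 is the encoding of the PowerPC nop, ie. ori 0,0,0,
so the patch_instruction() call above replaces the initial branch to
generic_memcpy with a nop once it is safe to use the dcbz-based copy.)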

cheers


* Re: [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
  2015-09-11  1:24   ` Michael Ellerman
@ 2015-09-12  9:57     ` christophe leroy
  2015-09-14  9:59       ` Michael Ellerman
  2015-09-14 16:32       ` Scott Wood
  0 siblings, 2 replies; 6+ messages in thread
From: christophe leroy @ 2015-09-12  9:57 UTC (permalink / raw)
  To: Michael Ellerman, Scott Wood
  Cc: Benjamin Herrenschmidt, Paul Mackerras, sojkam1, linux-kernel,
	linuxppc-dev



On 11/09/2015 03:24, Michael Ellerman wrote:
> On Thu, 2015-09-10 at 17:05 -0500, Scott Wood wrote:
>>
>> I don't think this duplication is what Michael meant by "the normal cpu
>> feature sections".  What else is going to use this very specific
>> infrastructure?
> Yeah, sorry, I was hoping you could do it with the existing cpu feature
> mechanism.
>
> It looks like the timing doesn't work, ie. you need to patch this stuff in
> machine_init(), which is later than the regular patching which gets done in
> early_init().
>
> This is one of the festering differences we have between the 32 and 64-bit
> initialisation code, ie. on 64-bit we do the patching much later.
>
>

I've just thought of another possible alternative.
Is there any issue with calling do_feature_fixups() twice for the same
features?
If not, we could define a MMU_CACHE_NOW_ON dummy MMU feature, then
call do_feature_fixups() again in machine_init() to patch the memcpy/memset
stuff, something like:

In arch/powerpc/include/asm/mmu.h:
+#define MMU_CACHE_NOW_ON                ASM_CONST(0x00008000)

In arch/powerpc/kernel/setup_32.c: @machine_init()

         udbg_early_init();

+        spec = identify_cpu(0, mfspr(SPRN_PVR));
+        do_feature_fixups(spec->mmu_features | MMU_CACHE_NOW_ON,
+                          &__start___mmu_ftr_fixup,
+                          &__stop___mmu_ftr_fixup);

         /* Do some early initialization based on the flat device tree */
         early_init_devtree(__va(dt_ptr));


Christophe


* Re: [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
  2015-09-12  9:57     ` christophe leroy
@ 2015-09-14  9:59       ` Michael Ellerman
  2015-09-14 16:32       ` Scott Wood
  1 sibling, 0 replies; 6+ messages in thread
From: Michael Ellerman @ 2015-09-14  9:59 UTC (permalink / raw)
  To: christophe leroy
  Cc: Scott Wood, Benjamin Herrenschmidt, Paul Mackerras, sojkam1,
	linux-kernel, linuxppc-dev

On Sat, 2015-09-12 at 11:57 +0200, christophe leroy wrote:
> 
> On 11/09/2015 03:24, Michael Ellerman wrote:
> > On Thu, 2015-09-10 at 17:05 -0500, Scott Wood wrote:
> >>
> >> I don't think this duplication is what Michael meant by "the normal cpu
> >> feature sections".  What else is going to use this very specific
> >> infrastructure?
> > Yeah, sorry, I was hoping you could do it with the existing cpu feature
> > mechanism.
> >
> > It looks like the timing doesn't work, ie. you need to patch this stuff in
> > machine_init(), which is later than the regular patching which gets done in
> > early_init().
> >
> > This is one of the festering differences we have between the 32 and 64-bit
> > initialisation code, ie. on 64-bit we do the patching much later.
>
> I've just thought about maybe another alternative.
> Is there any issue with calling do_feature_fixups() twice for the same
> features ?

Not that I can think of, but you never know.

> If not, we could define a MMU_CACHE_NOW_ON dummy MMU feature, then
> call again do_feature_fixups() in machine_init() to patch memcpy/memset 
> stuff, something like:
> 
> In arch/powerpc/include/asm/mmu.h:
> +#define MMU_CACHE_NOW_ON                ASM_CONST(0x00008000)
> 
> In arch/powerpc/kernel/setup_32.c: @machine_init()
> 
>          udbg_early_init();
> 
> +        spec = identify_cpu(0, mfspr(SPRN_PVR));
> +        do_feature_fixups(spec->mmu_features | MMU_CACHE_NOW_ON,
> +                          &__start___mmu_ftr_fixup,
> +                          &__stop___mmu_ftr_fixup);


Did you try that? It would be cleaner, especially now that you have to do memset as well.

cheers


* Re: [PATCH v2] powerpc32: memcpy/memset: only use dcbz once cache is enabled
  2015-09-12  9:57     ` christophe leroy
  2015-09-14  9:59       ` Michael Ellerman
@ 2015-09-14 16:32       ` Scott Wood
  1 sibling, 0 replies; 6+ messages in thread
From: Scott Wood @ 2015-09-14 16:32 UTC (permalink / raw)
  To: christophe leroy
  Cc: Michael Ellerman, Benjamin Herrenschmidt, Paul Mackerras, sojkam1,
	linux-kernel, linuxppc-dev

On Sat, 2015-09-12 at 11:57 +0200, christophe leroy wrote:
> On 11/09/2015 03:24, Michael Ellerman wrote:
> > On Thu, 2015-09-10 at 17:05 -0500, Scott Wood wrote:
> > > 
> > > I don't think this duplication is what Michael meant by "the normal cpu
> > > feature sections".  What else is going to use this very specific
> > > infrastructure?
> > Yeah, sorry, I was hoping you could do it with the existing cpu feature
> > mechanism.
> > 
> > It looks like the timing doesn't work, ie. you need to patch this stuff in
> > machine_init(), which is later than the regular patching which gets done 
> > in
> > early_init().
> > 
> > This is one of the festering differences we have between the 32 and 64-bit
> > initialisation code, ie. on 64-bit we do the patching much later.
> > 
> > 
> 
> I've just thought about maybe another alternative.
> Is there any issue with calling do_feature_fixups() twice for the same 
> features ?
> If not, we could define a MMU_CACHE_NOW_ON dummy MMU feature, then
> call again do_feature_fixups() in machine_init() to patch memcpy/memset 
> stuff, something like:
> 
> In arch/powerpc/include/asm/mmu.h:
> +#define MMU_CACHE_NOW_ON                ASM_CONST(0x00008000)
> 
> In arch/powerpc/kernel/setup_32.c: @machine_init()
> 
>          udbg_early_init();
> 
> +        spec = identify_cpu(0, mfspr(SPRN_PVR));
> +        do_feature_fixups(spec->mmu_features | MMU_CACHE_NOW_ON,
> +                          &__start___mmu_ftr_fixup,
> +                          &__stop___mmu_ftr_fixup);

This will cause cpu_setup() to be called twice on booke.  I'm not sure if 
that will cause any harm with the current cpu_setup() implementation, but 
it's complexity that is better avoided.  Why not just use cur_cpu_spec?
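
Something like this, as a minimal sketch (assuming cur_cpu_spec has already
been set up by early_init() by the time machine_init() runs):

	do_feature_fixups(cur_cpu_spec->mmu_features | MMU_CACHE_NOW_ON,
			  &__start___mmu_ftr_fixup,
			  &__stop___mmu_ftr_fixup);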

How much code is between the enabling of caches and the application of fixups 
(quite a lot on booke where cache is enabled by the bootloader...)?  Perhaps 
it's better to label it something that indicates that cache block operations 
are safe to use, so nobody gets the idea that it's OK to use it to protect 
things that can only be done before caches are enabled.

What happens if someone sees MMU_CACHE_NOW_ON (or whatever it ends up being 
called) and decides to call mmu_has_feature()?  At least set the bit in
spec->mmu_features rather than just for the do_feature_fixups() argument, and 
hope that nobody implements MMU_FTRS_POSSIBLE/ALWAYS, or checks the feature 
on 64-bit...  I'm not 100% convinced that abusing cpu feature mechanisms for 
boot sequence control is a good idea.  The direct patching alternative is 
quite simple, and if we were to accumulate enough instances of that (or more 
complicated instances) then patching infrastructure that is explicitly 
relating to the current state of the system rather than permanent hardware 
description could be justified.

-Scott
