* [PATCH 2/8] powerpc/slb: Rename all the 'slot' occurrences to 'entry'
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
@ 2015-07-29 7:09 ` Anshuman Khandual
2015-08-13 1:44 ` [2/8] " Michael Ellerman
2015-07-29 7:10 ` [PATCH 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
` (6 subsequent siblings)
7 siblings, 1 reply; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:09 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
These functions essentially deal with individual SLB slots and the
entries they contain. Using both 'entry' and 'slot' as synonyms makes
the code really confusing at times. This patch makes the terminology
uniform across the file by replacing all those 'slot's with 'entry's.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/mm/slb.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 62fafb3..faf9f0c 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@ static void slb_allocate(unsigned long ea)
(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
- unsigned long slot)
+ unsigned long entry)
{
- return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+ return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
}
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -308,12 +308,11 @@ void slb_initialize(void)
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
- /* Invalidate the entire SLB (even slot 0) & all the ERATS */
+ /* Invalidate the entire SLB (even entry 0) & all the ERATS */
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
/* For the boot cpu, we're running on the stack in init_thread_union,
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 3/8] powerpc/slb: Define macros for the bolted slots
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
2015-07-29 7:09 ` [PATCH 2/8] powerpc/slb: Rename all the 'slot' occurrences to 'entry' Anshuman Khandual
@ 2015-07-29 7:10 ` Anshuman Khandual
2015-07-29 7:10 ` [PATCH 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
` (5 subsequent siblings)
7 siblings, 0 replies; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:10 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
This patch defines an enum of constants for all the three bolted SLB slots.
This also renames the 'create_shadowed_slbe' function as 'new_shadowed_slbe'.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/mm/slb.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index faf9f0c..701a57f 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -25,6 +25,11 @@
#include <asm/udbg.h>
#include <asm/code-patching.h>
+enum slb_slots {
+ LINEAR_SLOT = 0, /* Kernel linear map (0xc000000000000000) */
+ VMALLOC_SLOT = 1, /* Kernel virtual map (0xd000000000000000) */
+ KSTACK_SLOT = 2, /* Kernel stack map */
+};
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);
@@ -74,7 +79,7 @@ static inline void slb_shadow_clear(unsigned long entry)
get_slb_shadow()->save_area[entry].esid = 0;
}
-static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+static inline void new_shadowed_slbe(unsigned long ea, int ssize,
unsigned long flags,
unsigned long entry)
{
@@ -103,16 +108,16 @@ static void __slb_flush_and_rebolt(void)
lflags = SLB_VSID_KERNEL | linear_llp;
vflags = SLB_VSID_KERNEL | vmalloc_llp;
- ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+ ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_SLOT);
if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
ksp_esid_data &= ~SLB_ESID_V;
ksp_vsid_data = 0;
- slb_shadow_clear(2);
+ slb_shadow_clear(KSTACK_SLOT);
} else {
/* Update stack entry; others don't change */
- slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+ slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_SLOT);
ksp_vsid_data =
- be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
+ be64_to_cpu(get_slb_shadow()->save_area[KSTACK_SLOT].vsid);
}
/* We need to do this all in asm, so we're sure we don't touch
@@ -125,7 +130,7 @@ static void __slb_flush_and_rebolt(void)
"slbmte %2,%3\n"
"isync"
:: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
- "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+ "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_SLOT)),
"r"(ksp_vsid_data),
"r"(ksp_esid_data)
: "memory");
@@ -151,7 +156,7 @@ void slb_vmalloc_update(void)
unsigned long vflags;
vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+ slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_SLOT);
slb_flush_and_rebolt();
}
@@ -312,19 +317,19 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
- create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
+ new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_SLOT);
+ new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_SLOT);
/* For the boot cpu, we're running on the stack in init_thread_union,
* which is in the first segment of the linear mapping, and also
* get_paca()->kstack hasn't been initialized yet.
* For secondary cpus, we need to bolt the kernel stack entry now.
*/
- slb_shadow_clear(2);
+ slb_shadow_clear(KSTACK_SLOT);
if (raw_smp_processor_id() != boot_cpuid &&
(get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
- create_shadowed_slbe(get_paca()->kstack,
- mmu_kernel_ssize, lflags, 2);
+ new_shadowed_slbe(get_paca()->kstack,
+ mmu_kernel_ssize, lflags, KSTACK_SLOT);
asm volatile("isync":::"memory");
}
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 4/8] powerpc/slb: Add some helper functions to improve modularization
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
2015-07-29 7:09 ` [PATCH 2/8] powerpc/slb: Rename all the 'slot' occurrences to 'entry' Anshuman Khandual
2015-07-29 7:10 ` [PATCH 3/8] powerpc/slb: Define macros for the bolted slots Anshuman Khandual
@ 2015-07-29 7:10 ` Anshuman Khandual
2015-08-12 4:11 ` [4/8] " Michael Ellerman
2015-07-29 7:10 ` [PATCH 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
` (4 subsequent siblings)
7 siblings, 1 reply; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:10 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
This patch adds the following six helper functions to help improve
modularization and readability of the code.
(1) slb_invalidate_all: Invalidates the entire SLB
(2) slb_invalidate: Invalidates SLB entries present in PACA
(3) mmu_linear_vsid_flags: VSID flags for kernel linear mapping
(4) mmu_vmalloc_vsid_flags: VSID flags for kernel vmalloc mapping
(5) mmu_vmemmap_vsid_flags: VSID flags for kernel vmem mapping
(6) mmu_io_vsid_flags: VSID flags for kernel I/O mapping
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/mm/slb.c | 92 ++++++++++++++++++++++++++++++++++-----------------
1 file changed, 61 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 701a57f..c87d5de 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -96,18 +96,37 @@ static inline void new_shadowed_slbe(unsigned long ea, int ssize,
: "memory" );
}
+static inline unsigned long mmu_linear_vsid_flags(void)
+{
+ return SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
+}
+
+static inline unsigned long mmu_vmalloc_vsid_flags(void)
+{
+ return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+}
+
+static inline unsigned long mmu_io_vsid_flags(void)
+{
+ return SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long mmu_vmemmap_vsid_flags(void)
+{
+ return SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
+}
+#endif
+
static void __slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* and PR KVM appropriately too. */
- unsigned long linear_llp, vmalloc_llp, lflags, vflags;
+ unsigned long lflags, vflags;
unsigned long ksp_esid_data, ksp_vsid_data;
- linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
- lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | vmalloc_llp;
-
+ lflags = mmu_linear_vsid_flags();
+ vflags = mmu_vmalloc_vsid_flags();
ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_SLOT);
if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
ksp_esid_data &= ~SLB_ESID_V;
@@ -155,7 +174,7 @@ void slb_vmalloc_update(void)
{
unsigned long vflags;
- vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ vflags = mmu_vmalloc_vsid_flags();
slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_SLOT);
slb_flush_and_rebolt();
}
@@ -189,26 +208,15 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
-/* Flush all user entries from the segment table of the current processor. */
-void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
+static void slb_invalidate(void)
{
- unsigned long offset;
unsigned long slbie_data = 0;
- unsigned long pc = KSTK_EIP(tsk);
- unsigned long stack = KSTK_ESP(tsk);
- unsigned long exec_base;
+ unsigned long offset;
+ int i;
- /*
- * We need interrupts hard-disabled here, not just soft-disabled,
- * so that a PMU interrupt can't occur, which might try to access
- * user memory (to get a stack trace) and possible cause an SLB miss
- * which would update the slb_cache/slb_cache_ptr fields in the PACA.
- */
- hard_irq_disable();
offset = get_paca()->slb_cache_ptr;
if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
offset <= SLB_CACHE_ENTRIES) {
- int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
slbie_data = (unsigned long)get_paca()->slb_cache[i]
@@ -226,6 +234,23 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
/* Workaround POWER5 < DD2.1 issue */
if (offset == 1 || offset > SLB_CACHE_ENTRIES)
asm volatile("slbie %0" : : "r" (slbie_data));
+}
+
+/* Flush all user entries from the segment table of the current processor. */
+void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
+{
+ unsigned long pc = KSTK_EIP(tsk);
+ unsigned long stack = KSTK_ESP(tsk);
+ unsigned long exec_base;
+
+ /*
+ * We need interrupts hard-disabled here, not just soft-disabled,
+ * so that a PMU interrupt can't occur, which might try to access
+ * user memory (to get a stack trace) and possible cause an SLB miss
+ * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+ */
+ hard_irq_disable();
+ slb_invalidate();
get_paca()->slb_cache_ptr = 0;
get_paca()->context = mm->context;
@@ -258,6 +283,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
patch_instruction(insn_addr, insn);
}
+/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+static inline void slb_invalidate_all(void)
+{
+ asm volatile("isync":::"memory");
+ asm volatile("slbmte %0,%0"::"r" (0) : "memory");
+ asm volatile("isync; slbia; isync":::"memory");
+}
+
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
@@ -285,16 +318,16 @@ void slb_initialize(void)
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
- get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+ get_paca()->vmalloc_sllp = mmu_vmalloc_vsid_flags();
#ifdef CONFIG_SPARSEMEM_VMEMMAP
vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
- SLB_VSID_KERNEL | linear_llp);
+ mmu_linear_vsid_flags());
patch_slb_encoding(slb_miss_kernel_load_io,
- SLB_VSID_KERNEL | io_llp);
+ mmu_io_vsid_flags());
patch_slb_encoding(slb_compare_rr_to_size,
mmu_slb_size);
@@ -303,20 +336,17 @@ void slb_initialize(void)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
patch_slb_encoding(slb_miss_kernel_load_vmemmap,
- SLB_VSID_KERNEL | vmemmap_llp);
+ mmu_vmemmap_vsid_flags());
pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
}
get_paca()->stab_rr = SLB_NUM_BOLTED;
- lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | vmalloc_llp;
+ lflags = mmu_linear_vsid_flags();
+ vflags = mmu_vmalloc_vsid_flags();
- /* Invalidate the entire SLB (even entry 0) & all the ERATS */
- asm volatile("isync":::"memory");
- asm volatile("slbmte %0,%0"::"r" (0) : "memory");
- asm volatile("isync; slbia; isync":::"memory");
+ slb_invalidate_all();
new_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_SLOT);
new_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_SLOT);
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [4/8] powerpc/slb: Add some helper functions to improve modularization
2015-07-29 7:10 ` [PATCH 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
@ 2015-08-12 4:11 ` Michael Ellerman
2015-08-12 6:36 ` Anshuman Khandual
0 siblings, 1 reply; 18+ messages in thread
From: Michael Ellerman @ 2015-08-12 4:11 UTC (permalink / raw)
To: Anshuman Khandual, linuxppc-dev; +Cc: mikey
On Wed, 2015-29-07 at 07:10:01 UTC, Anshuman Khandual wrote:
> This patch adds the following six helper functions to help improve
> modularization and readability of the code.
>
> (1) slb_invalidate_all: Invalidates the entire SLB
> (2) slb_invalidate: Invalidates SLB entries present in PACA
> (3) mmu_linear_vsid_flags: VSID flags for kernel linear mapping
> (4) mmu_virtual_vsid_flags: VSID flags for kernel virtual mapping
> (5) mmu_vmemmap_vsid_flags: VSID flags for kernel vmem mapping
> (6) mmu_io_vsid_flags: VSID flags for kernel I/O mapping
That's too many changes for one patch, it's certainly not a single logical change.
I'm happy with all the flag ones being done in a single patch, but please do
the other two in separate patches.
cheers
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [4/8] powerpc/slb: Add some helper functions to improve modularization
2015-08-12 4:11 ` [4/8] " Michael Ellerman
@ 2015-08-12 6:36 ` Anshuman Khandual
0 siblings, 0 replies; 18+ messages in thread
From: Anshuman Khandual @ 2015-08-12 6:36 UTC (permalink / raw)
To: Michael Ellerman, linuxppc-dev; +Cc: mikey
On 08/12/2015 09:41 AM, Michael Ellerman wrote:
> On Wed, 2015-29-07 at 07:10:01 UTC, Anshuman Khandual wrote:
>> > This patch adds the following six helper functions to help improve
>> > modularization and readability of the code.
>> >
>> > (1) slb_invalidate_all: Invalidates the entire SLB
>> > (2) slb_invalidate: Invalidates SLB entries present in PACA
>> > (3) mmu_linear_vsid_flags: VSID flags for kernel linear mapping
>> > (4) mmu_virtual_vsid_flags: VSID flags for kernel virtual mapping
>> > (5) mmu_vmemmap_vsid_flags: VSID flags for kernel vmem mapping
>> > (6) mmu_io_vsid_flags: VSID flags for kernel I/O mapping
> That's too many changes for one patch, it's certainly not a single logical change.
>
> I'm happy with all the flag ones being done in a single patch, but please do
> the other two in separate patches.
Sure, will split this into three separate patches, also update the
in-code documentation as suggested on the [5/8] patch and then will
send out a new series.
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
` (2 preceding siblings ...)
2015-07-29 7:10 ` [PATCH 4/8] powerpc/slb: Add some helper functions to improve modularization Anshuman Khandual
@ 2015-07-29 7:10 ` Anshuman Khandual
2015-08-12 5:12 ` [5/8] " Michael Ellerman
2015-08-13 1:44 ` Michael Ellerman
2015-07-29 7:10 ` [PATCH 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
` (3 subsequent siblings)
7 siblings, 2 replies; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:10 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
This patch adds some documentation to 'patch_slb_encoding' function
explaining about how it clears the existing immediate value in the
given instruction and inserts a new one there.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/mm/slb.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index c87d5de..1962357 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -279,7 +279,18 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
static inline void patch_slb_encoding(unsigned int *insn_addr,
unsigned int immed)
{
- int insn = (*insn_addr & 0xffff0000) | immed;
+
+ /*
+ * This function patches either an li or a cmpldi instruction with
+ * a new immediate value. This relies on the fact that both li
+ * (which is actually addi) and cmpldi both take a 16-bit immediate
+ * value, and it is situated in the same location in the instruction,
+ * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
+ * To patch the value we read the existing instruction, clear the
+ * immediate value, and or in our new value, then write the instruction
+ * back.
+ */
+ unsigned int insn = (*insn_addr & 0xffff0000) | immed;
patch_instruction(insn_addr, insn);
}
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
2015-07-29 7:10 ` [PATCH 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
@ 2015-08-12 5:12 ` Michael Ellerman
2015-08-13 1:44 ` Michael Ellerman
1 sibling, 0 replies; 18+ messages in thread
From: Michael Ellerman @ 2015-08-12 5:12 UTC (permalink / raw)
To: Anshuman Khandual, linuxppc-dev; +Cc: mikey, Segher Boessenkool
On Wed, 2015-29-07 at 07:10:02 UTC, Anshuman Khandual wrote:
> This patch adds some documentation to 'patch_slb_encoding' function
> explaining about how it clears the existing immediate value in the
> given instruction and inserts a new one there.
>
> Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
> ---
> arch/powerpc/mm/slb.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index c87d5de..1962357 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -279,7 +279,18 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
> static inline void patch_slb_encoding(unsigned int *insn_addr,
> unsigned int immed)
> {
> - int insn = (*insn_addr & 0xffff0000) | immed;
> +
> + /*
> + * This function patches either an li or a cmpldi instruction with
> + * a new immediate value. This relies on the fact that both li
> + * (which is actually addi) and cmpldi both take a 16-bit immediate
> + * value, and it is situated in the same location in the instruction,
> + * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
> + * To patch the value we read the existing instruction, clear the
> + * immediate value, and or in our new value, then write the instruction
> + * back.
> + */
> + unsigned int insn = (*insn_addr & 0xffff0000) | immed;
> patch_instruction(insn_addr, insn);
> }
As Segher pointed out the signedness of the immediate value differs between the
instructions, I added:
+ * The signedness of the immediate operand differs between the two
+ * instructions however this code is only ever patching a small value,
+ * much less than 1 << 15, so we can get away with it.
cheers
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding
2015-07-29 7:10 ` [PATCH 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
2015-08-12 5:12 ` [5/8] " Michael Ellerman
@ 2015-08-13 1:44 ` Michael Ellerman
1 sibling, 0 replies; 18+ messages in thread
From: Michael Ellerman @ 2015-08-13 1:44 UTC (permalink / raw)
To: Anshuman Khandual, linuxppc-dev; +Cc: mikey
On Wed, 2015-29-07 at 07:10:02 UTC, Anshuman Khandual wrote:
> This patch adds some documentation to 'patch_slb_encoding' function
> explaining about how it clears the existing immediate value in the
> given instruction and inserts a new one there.
>
> Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Applied to powerpc next, thanks.
https://git.kernel.org/powerpc/c/79d0be7407955a268bce
cheers
^ permalink raw reply [flat|nested] 18+ messages in thread
* [PATCH 6/8] powerpc/prom: Simplify the logic while fetching SLB size
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
` (3 preceding siblings ...)
2015-07-29 7:10 ` [PATCH 5/8] powerpc/slb: Add documentation to runtime patching of SLB encoding Anshuman Khandual
@ 2015-07-29 7:10 ` Anshuman Khandual
2015-08-13 1:44 ` [6/8] " Michael Ellerman
2015-07-29 7:10 ` [PATCH 7/8] powerpc/xmon: Drop the 'valid' variable completely in 'dump_segments' Anshuman Khandual
` (2 subsequent siblings)
7 siblings, 1 reply; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:10 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
This patch just simplifies the existing code logic while fetching
the SLB size property from the device tree. This also changes the
function name from check_cpu_slb_size to init_mmu_slb_size as
it just initializes the mmu_slb_size value.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/kernel/prom.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b888b1..4bb43c0 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -218,22 +218,18 @@ static void __init check_cpu_pa_features(unsigned long node)
}
#ifdef CONFIG_PPC_STD_MMU_64
-static void __init check_cpu_slb_size(unsigned long node)
+static void __init init_mmu_slb_size(unsigned long node)
{
const __be32 *slb_size_ptr;
- slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
- if (slb_size_ptr != NULL) {
- mmu_slb_size = be32_to_cpup(slb_size_ptr);
- return;
- }
- slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
- if (slb_size_ptr != NULL) {
+ slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
+ of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+
+ if (slb_size_ptr)
mmu_slb_size = be32_to_cpup(slb_size_ptr);
- }
}
#else
-#define check_cpu_slb_size(node) do { } while(0)
+#define init_mmu_slb_size(node) do { } while(0)
#endif
static struct feature_property {
@@ -380,7 +376,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
check_cpu_feature_properties(node);
check_cpu_pa_features(node);
- check_cpu_slb_size(node);
+ init_mmu_slb_size(node);
#ifdef CONFIG_PPC64
if (nthreads > 1)
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 7/8] powerpc/xmon: Drop the 'valid' variable completely in 'dump_segments'
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
` (4 preceding siblings ...)
2015-07-29 7:10 ` [PATCH 6/8] powerpc/prom: Simplify the logic while fetching SLB size Anshuman Khandual
@ 2015-07-29 7:10 ` Anshuman Khandual
2015-08-13 1:44 ` [7/8] " Michael Ellerman
2015-07-29 7:10 ` [PATCH 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
2015-08-13 1:44 ` [1/8] powerpc/slb: Remove a duplicate extern variable Michael Ellerman
7 siblings, 1 reply; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:10 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
Value of the 'valid' variable is zero when 'esid' is zero and it does
not matter when 'esid' is non-zero. The variable 'valid' can be dropped
from the function 'dump_segments' by checking for validity of 'esid'
inside the nested code block. This patch does that change.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/xmon/xmon.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index e599259..bc1b066a 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2731,7 +2731,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
void dump_segments(void)
{
int i;
- unsigned long esid,vsid,valid;
+ unsigned long esid,vsid;
unsigned long llp;
printf("SLB contents of cpu 0x%x\n", smp_processor_id());
@@ -2739,10 +2739,9 @@ void dump_segments(void)
for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
- valid = (esid & SLB_ESID_V);
- if (valid | esid | vsid) {
+ if (esid || vsid) {
printf("%02d %016lx %016lx", i, esid, vsid);
- if (valid) {
+ if (esid & SLB_ESID_V) {
llp = vsid & SLB_VSID_LLP;
if (vsid & SLB_VSID_B_1T) {
printf(" 1T ESID=%9lx VSID=%13lx LLP:%3lx \n",
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* [PATCH 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
` (5 preceding siblings ...)
2015-07-29 7:10 ` [PATCH 7/8] powerpc/xmon: Drop the 'valid' variable completely in 'dump_segments' Anshuman Khandual
@ 2015-07-29 7:10 ` Anshuman Khandual
2015-08-12 6:05 ` Michael Ellerman
2015-08-13 1:44 ` [1/8] powerpc/slb: Remove a duplicate extern variable Michael Ellerman
7 siblings, 1 reply; 18+ messages in thread
From: Anshuman Khandual @ 2015-07-29 7:10 UTC (permalink / raw)
To: linuxppc-dev; +Cc: mpe, mikey
This patch adds a set of new elements to the existing PACA dump list
inside an xmon session which can be listed below improving the overall
xmon debug support.
(1) hmi_event_available
(2) dscr_default
(3) vmalloc_sllp
(4) slb_cache_ptr
(5) sprg_vdso
(6) tm_scratch
(7) core_idle_state_ptr
(8) thread_idle_state
(9) thread_mask
(10) slb_shadow
(11) pgd
(12) kernel_pgd
(13) tcd_ptr
(14) mc_kstack
(15) crit_kstack
(16) dbg_kstack
(17) user_time
(18) system_time
(19) user_time_scaled
(20) starttime
(21) starttime_user
(22) startspurr
(23) utime_sspurr
(24) stolen_time
With this patch, a typical xmon PACA dump looks something like this.
paca for cpu 0x0 @ c00000000fdc0000:
possible = yes
present = yes
online = yes
lock_token = 0x8000 (0x8)
paca_index = 0x0 (0xa)
kernel_toc = 0xc000000000e79300 (0x10)
kernelbase = 0xc000000000000000 (0x18)
kernel_msr = 0xb000000000001032 (0x20)
emergency_sp = 0xc00000003fff0000 (0x28)
mc_emergency_sp = 0xc00000003ffec000 (0x2e0)
in_mce = 0x0 (0x2e8)
hmi_event_available = 0x0 (0x2ea)
data_offset = 0xfa9f0000 (0x30)
hw_cpu_id = 0x0 (0x38)
cpu_start = 0x1 (0x3a)
kexec_state = 0x0 (0x3b)
slb_shadow[0]: = 0xc000000008000000 0x40016e7779000510
slb_shadow[1]: = 0xd000000008000001 0x400142add1000510
dscr_default = 0x0 (0x58)
vmalloc_sllp = 0x510 (0x1b8)
slb_cache_ptr = 0x3 (0x1ba)
slb_cache[0]: = 0x3f000
slb_cache[1]: = 0x1
slb_cache[2]: = 0x1000
__current = 0xc0000000a7406b70 (0x290)
kstack = 0xc0000000a750fe30 (0x298)
stab_rr = 0x11 (0x2a0)
saved_r1 = 0xc0000000a750f360 (0x2a8)
trap_save = 0x0 (0x2b8)
soft_enabled = 0x0 (0x2ba)
irq_happened = 0x1 (0x2bb)
io_sync = 0x0 (0x2bc)
irq_work_pending = 0x0 (0x2bd)
nap_state_lost = 0x0 (0x2be)
sprg_vdso = 0x0 (0x2c0)
tm_scratch = 0x800000010280f032 (0x2c8)
core_idle_state_ptr = (null) (0x2d0)
thread_idle_state = 0x0 (0x2d8)
thread_mask = 0x0 (0x2d9)
subcore_sibling_mask = 0x0 (0x2da)
user_time = 0x18895 (0x2f0)
system_time = 0x11dc2 (0x2f8)
user_time_scaled = 0x0 (0x300)
starttime = 0xe64688b4688a (0x308)
starttime_user = 0xe64688b466d1 (0x310)
startspurr = 0x1a79afea8 (0x318)
utime_sspurr = 0x0 (0x320)
stolen_time = 0x0 (0x328)
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
---
arch/powerpc/xmon/xmon.c | 57 ++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index bc1b066a..1e67c8b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2073,6 +2073,9 @@ static void xmon_rawdump (unsigned long adrs, long ndump)
static void dump_one_paca(int cpu)
{
struct paca_struct *p;
+#ifdef CONFIG_PPC_STD_MMU_64
+ int i = 0;
+#endif
if (setjmp(bus_error_jmp) != 0) {
printf("*** Error dumping paca for cpu 0x%x!\n", cpu);
@@ -2086,12 +2089,12 @@ static void dump_one_paca(int cpu)
printf("paca for cpu 0x%x @ %p:\n", cpu, p);
- printf(" %-*s = %s\n", 16, "possible", cpu_possible(cpu) ? "yes" : "no");
- printf(" %-*s = %s\n", 16, "present", cpu_present(cpu) ? "yes" : "no");
- printf(" %-*s = %s\n", 16, "online", cpu_online(cpu) ? "yes" : "no");
+ printf(" %-*s = %s\n", 20, "possible", cpu_possible(cpu) ? "yes" : "no");
+ printf(" %-*s = %s\n", 20, "present", cpu_present(cpu) ? "yes" : "no");
+ printf(" %-*s = %s\n", 20, "online", cpu_online(cpu) ? "yes" : "no");
#define DUMP(paca, name, format) \
- printf(" %-*s = %#-*"format"\t(0x%lx)\n", 16, #name, 18, paca->name, \
+ printf(" %-*s = %#-*"format"\t(0x%lx)\n", 20, #name, 18, paca->name, \
offsetof(struct paca_struct, name));
DUMP(p, lock_token, "x");
@@ -2103,11 +2106,37 @@ static void dump_one_paca(int cpu)
#ifdef CONFIG_PPC_BOOK3S_64
DUMP(p, mc_emergency_sp, "p");
DUMP(p, in_mce, "x");
+ DUMP(p, hmi_event_available, "x");
#endif
DUMP(p, data_offset, "lx");
DUMP(p, hw_cpu_id, "x");
DUMP(p, cpu_start, "x");
DUMP(p, kexec_state, "x");
+#ifdef CONFIG_PPC_STD_MMU_64
+ for (i = 0; i < SLB_NUM_BOLTED; i++) {
+ if (p->slb_shadow_ptr->save_area[i].esid ||
+ p->slb_shadow_ptr->save_area[i].vsid) {
+ printf(" slb_shadow[%d]: = 0x%lx 0x%lx\n", i,
+ p->slb_shadow_ptr->save_area[i].esid,
+ p->slb_shadow_ptr->save_area[i].vsid);
+ }
+ }
+#endif
+ DUMP(p, dscr_default, "llx");
+#ifdef CONFIG_PPC_STD_MMU_64
+ DUMP(p, vmalloc_sllp, "x");
+ DUMP(p, slb_cache_ptr, "x");
+ for (i = 0; i < p->slb_cache_ptr; i++)
+ printf(" slb_cache[%d]: = 0x%lx\n", i, p->slb_cache[i]);
+#endif
+#ifdef CONFIG_PPC_BOOK3E
+ DUMP(p, pgd, "p");
+ DUMP(p, kernel_pgd, "p");
+ DUMP(p, tcd_ptr, "p");
+ DUMP(p, mc_kstack, "p");
+ DUMP(p, crit_kstack, "p");
+ DUMP(p, dbg_kstack, "p");
+#endif
DUMP(p, __current, "p");
DUMP(p, kstack, "lx");
DUMP(p, stab_rr, "lx");
@@ -2118,7 +2147,27 @@ static void dump_one_paca(int cpu)
DUMP(p, io_sync, "x");
DUMP(p, irq_work_pending, "x");
DUMP(p, nap_state_lost, "x");
+ DUMP(p, sprg_vdso, "llx");
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ DUMP(p, tm_scratch, "llx");
+#endif
+
+#ifdef CONFIG_PPC_POWERNV
+ DUMP(p, core_idle_state_ptr, "p");
+ DUMP(p, thread_idle_state, "x");
+ DUMP(p, thread_mask, "x");
+ DUMP(p, subcore_sibling_mask, "x");
+#endif
+ DUMP(p, user_time, "llx");
+ DUMP(p, system_time, "llx");
+ DUMP(p, user_time_scaled, "llx");
+ DUMP(p, starttime, "llx");
+ DUMP(p, starttime_user, "llx");
+ DUMP(p, startspurr, "llx");
+ DUMP(p, utime_sspurr, "llx");
+ DUMP(p, stolen_time, "llx");
#undef DUMP
catch_memory_errors = 0;
--
2.1.0
^ permalink raw reply related [flat|nested] 18+ messages in thread
* Re: [PATCH 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list
2015-07-29 7:10 ` [PATCH 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
@ 2015-08-12 6:05 ` Michael Ellerman
2015-08-12 6:27 ` Anshuman Khandual
0 siblings, 1 reply; 18+ messages in thread
From: Michael Ellerman @ 2015-08-12 6:05 UTC (permalink / raw)
To: Anshuman Khandual; +Cc: linuxppc-dev, mikey
On Wed, 2015-07-29 at 12:40 +0530, Anshuman Khandual wrote:
> This patch adds a set of new elements to the existing PACA dump list
> inside an xmon session, which are listed below, improving the overall
> xmon debug support.
>
> (1) hmi_event_available
> (2) dscr_default
> (3) vmalloc_sllp
> (4) slb_cache_ptr
> (5) sprg_vdso
> (6) tm_scratch
> (7) core_idle_state_ptr
> (8) thread_idle_state
> (9) thread_mask
> (10) slb_shadow
> (11) pgd
> (12) kernel_pgd
> (13) tcd_ptr
> (14) mc_kstack
> (15) crit_kstack
> (16) dbg_kstack
> (17) user_time
> (18) system_time
> (19) user_time_scaled
> (20) starttime
> (21) starttime_user
> (22) startspurr
> (23) utime_sspurr
> (24) stolen_time
Adding these makes the paca display much longer than 24 lines. I know in
general we don't worry too much about folks on 80x24 green screens, but it's
nice if xmon works OK on those. Or on virtual consoles that don't scroll for
whatever reason.
So I'm going to hold off on this one until we have a way to display some of the
paca. I have an idea for that and will send a patch if it works.
cheers
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [PATCH 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list
2015-08-12 6:05 ` Michael Ellerman
@ 2015-08-12 6:27 ` Anshuman Khandual
0 siblings, 0 replies; 18+ messages in thread
From: Anshuman Khandual @ 2015-08-12 6:27 UTC (permalink / raw)
To: Michael Ellerman; +Cc: linuxppc-dev, mikey
On 08/12/2015 11:35 AM, Michael Ellerman wrote:
> On Wed, 2015-07-29 at 12:40 +0530, Anshuman Khandual wrote:
>> This patch adds a set of new elements to the existing PACA dump list
>> inside an xmon session, which are listed below, improving the overall
>> xmon debug support.
>>
>> (1) hmi_event_available
>> (2) dscr_default
>> (3) vmalloc_sllp
>> (4) slb_cache_ptr
>> (5) sprg_vdso
>> (6) tm_scratch
>> (7) core_idle_state_ptr
>> (8) thread_idle_state
>> (9) thread_mask
>> (10) slb_shadow
>> (11) pgd
>> (12) kernel_pgd
>> (13) tcd_ptr
>> (14) mc_kstack
>> (15) crit_kstack
>> (16) dbg_kstack
>> (17) user_time
>> (18) system_time
>> (19) user_time_scaled
>> (20) starttime
>> (21) starttime_user
>> (22) startspurr
>> (23) utime_sspurr
>> (24) stolen_time
>
> Adding these makes the paca display much longer than 24 lines. I know in
> general we don't worry too much about folks on 80x24 green screens, but it's
> nice if xmon works OK on those. Or on virtual consoles that don't scroll for
> whatever reason.
>
> So I'm going to hold off on this one until we have a way to display some of the
> paca. I have an idea for that and will send a patch if it works.
>
Sure, if you believe that is the best thing to do at the moment.
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [1/8] powerpc/slb: Remove a duplicate extern variable
2015-07-29 7:09 [PATCH 1/8] powerpc/slb: Remove a duplicate extern variable Anshuman Khandual
` (6 preceding siblings ...)
2015-07-29 7:10 ` [PATCH 8/8] powerpc/xmon: Add some more elements to the existing PACA dump list Anshuman Khandual
@ 2015-08-13 1:44 ` Michael Ellerman
7 siblings, 0 replies; 18+ messages in thread
From: Michael Ellerman @ 2015-08-13 1:44 UTC (permalink / raw)
To: Anshuman Khandual, linuxppc-dev; +Cc: mikey
On Wed, 2015-07-29 at 07:09:58 UTC, Anshuman Khandual wrote:
> This patch just removes one redundant entry for one extern variable
> 'slb_compare_rr_to_size' from the scope. This patch does not change
> any functionality.
>
> Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Applied to powerpc next, thanks.
https://git.kernel.org/powerpc/c/752b8adec4a776b4fdf0
cheers
^ permalink raw reply [flat|nested] 18+ messages in thread