* [PATCH v2 1/2] powerpc/64s/hash: add torture_slb kernel boot option to increase SLB faults
From: Nicholas Piggin @ 2020-05-04 9:06 UTC
To: linuxppc-dev; +Cc: Aneesh Kumar K . V, Nicholas Piggin
This option increases the number of SLB misses by limiting the number
of kernel SLB entries, and by increasing the flushing of cached
lookaside information. This helps stress-test difficult-to-hit paths
in the kernel.
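For example, a hash-MMU kernel can be booted with the option appended
to its command line (an illustrative qemu invocation only; disable_radix
is needed to force the hash MMU on radix-capable CPUs):

  qemu-system-ppc64 -M pseries -nographic -kernel vmlinux \
          -append "disable_radix torture_slb"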
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
v2:
- Address some comments from Aneesh about function names and types
 .../admin-guide/kernel-parameters.txt         |   5 +
 arch/powerpc/include/asm/book3s/64/mmu-hash.h |   7 +
 arch/powerpc/mm/book3s64/hash_utils.c         |  13 ++
 arch/powerpc/mm/book3s64/slb.c                | 152 ++++++++++++------
 4 files changed, 132 insertions(+), 45 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f2a93c8679e8..5a34b7dd9ebe 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -871,6 +871,11 @@
can be useful when debugging issues that require an SLB
miss to occur.
+ torture_slb [PPC]
+ Limits the number of kernel SLB entries, and flushes
+ them frequently to increase the rate of SLB faults
+ on kernel addresses.
+
disable= [IPV6]
See Documentation/networking/ipv6.txt.
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 3fa1b962dc27..758de1e0f676 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -317,6 +317,13 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
*/
extern int mmu_ci_restrictions;
+extern bool torture_slb_enabled;
+DECLARE_STATIC_KEY_FALSE(torture_slb_key);
+static inline bool torture_slb(void)
+{
+ return static_branch_unlikely(&torture_slb_key);
+}
+
/*
* This computes the AVPN and B fields of the first dword of a HPTE,
* for use when we want to match an existing PTE. The bottom 7 bits
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 8ed2411c3f39..9c487b5782ef 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -354,6 +354,7 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
}
static bool disable_1tb_segments = false;
+bool torture_slb_enabled __read_mostly = false;
static int __init parse_disable_1tb_segments(char *p)
{
@@ -362,6 +363,13 @@ static int __init parse_disable_1tb_segments(char *p)
}
early_param("disable_1tb_segments", parse_disable_1tb_segments);
+static int __init parse_torture_slb(char *p)
+{
+ torture_slb_enabled = true;
+ return 0;
+}
+early_param("torture_slb", parse_torture_slb);
+
static int __init htab_dt_scan_seg_sizes(unsigned long node,
const char *uname, int depth,
void *data)
@@ -854,6 +862,8 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
pr_info("Partition table %p\n", partition_tb);
}
+DEFINE_STATIC_KEY_FALSE(torture_slb_key);
+
static void __init htab_initialize(void)
{
unsigned long table;
@@ -870,6 +880,9 @@ static void __init htab_initialize(void)
printk(KERN_INFO "Using 1TB segments\n");
}
+ if (torture_slb_enabled)
+ static_branch_enable(&torture_slb_key);
+
/*
* Calculate the required size of the htab. We want the number of
* PTEGs to equal one half the number of real pages.
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 716204aee3da..cf41c4b63d95 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -68,7 +68,7 @@ static void assert_slb_presence(bool present, unsigned long ea)
* slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
* ignores all other bits from 0-27, so just clear them all.
*/
- ea &= ~((1UL << 28) - 1);
+ ea &= ~((1UL << SID_SHIFT) - 1);
asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
WARN_ON(present == (tmp == 0));
@@ -153,14 +153,42 @@ void slb_flush_all_realmode(void)
asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
+static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside)
+{
+ struct slb_shadow *p = get_slb_shadow();
+ unsigned long ksp_esid_data, ksp_vsid_data;
+ u32 ih;
+
+ /*
+ * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside
+ * information created with Class=0 entries, which we use for kernel
+ * SLB entries (the SLB entries themselves are still invalidated).
+ *
+ * Older processors will ignore this optimisation. Over-invalidation
+ * is fine because we never rely on lookaside information existing.
+ */
+ if (preserve_kernel_lookaside)
+ ih = 1;
+ else
+ ih = 0;
+
+ ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
+ ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
+
+ asm volatile(PPC_SLBIA(%0)" \n"
+ "slbmte %1, %2 \n"
+ :: "i" (ih),
+ "r" (ksp_vsid_data),
+ "r" (ksp_esid_data)
+ : "memory");
+}
+
/*
* This flushes non-bolted entries, it can be run in virtual mode. Must
* be called with interrupts disabled.
*/
void slb_flush_and_restore_bolted(void)
{
- struct slb_shadow *p = get_slb_shadow();
-
BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
WARN_ON(!irqs_disabled());
@@ -171,13 +199,10 @@ void slb_flush_and_restore_bolted(void)
*/
hard_irq_disable();
- asm volatile("isync\n"
- "slbia\n"
- "slbmte %0, %1\n"
- "isync\n"
- :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
- "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
- : "memory");
+ isync();
+ __slb_flush_and_restore_bolted(false);
+ isync();
+
assert_slb_presence(true, get_paca()->kstack);
get_paca()->slb_cache_ptr = 0;
@@ -400,6 +425,30 @@ void preload_new_slb_context(unsigned long start, unsigned long sp)
local_irq_enable();
}
+static void slb_cache_slbie_kernel(unsigned int index)
+{
+ unsigned long slbie_data = get_paca()->slb_cache[index];
+ unsigned long ksp = get_paca()->kstack;
+
+ slbie_data <<= SID_SHIFT;
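+ /* slb_cache entries are u32, which truncates the kernel region bits; restore them */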
+ slbie_data |= 0xc000000000000000ULL;
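+ /* Never invalidate the segment that maps the bolted kernel stack */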
+ if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
+ return;
+ slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;
+
+ asm volatile("slbie %0" : : "r" (slbie_data));
+}
+
+static void slb_cache_slbie_user(unsigned int index)
+{
+ unsigned long slbie_data = get_paca()->slb_cache[index];
+
+ slbie_data <<= SID_SHIFT;
+ slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
+ slbie_data |= SLBIE_C; /* user slbs have C=1 */
+
+ asm volatile("slbie %0" : : "r" (slbie_data));
+}
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
@@ -414,8 +463,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* which would update the slb_cache/slb_cache_ptr fields in the PACA.
*/
hard_irq_disable();
- asm volatile("isync" : : : "memory");
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ isync();
+ if (torture_slb()) {
+ __slb_flush_and_restore_bolted(false);
+ isync();
+ get_paca()->slb_cache_ptr = 0;
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* SLBIA IH=3 invalidates all Class=1 SLBEs and their
* associated lookaside structures, which matches what
@@ -423,47 +478,29 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* cache.
*/
asm volatile(PPC_SLBIA(3));
+
} else {
unsigned long offset = get_paca()->slb_cache_ptr;
if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
offset <= SLB_CACHE_ENTRIES) {
- unsigned long slbie_data = 0;
-
- for (i = 0; i < offset; i++) {
- unsigned long ea;
-
- ea = (unsigned long)
- get_paca()->slb_cache[i] << SID_SHIFT;
- /*
- * Could assert_slb_presence(true) here, but
- * hypervisor or machine check could have come
- * in and removed the entry at this point.
- */
-
- slbie_data = ea;
- slbie_data |= user_segment_size(slbie_data)
- << SLBIE_SSIZE_SHIFT;
- slbie_data |= SLBIE_C; /* user slbs have C=1 */
- asm volatile("slbie %0" : : "r" (slbie_data));
- }
+ /*
+ * Could assert_slb_presence(true) here, but
+ * hypervisor or machine check could have come
+ * in and removed the entry at this point.
+ */
+
+ for (i = 0; i < offset; i++)
+ slb_cache_slbie_user(i);
/* Workaround POWER5 < DD2.1 issue */
if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
- asm volatile("slbie %0" : : "r" (slbie_data));
+ slb_cache_slbie_user(0);
} else {
- struct slb_shadow *p = get_slb_shadow();
- unsigned long ksp_esid_data =
- be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
- unsigned long ksp_vsid_data =
- be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
-
- asm volatile(PPC_SLBIA(1) "\n"
- "slbmte %0,%1\n"
- "isync"
- :: "r"(ksp_vsid_data),
- "r"(ksp_esid_data));
+ /* Flush but retain kernel lookaside information */
+ __slb_flush_and_restore_bolted(true);
+ isync();
get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
}
@@ -503,7 +540,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* address accesses by the kernel (user mode won't happen until
* rfid, which is safe).
*/
- asm volatile("isync" : : : "memory");
+ isync();
}
void slb_set_size(u16 size)
@@ -571,6 +608,9 @@ static void slb_cache_update(unsigned long esid_data)
if (cpu_has_feature(CPU_FTR_ARCH_300))
return; /* ISAv3.0B and later does not use slb_cache */
+ if (torture_slb())
+ return;
+
/*
* Now update slb cache entries
*/
@@ -580,7 +620,7 @@ static void slb_cache_update(unsigned long esid_data)
* We have space in slb cache for optimized switch_slb().
* Top 36 bits from esid_data as per ISA
*/
- local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
local_paca->slb_cache_ptr++;
} else {
/*
@@ -671,6 +711,28 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
* accesses user memory before it returns to userspace with rfid.
*/
assert_slb_presence(false, ea);
+ if (torture_slb()) {
+ int slb_cache_index = local_paca->slb_cache_ptr;
+
+ /*
+ * torture_slb() does not use the slb cache, so repurpose it
+ * as a cache of inserted (non-bolted) kernel SLB entries. All
+ * non-bolted kernel entries are flushed on any user fault,
+ * or if there are already 3 non-bolted kernel entries.
+ */
+ BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3);
+ if (!kernel || slb_cache_index == 3) {
+ int i;
+
+ for (i = 0; i < slb_cache_index; i++)
+ slb_cache_slbie_kernel(i);
+ slb_cache_index = 0;
+ }
+
+ if (kernel)
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
+ local_paca->slb_cache_ptr = slb_cache_index;
+ }
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
barrier();
--
2.23.0
* [PATCH v2 2/2] powerpc/64s/hash: add torture_hpt kernel boot option to increase hash faults
From: Nicholas Piggin @ 2020-05-04 9:06 UTC
To: linuxppc-dev; +Cc: Aneesh Kumar K . V, Nicholas Piggin
This option increases the number of hash faults by limiting the number
of kernel HPT entries. It does this by accessing the address
immediately after installing the HPTE, then removing the entry again
(except for cache-inhibited entries, which must not be accessed and
are instead removed on the next hash fault).
This helps stress-test difficult-to-hit paths in the kernel.
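torture_hpt is enabled the same way and can be combined with
torture_slb from the previous patch (an illustrative command line
fragment; note the QEMU caveat documented below):

  -append "disable_radix torture_slb torture_hpt"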
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
v2:
- Add a bit more to the changelog.
- Add some kuap stuff for when Aneesh's kuap-for-hash patches are
merged.
 .../admin-guide/kernel-parameters.txt         |  9 +++
 arch/powerpc/include/asm/book3s/64/mmu-hash.h | 10 +++
 arch/powerpc/mm/book3s64/hash_4k.c            |  3 +
 arch/powerpc/mm/book3s64/hash_64k.c           |  8 ++
 arch/powerpc/mm/book3s64/hash_utils.c         | 76 ++++++++++++++++++-
 5 files changed, 105 insertions(+), 1 deletion(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 5a34b7dd9ebe..1ec6a32a717a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -876,6 +876,15 @@
them frequently to increase the rate of SLB faults
on kernel addresses.
+ torture_hpt [PPC]
+ Limits the number of kernel HPT entries in the hash
+ page table to increase the rate of hash page table
+ faults on kernel addresses.
+
+ This may hang when run on processors/emulators that
+ do not have a TLB, or that flush it more often than
+ required; QEMU seems to have problems.
+
disable= [IPV6]
See Documentation/networking/ipv6.txt.
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 758de1e0f676..539e3d91eac4 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -324,6 +324,16 @@ static inline bool torture_slb(void)
return static_branch_unlikely(&torture_slb_key);
}
+extern bool torture_hpt_enabled;
+DECLARE_STATIC_KEY_FALSE(torture_hpt_key);
+static inline bool torture_hpt(void)
+{
+ return static_branch_unlikely(&torture_hpt_key);
+}
+
+void hpt_do_torture(unsigned long ea, unsigned long access,
+ unsigned long rflags, unsigned long hpte_group);
+
/*
* This computes the AVPN and B fields of the first dword of a HPTE,
* for use when we want to match an existing PTE. The bottom 7 bits
diff --git a/arch/powerpc/mm/book3s64/hash_4k.c b/arch/powerpc/mm/book3s64/hash_4k.c
index 22e787123cdf..54e4ff8c558d 100644
--- a/arch/powerpc/mm/book3s64/hash_4k.c
+++ b/arch/powerpc/mm/book3s64/hash_4k.c
@@ -118,6 +118,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
}
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
+
+ if (torture_hpt())
+ hpt_do_torture(ea, access, rflags, hpte_group);
}
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
diff --git a/arch/powerpc/mm/book3s64/hash_64k.c b/arch/powerpc/mm/book3s64/hash_64k.c
index 7084ce2951e6..19ea0fc145a9 100644
--- a/arch/powerpc/mm/book3s64/hash_64k.c
+++ b/arch/powerpc/mm/book3s64/hash_64k.c
@@ -216,6 +216,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
new_pte |= H_PAGE_HASHPTE;
+ if (torture_hpt())
+ hpt_do_torture(ea, access, rflags, hpte_group);
+
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
}
@@ -327,7 +330,12 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
+
+ if (torture_hpt())
+ hpt_do_torture(ea, access, rflags, hpte_group);
}
+
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
+
return 0;
}
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9c487b5782ef..845da1e8ca4f 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -353,8 +353,12 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
return ret;
}
-static bool disable_1tb_segments = false;
+static bool disable_1tb_segments __read_mostly = false;
bool torture_slb_enabled __read_mostly = false;
+bool torture_hpt_enabled __read_mostly = false;
+
+/* per-CPU array allocated if we enable torture_hpt. */
+static unsigned long *torture_hpt_last_group;
static int __init parse_disable_1tb_segments(char *p)
{
@@ -370,6 +374,13 @@ static int __init parse_torture_slb(char *p)
}
early_param("torture_slb", parse_torture_slb);
+static int __init parse_torture_hpt(char *p)
+{
+ torture_hpt_enabled = true;
+ return 0;
+}
+early_param("torture_hpt", parse_torture_hpt);
+
static int __init htab_dt_scan_seg_sizes(unsigned long node,
const char *uname, int depth,
void *data)
@@ -863,6 +874,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
}
DEFINE_STATIC_KEY_FALSE(torture_slb_key);
+DEFINE_STATIC_KEY_FALSE(torture_hpt_key);
static void __init htab_initialize(void)
{
@@ -882,6 +894,15 @@ static void __init htab_initialize(void)
if (torture_slb_enabled)
static_branch_enable(&torture_slb_key);
+ if (torture_hpt_enabled) {
+ unsigned long tmp;
+ static_branch_enable(&torture_hpt_key);
+ tmp = memblock_phys_alloc_range(sizeof(unsigned long) * NR_CPUS,
+ 0,
+ 0, MEMBLOCK_ALLOC_ANYWHERE);
+ memset((void *)tmp, 0xff, sizeof(unsigned long) * NR_CPUS);
+ torture_hpt_last_group = __va(tmp);
+ }
/*
* Calculate the required size of the htab. We want the number of
@@ -1901,6 +1922,59 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
return slot;
}
+void hpt_do_torture(unsigned long ea, unsigned long access,
+ unsigned long rflags, unsigned long hpte_group)
+{
+ unsigned long last_group;
+ int cpu = raw_smp_processor_id();
+
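+ /* Remove any deferred (cache-inhibited) group left over from a previous fault */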
+ last_group = torture_hpt_last_group[cpu];
+ if (last_group != -1UL) {
+ while (mmu_hash_ops.hpte_remove(last_group) != -1)
+ ;
+ torture_hpt_last_group[cpu] = -1UL;
+ }
+
+#define QEMU_WORKAROUND 0
+
+ if (ea >= PAGE_OFFSET) {
+ if (!QEMU_WORKAROUND && (access & (_PAGE_READ|_PAGE_WRITE)) &&
+ !(rflags & (HPTE_R_I|HPTE_R_G))) {
+ /* prefetch / prefetchw does not seem to set up a TLB
+ * entry with the powerpc systemsim (mambo) emulator,
+ * though it works with real hardware. An alternative
+ * approach that would work more reliably on quirky
+ * emulators like QEMU may be to remember the last
+ * insertion and remove that, rather than removing the
+ * current insertion. Then no prefetch is required.
+ */
+ if ((access & _PAGE_WRITE) && (access & _PAGE_READ)) {
+ void __user *mem = (void *)(ea & ~0x3);
+
+ allow_read_write_user(mem, mem, sizeof(atomic_t));
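+ /* A no-op read-modify-write access sets up the TLB entry without changing data */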
+ atomic_add(0, (atomic_t *)mem);
+ prevent_read_write_user(mem, mem, sizeof(atomic_t));
+
+ } else if (access & _PAGE_READ) {
+ void __user *mem = (void *)ea;
+
+ allow_read_from_user(mem, 1);
+ (void)*(volatile char *)mem;
+ prevent_read_from_user(mem, 1);
+ }
+
+ mb();
+
+ while (mmu_hash_ops.hpte_remove(hpte_group) != -1)
+ ;
+ } else {
+ /* Can't prefetch cache-inhibited so clear next time. */
+ torture_hpt_last_group[cpu] = hpte_group;
+ }
+ }
+}
+
+
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
--
2.23.0
* Re: [PATCH v2 1/2] powerpc/64s/hash: add torture_slb kernel boot option to increase SLB faults
From: kbuild test robot @ 2020-05-06 1:32 UTC
To: Nicholas Piggin, linuxppc-dev, Aneesh Kumar K . V
Cc: clang-built-linux, kbuild-all, Aneesh Kumar K . V
[-- Attachment #1: Type: text/plain, Size: 16681 bytes --]
Hi Nicholas,
I love your patch! Yet something to improve:
[auto build test ERROR on powerpc/next]
[also build test ERROR on linus/master v5.7-rc4 next-20200505]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]
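(For example, one possible invocation for a series like this, assuming
the branch tracks the tree it was based on: git format-patch
--base=auto -v2 -2)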
url: https://github.com/0day-ci/linux/commits/Nicholas-Piggin/powerpc-64s-hash-add-torture_slb-kernel-boot-option-to-increase-SLB-faults/20200505-053958
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-randconfig-a001-20200503 (attached as .config)
compiler: clang version 11.0.0 (https://github.com/llvm/llvm-project 9e3549804672c79d64eececab39019f4dfd2b7ea)
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install powerpc cross compiling tool for clang build
# apt-get install binutils-powerpc-linux-gnu
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=powerpc
If you fix the issue, kindly add following tag as appropriate
Reported-by: kbuild test robot <lkp@intel.com>
All error/warnings (new ones prefixed by >>):
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:10:
In file included from include/linux/time.h:6:
In file included from include/linux/seqlock.h:36:
In file included from include/linux/spinlock.h:51:
In file included from include/linux/preempt.h:78:
In file included from ./arch/powerpc/include/generated/asm/preempt.h:1:
In file included from include/asm-generic/preempt.h:5:
In file included from include/linux/thread_info.h:21:
In file included from arch/powerpc/include/asm/current.h:13:
In file included from arch/powerpc/include/asm/paca.h:17:
In file included from arch/powerpc/include/asm/lppaca.h:47:
In file included from arch/powerpc/include/asm/mmu.h:356:
In file included from arch/powerpc/include/asm/book3s/64/mmu.h:46:
>> arch/powerpc/include/asm/book3s/64/mmu-hash.h:321:1: warning: declaration specifier missing, defaulting to 'int'
DECLARE_STATIC_KEY_FALSE(torture_slb_key);
^
int
>> arch/powerpc/include/asm/book3s/64/mmu-hash.h:321:26: error: a parameter list without types is only allowed in a function definition
DECLARE_STATIC_KEY_FALSE(torture_slb_key);
^
>> arch/powerpc/include/asm/book3s/64/mmu-hash.h:324:9: error: implicit declaration of function 'static_branch_unlikely' [-Werror,-Wimplicit-function-declaration]
return static_branch_unlikely(&torture_slb_key);
^
>> arch/powerpc/include/asm/book3s/64/mmu-hash.h:324:33: error: use of undeclared identifier 'torture_slb_key'; did you mean 'torture_slb'?
return static_branch_unlikely(&torture_slb_key);
^~~~~~~~~~~~~~~
torture_slb
arch/powerpc/include/asm/book3s/64/mmu-hash.h:322:20: note: 'torture_slb' declared here
static inline bool torture_slb(void)
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:87:11: warning: array index 3 is past the end of the array (which contains 1 element) [-Warray-bounds]
return (set->sig[3] | set->sig[2] |
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:87:25: warning: array index 2 is past the end of the array (which contains 1 element) [-Warray-bounds]
return (set->sig[3] | set->sig[2] |
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:88:4: warning: array index 1 is past the end of the array (which contains 1 element) [-Warray-bounds]
set->sig[1] | set->sig[0]) == 0;
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:90:11: warning: array index 1 is past the end of the array (which contains 1 element) [-Warray-bounds]
return (set->sig[1] | set->sig[0]) == 0;
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:103:11: warning: array index 3 is past the end of the array (which contains 1 element) [-Warray-bounds]
return (set1->sig[3] == set2->sig[3]) &&
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:103:27: warning: array index 3 is past the end of the array (which contains 1 element) [-Warray-bounds]
return (set1->sig[3] == set2->sig[3]) &&
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:104:5: warning: array index 2 is past the end of the array (which contains 1 element) [-Warray-bounds]
(set1->sig[2] == set2->sig[2]) &&
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:104:21: warning: array index 2 is past the end of the array (which contains 1 element) [-Warray-bounds]
(set1->sig[2] == set2->sig[2]) &&
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
--
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:232:10: warning: array index 1 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 2: set->sig[1] = 0;
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
In file included from include/linux/compat.h:17:
In file included from include/linux/fs.h:34:
In file included from include/linux/percpu-rwsem.h:7:
In file included from include/linux/rcuwait.h:6:
In file included from include/linux/sched/signal.h:6:
include/linux/signal.h:244:10: warning: array index 1 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 2: set->sig[1] = -1;
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
include/linux/compat.h:424:22: warning: array index 3 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
include/linux/compat.h:424:10: warning: array index 7 is past the end of the array (which contains 2 elements) [-Warray-bounds]
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
^ ~
include/linux/compat.h:131:2: note: array 'sig' declared here
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
^
include/linux/compat.h:424:42: warning: array index 6 is past the end of the array (which contains 2 elements) [-Warray-bounds]
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
^ ~
include/linux/compat.h:131:2: note: array 'sig' declared here
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
^
include/linux/compat.h:424:53: warning: array index 3 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
include/linux/compat.h:426:22: warning: array index 2 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
include/linux/compat.h:426:10: warning: array index 5 is past the end of the array (which contains 2 elements) [-Warray-bounds]
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
^ ~
include/linux/compat.h:131:2: note: array 'sig' declared here
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
^
include/linux/compat.h:426:42: warning: array index 4 is past the end of the array (which contains 2 elements) [-Warray-bounds]
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
^ ~
include/linux/compat.h:131:2: note: array 'sig' declared here
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
^
include/linux/compat.h:426:53: warning: array index 2 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
include/linux/compat.h:428:22: warning: array index 1 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:14:
include/linux/compat.h:428:10: warning: array index 3 is past the end of the array (which contains 2 elements) [-Warray-bounds]
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
^ ~
include/linux/compat.h:131:2: note: array 'sig' declared here
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
^
include/linux/compat.h:428:42: warning: array index 2 is past the end of the array (which contains 2 elements) [-Warray-bounds]
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
^ ~
include/linux/compat.h:131:2: note: array 'sig' declared here
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
^
include/linux/compat.h:428:53: warning: array index 1 is past the end of the array (which contains 1 element) [-Warray-bounds]
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
^ ~
arch/powerpc/include/uapi/asm/signal.h:18:2: note: array 'sig' declared here
unsigned long sig[_NSIG_WORDS];
^
In file included from arch/powerpc/kernel/asm-offsets.c:21:
>> include/linux/mman.h:133:9: warning: division by zero is undefined [-Wdivision-by-zero]
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/mman.h:111:21: note: expanded from macro '_calc_vm_trans'
: ((x) & (bit1)) / ((bit1) / (bit2))))
^ ~~~~~~~~~~~~~~~~~
include/linux/mman.h:134:9: warning: division by zero is undefined [-Wdivision-by-zero]
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC );
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/mman.h:111:21: note: expanded from macro '_calc_vm_trans'
: ((x) & (bit1)) / ((bit1) / (bit2))))
^ ~~~~~~~~~~~~~~~~~
64 warnings and 3 errors generated.
make[2]: *** [scripts/Makefile.build:100: arch/powerpc/kernel/asm-offsets.s] Error 1
make[2]: Target '__build' not remade because of errors.
make[1]: *** [Makefile:1141: prepare0] Error 2
make[1]: Target 'prepare' not remade because of errors.
make: *** [Makefile:180: sub-make] Error 2
vim +321 arch/powerpc/include/asm/book3s/64/mmu-hash.h
319
320 extern bool torture_slb_enabled;
> 321 DECLARE_STATIC_KEY_FALSE(torture_slb_key);
322 static inline bool torture_slb(void)
323 {
> 324 return static_branch_unlikely(&torture_slb_key);
325 }
326
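The errors above suggest mmu-hash.h uses the static key macros without
their definitions being in scope for this config. A minimal sketch of
a likely fix (an assumption, not a tested patch) is to include the
jump label header near the top of
arch/powerpc/include/asm/book3s/64/mmu-hash.h:

  #include <linux/jump_label.h> /* DECLARE_STATIC_KEY_FALSE(), static_branch_unlikely() */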
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 31814 bytes --]