diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 19e46fd623b0..ec989e1011f0 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -126,7 +126,9 @@ void __init btext_setup_display(int width, int height, int depth, int pitch, void __init btext_unmap(void) { + pr_info("%s:%d\n", __func__, __LINE__); boot_text_mapped = 0; + pr_info("%s:%d\n", __func__, __LINE__); } /* Here's a small text engine to use during early boot diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index eeff136b83d9..341a0635e131 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -198,8 +198,6 @@ static unsigned long oops_begin(struct pt_regs *regs) die_owner = cpu; console_verbose(); bust_spinlocks(1); - if (machine_is(powermac)) - pmac_backlight_unblank(); return flags; } NOKPROBE_SYMBOL(oops_begin); diff --git a/arch/powerpc/mm/book3s32/kuap.c b/arch/powerpc/mm/book3s32/kuap.c index 3a8815555a48..79b9fb0adfb4 100644 --- a/arch/powerpc/mm/book3s32/kuap.c +++ b/arch/powerpc/mm/book3s32/kuap.c @@ -5,18 +5,23 @@ void setup_kuap(bool disabled) { + pr_info("%s:%d\n", __func__, __LINE__); if (!disabled) { + pr_info("%s:%d\n", __func__, __LINE__); update_user_segments(mfsr(0) | SR_KS); isync(); /* Context sync required after mtsr() */ init_mm.context.sr0 |= SR_KS; current->thread.sr0 |= SR_KS; } + pr_info("%s:%d\n", __func__, __LINE__); if (smp_processor_id() != boot_cpuid) return; + pr_info("%s:%d\n", __func__, __LINE__); if (disabled) cur_cpu_spec->mmu_features &= ~MMU_FTR_KUAP; else pr_info("Activating Kernel Userspace Access Protection\n"); + pr_info("%s:%d\n", __func__, __LINE__); } diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 850783cfa9c7..65a3bb4213cd 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -86,6 +86,7 @@ int __init find_free_bat(void) if (!(bat[1].batu & 3)) return b; } + pr_err("NO FREE BAT (%d)\n", n); return -1; } 
@@ -143,6 +144,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to { int idx; + pr_info("%s:%d %lx %lx\n", __func__, __LINE__, base, top); while ((idx = find_free_bat()) != -1 && base != top) { unsigned int size = bat_block_size(base, top); @@ -151,6 +153,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); base += size; } + pr_info("%s:%d %lx\n", __func__, __LINE__, base); return base; } @@ -164,6 +167,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET); setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X); + pr_info("%s:%d %lx %lx %lx %lx\n", __func__, __LINE__, base, top, border, size); if (debug_pagealloc_enabled_or_kfence()) { pr_debug_once("Read-Write memory mapped without BATs\n"); if (base >= border) diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index d8adc452f431..54a45eced552 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -117,11 +117,6 @@ void __init MMU_init(void) if (ppc_md.progress) ppc_md.progress("MMU:exit", 0x211); - /* From now on, btext is no longer BAT mapped if it was at all */ -#ifdef CONFIG_BOOTX_TEXT - btext_unmap(); -#endif - kasan_mmu_init(); setup_kup(); @@ -130,4 +125,9 @@ void __init MMU_init(void) /* Shortly after that, the entire linear mapping will be available */ memblock_set_current_limit(lowmem_end_addr); + + /* Map btext now that the MMU is up, so early debug output keeps working */ +#ifdef CONFIG_BOOTX_TEXT + btext_map(); +#endif } diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c index 450a67ef0bbe..e04f21908c6a 100644 --- a/arch/powerpc/mm/kasan/book3s_32.c +++ b/arch/powerpc/mm/kasan/book3s_32.c @@ -15,6 +15,7 @@ int __init kasan_init_region(void *start, size_t size) phys_addr_t phys; int ret; + pr_err("%s: %px %zx %lx %lx\n", __func__, start, 
size, k_start, k_end); while (k_nobat < k_end) { unsigned int k_size = bat_block_size(k_nobat, k_end); int idx = find_free_bat(); @@ -28,6 +29,7 @@ int __init kasan_init_region(void *start, size_t size) if (!phys) break; + pr_err("%s: setbat %d %lx %pa %x\n", __func__, idx, k_nobat, &phys, k_size); setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL); k_nobat += k_size; } @@ -36,7 +38,7 @@ int __init kasan_init_region(void *start, size_t size) if (k_nobat < k_end) { phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0, - MEMBLOCK_ALLOC_ANYWHERE); + MEMBLOCK_ALLOC_ACCESSIBLE); if (!phys) return -ENOMEM; } @@ -47,6 +49,7 @@ int __init kasan_init_region(void *start, size_t size) kasan_update_early_region(k_start, k_nobat, __pte(0)); + pr_err("%s: loop %lx %lx\n", __func__, k_nobat, k_end); for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL); diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c index a70828a6d935..d734e0e74942 100644 --- a/arch/powerpc/mm/kasan/init_32.c +++ b/arch/powerpc/mm/kasan/init_32.c @@ -84,6 +84,9 @@ kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) { unsigned long k_cur; + if (k_start == k_end) + return; + for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); @@ -126,12 +129,15 @@ void __init kasan_mmu_init(void) { int ret; + pr_info("%s:%d\n", __func__, __LINE__); if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) { ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END); + pr_info("%s:%d %d\n", __func__, __LINE__, ret); if (ret) panic("kasan: kasan_init_shadow_page_tables() failed"); } + pr_info("%s:%d\n", __func__, __LINE__); } void __init kasan_init(void) diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 5c02fd08d61e..d75084f67d9b 100644 --- 
a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -104,6 +104,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) phys_addr_t p; bool ktext; + pr_info("%s:%d %lx %lx\n", __func__, __LINE__, offset, top); s = offset; v = PAGE_OFFSET + s; p = memstart_addr + s; @@ -113,6 +114,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) v += PAGE_SIZE; p += PAGE_SIZE; } + pr_info("%s:%d\n", __func__, __LINE__); } void __init mapin_ram(void) @@ -120,6 +122,7 @@ void __init mapin_ram(void) phys_addr_t base, end; u64 i; + pr_info("%s:%d\n", __func__, __LINE__); for_each_mem_range(i, &base, &end) { phys_addr_t top = min(end, total_lowmem); @@ -128,6 +131,7 @@ void __init mapin_ram(void) base = mmu_mapin_ram(base, top); __mapin_ram_chunk(base, top); } + pr_info("%s:%d\n", __func__, __LINE__); } void mark_initmem_nx(void)