* [PATCH 6.1.y 1/3] MIPS: Always record SEGBITS in cpu_data.vmbits
@ 2026-04-10 0:52 Maciej W. Rozycki
From: Maciej W. Rozycki @ 2026-04-10 0:52 UTC
To: stable; +Cc: Maciej W. Rozycki, Thomas Bogendoerfer
commit 8374c2cb83b95b3c92f129fd56527225c20a058c upstream.
With a 32-bit kernel running on 64-bit MIPS hardware the hardcoded value
of `cpu_vmbits' only records the size of the compatibility useg and does
not reflect the size of the native xuseg or the complete range of values
allowed
in the VPN2 field of TLB entries.
An upcoming change will need the actual VPN2 value range permitted even
in 32-bit kernel configurations, so always include the `vmbits' member
in `struct cpuinfo_mips' and probe for SEGBITS when running on 64-bit
hardware, falling back to the currently hardcoded value of 31 on 32-bit
processors. No functional change for users of `cpu_vmbits'.
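
For illustration, a minimal sketch of the probe in isolation, assuming
the 64-bit EntryHi accessors introduced below; probe_segbits() is a
hypothetical helper name, not part of the patch:

	/*
	 * Write an all-ones VPN2 pattern to EntryHi and read back which
	 * bits were actually implemented; fls64() of the surviving bits
	 * yields SEGBITS.  Fall back to 31 on 32-bit-only processors.
	 */
	static int probe_segbits(void)
	{
		unsigned long long vpn2;

		if (!cpu_has_64bits)
			return 31;
		write_c0_entryhi_64(0x3fffffffffffe000ULL);
		back_to_back_c0_hazard();
		vpn2 = read_c0_entryhi_64() & 0x3fffffffffffe000ULL;
		return fls64(vpn2);
	}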
Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
---
arch/mips/include/asm/cpu-features.h | 1 -
arch/mips/include/asm/cpu-info.h | 2 --
arch/mips/include/asm/mipsregs.h | 2 ++
arch/mips/kernel/cpu-probe.c | 13 ++++++++-----
arch/mips/kernel/cpu-r3k-probe.c | 2 ++
5 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index e0a4da4cfd8b..53ea41be3735 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -484,7 +484,6 @@
# endif
# ifndef cpu_vmbits
# define cpu_vmbits cpu_data[0].vmbits
-# define __NEED_VMBITS_PROBE
# endif
#endif
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a600670d00e9..1aee44124f11 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -80,9 +80,7 @@ struct cpuinfo_mips {
int srsets; /* Shadow register sets */
int package;/* physical package number */
unsigned int globalnumber;
-#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
-#endif
void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
unsigned int watch_reg_use_cnt; /* Usable by ptrace */
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index c60e72917a28..581aa8876a74 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1714,6 +1714,8 @@ do { \
#define read_c0_entryhi() __read_ulong_c0_register($10, 0)
#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
+#define read_c0_entryhi_64() __read_64bit_c0_register($10, 0)
+#define write_c0_entryhi_64(val) __write_64bit_c0_register($10, 0, val)
#define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
#define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index fdf00c228b67..09d95482957a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -208,11 +208,14 @@ static inline void set_elf_base_platform(const char *plat)
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
-#ifdef __NEED_VMBITS_PROBE
- write_c0_entryhi(0x3fffffffffffe000ULL);
- back_to_back_c0_hazard();
- c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
-#endif
+ int vmbits = 31;
+
+ if (cpu_has_64bits) {
+ write_c0_entryhi_64(0x3fffffffffffe000ULL);
+ back_to_back_c0_hazard();
+ vmbits = fls64(read_c0_entryhi_64() & 0x3fffffffffffe000ULL);
+ }
+ c->vmbits = vmbits;
}
static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
index be93469c0e0e..2adf95225aa7 100644
--- a/arch/mips/kernel/cpu-r3k-probe.c
+++ b/arch/mips/kernel/cpu-r3k-probe.c
@@ -138,6 +138,8 @@ void cpu_probe(void)
else
cpu_set_nofpu_opts(c);
+ c->vmbits = 31;
+
reserve_exception_space(0, 0x400);
}
--
2.20.1
* [PATCH 6.1.y 2/3] MIPS: mm: Suppress TLB uniquification on EHINV hardware
From: Maciej W. Rozycki @ 2026-04-10 0:52 UTC
To: stable; +Cc: Maciej W. Rozycki, Thomas Bogendoerfer
commit 74283cfe216392c7b776ebf6045b5b15ed9dffcd upstream.
Hardware that supports the EHINV feature, mandatory for the R6 ISA and FTLB
implementations, lets software mark TLB entries invalid, which eliminates
the need to ensure no duplicate matching entries are ever created. This
feature is already used by local_flush_tlb_all(), via the UNIQUE_ENTRYHI
macro, making the preceding call to r4k_tlb_uniquify() superfluous.
The next change will also modify the uniquification code such that it will
become incompatible with the FTLB and MMID features, as well as with MIPSr6
CPUs that do not implement 4KiB pages.
Therefore prevent r4k_tlb_uniquify() from being used on EHINV hardware,
as denoted by `cpu_has_tlbinv'.
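
For context, a sketch of how EHINV hardware retires an entry outright;
ehinv_invalidate_entry() is a hypothetical helper, though
MIPS_ENTRYHI_EHINV and the CP0 accessors are the kernel's own:

	/*
	 * Mark one TLB entry explicitly invalid by writing EntryHi with
	 * the EHINV bit set: the entry can then never match, so no
	 * unique VPN2 value has to be manufactured for it.
	 */
	static void ehinv_invalidate_entry(int idx)
	{
		write_c0_entryhi(MIPS_ENTRYHI_EHINV);
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_index(idx);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}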
Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
---
arch/mips/mm/tlb-r4k.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d9631f3b6460..3669895a85bf 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -630,7 +630,8 @@ static void r4k_tlb_configure(void)
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
- r4k_tlb_uniquify();
+ if (!cpu_has_tlbinv)
+ r4k_tlb_uniquify();
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
--
2.20.1
* [PATCH 6.1.y 3/3] MIPS: mm: Rewrite TLB uniquification for the hidden bit feature
From: Maciej W. Rozycki @ 2026-04-10 0:52 UTC
To: stable; +Cc: Maciej W. Rozycki, Thomas Bogendoerfer
commit 540760b77b8fc49d39d1b2b76196e5ec57711a32 upstream.
Before the introduction of the EHINV feature, which lets software mark
TLB entries invalid, certain older implementations of the MIPS ISA were
equipped with an analogous bit, as a vendor extension, which however is
hidden from software and only ever set at reset, and then any software
write clears it, making the intended TLB entry valid.
This feature makes it unsafe to read a TLB entry with TLBR, modify the
page mask, and write the entry back with TLBWI, because this operation
will implicitly clear the hidden bit and this may create a duplicate
entry, because with the hidden bit present there is no guarantee that all
the entries across the TLB are unique.
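
To make the hazard concrete, this is the per-entry sequence used until
now, in sketch form; on hidden-bit hardware the TLBR/TLBWI round trip
validates the entry as a side effect:

	write_c0_index(i);
	mtc0_tlbr_hazard();
	tlb_read();			/* fetch entry i, hidden bit and all */
	tlb_read_hazard();
	/* Trim any large page down to the default size... */
	write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
	mtc0_tlbw_hazard();
	tlb_write_indexed();		/* ...but the write-back clears the
					   hidden bit, validating the entry
					   and any duplicate it now forms */
	tlbw_use_hazard();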
Usually the firmware has already uniquified TLB entries before handing
control over, in which case we only need to guarantee at bootstrap no
clash will happen with the VPN2 values chosen in local_flush_tlb_all().
However with systems such as the Mikrotik RB532 we get handed the TLB in
its reset state, with the hidden bit set across the entries and possibly
duplicate entries present. This then causes a machine check exception when
page sizes are reset in r4k_tlb_uniquify() and prevents the system from
booting.
Therefore rewrite the algorithm used in r4k_tlb_uniquify() so as to avoid
the reuse of ASID/VPN values across the TLB. Get rid of global entries
first as they may be blocking the entire address space, e.g. 16 256MiB
pages will exhaust the whole address space of a 32-bit CPU and a single
big page can exhaust the 32-bit compatibility space on a 64-bit CPU.
Details of the algorithm chosen are given across the code itself.
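
As a worked check of the exhaustion claim above, here is the virtual
span of a single TLB entry computed from its PageMask value, using the
same pagesz derivation as the code below; entry_span() is a
hypothetical helper for illustration only:

	/*
	 * A PM_256M entry maps a pair of 256MiB pages: pagesz comes to
	 * 16, for a span of (1ULL << 16) << 13 == 512MiB, so 8 entries
	 * (16 pages) cover the entire 4GiB space of a 32-bit CPU.
	 */
	static unsigned long long entry_span(unsigned long pagemask)
	{
		unsigned int pagesz = ilog2((pagemask >> 13) + 1);

		return (1ULL << pagesz) << 13;
	}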
Fixes: 9f048fa48740 ("MIPS: mm: Prevent a TLB shutdown on initial uniquification")
Signed-off-by: Maciej W. Rozycki <macro@orcam.me.uk>
Cc: stable@vger.kernel.org # v6.18+
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
---
arch/mips/mm/tlb-r4k.c | 282 +++++++++++++++++++++++++++++++++--------
1 file changed, 228 insertions(+), 54 deletions(-)
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 3669895a85bf..8d111f0a5296 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/memblock.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
@@ -24,6 +25,7 @@
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
+#include <asm/tlbdebug.h>
#include <asm/tlbmisc.h>
extern void build_tlb_refill_handler(void);
@@ -501,87 +503,259 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb);
-/* Comparison function for EntryHi VPN fields. */
-static int r4k_vpn_cmp(const void *a, const void *b)
+/* The start bit position of VPN2 and Mask in EntryHi/PageMask registers. */
+#define VPN2_SHIFT 13
+
+/* Read full EntryHi even with CONFIG_32BIT. */
+static inline unsigned long long read_c0_entryhi_native(void)
+{
+ return cpu_has_64bits ? read_c0_entryhi_64() : read_c0_entryhi();
+}
+
+/* Write full EntryHi even with CONFIG_32BIT. */
+static inline void write_c0_entryhi_native(unsigned long long v)
{
- long v = *(unsigned long *)a - *(unsigned long *)b;
- int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
- return s ? (v != 0) | v >> s : v;
+ if (cpu_has_64bits)
+ write_c0_entryhi_64(v);
+ else
+ write_c0_entryhi(v);
}
+/* TLB entry state for uniquification. */
+struct tlbent {
+ unsigned long long wired:1;
+ unsigned long long global:1;
+ unsigned long long asid:10;
+ unsigned long long vpn:51;
+ unsigned long long pagesz:5;
+ unsigned long long index:14;
+};
+
/*
- * Initialise all TLB entries with unique values that do not clash with
- * what we have been handed over and what we'll be using ourselves.
+ * Comparison function for TLB entry sorting. Place wired entries first,
+ * then global entries, then order by the increasing VPN/ASID and the
+ * decreasing page size. This lets us avoid clashes with wired entries
+ * easily and get entries for larger pages out of the way first.
+ *
+ * We could group bits so as to reduce the number of comparisons, but this
+ * is seldom executed and not performance-critical, so prefer legibility.
*/
-static void __ref r4k_tlb_uniquify(void)
+static int r4k_entry_cmp(const void *a, const void *b)
{
- int tlbsize = current_cpu_data.tlbsize;
- bool use_slab = slab_is_available();
- int start = num_wired_entries();
- phys_addr_t tlb_vpn_size;
- unsigned long *tlb_vpns;
- unsigned long vpn_mask;
- int cnt, ent, idx, i;
-
- vpn_mask = GENMASK(cpu_vmbits - 1, 13);
- vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
+ struct tlbent ea = *(struct tlbent *)a, eb = *(struct tlbent *)b;
+
+ if (ea.wired > eb.wired)
+ return -1;
+ else if (ea.wired < eb.wired)
+ return 1;
+ else if (ea.global > eb.global)
+ return -1;
+ else if (ea.global < eb.global)
+ return 1;
+ else if (ea.vpn < eb.vpn)
+ return -1;
+ else if (ea.vpn > eb.vpn)
+ return 1;
+ else if (ea.asid < eb.asid)
+ return -1;
+ else if (ea.asid > eb.asid)
+ return 1;
+ else if (ea.pagesz > eb.pagesz)
+ return -1;
+ else if (ea.pagesz < eb.pagesz)
+ return 1;
+ else
+ return 0;
+}
- tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
- tlb_vpns = (use_slab ?
- kmalloc(tlb_vpn_size, GFP_ATOMIC) :
- memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
- if (WARN_ON(!tlb_vpns))
- return; /* Pray local_flush_tlb_all() is good enough. */
+/*
+ * Fetch all the TLB entries. Mask individual VPN values retrieved with
+ * the corresponding page mask and ignoring any 1KiB extension as we'll
+ * be using 4KiB pages for uniquification.
+ */
+static void __ref r4k_tlb_uniquify_read(struct tlbent *tlb_vpns, int tlbsize)
+{
+ int start = num_wired_entries();
+ unsigned long long vpn_mask;
+ bool global;
+ int i;
- htw_stop();
+ vpn_mask = GENMASK(current_cpu_data.vmbits - 1, VPN2_SHIFT);
+ vpn_mask |= cpu_has_64bits ? 3ULL << 62 : 1 << 31;
- for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
- unsigned long vpn;
+ for (i = 0; i < tlbsize; i++) {
+ unsigned long long entryhi, vpn, mask, asid;
+ unsigned int pagesz;
write_c0_index(i);
mtc0_tlbr_hazard();
tlb_read();
tlb_read_hazard();
- vpn = read_c0_entryhi();
- vpn &= vpn_mask & PAGE_MASK;
- tlb_vpns[cnt] = vpn;
- /* Prevent any large pages from overlapping regular ones. */
- write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- tlbw_use_hazard();
+ global = !!(read_c0_entrylo0() & ENTRYLO_G);
+ entryhi = read_c0_entryhi_native();
+ mask = read_c0_pagemask();
+
+ asid = entryhi & cpu_asid_mask(&current_cpu_data);
+ vpn = (entryhi & vpn_mask & ~mask) >> VPN2_SHIFT;
+ pagesz = ilog2((mask >> VPN2_SHIFT) + 1);
+
+ tlb_vpns[i].global = global;
+ tlb_vpns[i].asid = global ? 0 : asid;
+ tlb_vpns[i].vpn = vpn;
+ tlb_vpns[i].pagesz = pagesz;
+ tlb_vpns[i].wired = i < start;
+ tlb_vpns[i].index = i;
}
+}
- sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
+/*
+ * Write unique values to all but the wired TLB entries each, using
+ * the 4KiB page size. This size might not be supported with R6, but
+ * EHINV is mandatory for R6, so we won't ever be called in that case.
+ *
+ * A sorted table is supplied with any wired entries at the beginning,
+ * followed by any global entries, and then finally regular entries.
+ * We start at the VPN and ASID values of zero and only assign user
+ * addresses, therefore guaranteeing no clash with addresses produced
+ * by UNIQUE_ENTRYHI. We avoid any VPN values used by wired or global
+ * entries, by increasing the VPN value beyond the span of such entry.
+ *
+ * When a VPN/ASID clash is found with a regular entry we increment the
+ * ASID instead until no VPN/ASID clash has been found or the ASID space
+ * has been exhausted, in which case we increase the VPN value beyond
+ * the span of the largest clashing entry.
+ *
+ * We do not need to be concerned about FTLB or MMID configurations as
+ * those are required to implement the EHINV feature.
+ */
+static void __ref r4k_tlb_uniquify_write(struct tlbent *tlb_vpns, int tlbsize)
+{
+ unsigned long long asid, vpn, vpn_size, pagesz;
+ int widx, gidx, idx, sidx, lidx, i;
- write_c0_pagemask(PM_DEFAULT_MASK);
+ vpn_size = 1ULL << (current_cpu_data.vmbits - VPN2_SHIFT);
+ pagesz = ilog2((PM_4K >> VPN2_SHIFT) + 1);
+
+ write_c0_pagemask(PM_4K);
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- idx = 0;
- ent = tlbsize;
- for (i = start; i < tlbsize; i++)
- while (1) {
- unsigned long entryhi, vpn;
+ asid = 0;
+ vpn = 0;
+ widx = 0;
+ gidx = 0;
+ for (sidx = 0; sidx < tlbsize && tlb_vpns[sidx].wired; sidx++)
+ ;
+ for (lidx = sidx; lidx < tlbsize && tlb_vpns[lidx].global; lidx++)
+ ;
+ idx = gidx = sidx + 1;
+ for (i = sidx; i < tlbsize; i++) {
+ unsigned long long entryhi, vpn_pagesz = 0;
- entryhi = UNIQUE_ENTRYHI(ent);
- vpn = entryhi & vpn_mask & PAGE_MASK;
+ while (1) {
+ if (WARN_ON(vpn >= vpn_size)) {
+ dump_tlb_all();
+ /* Pray local_flush_tlb_all() will cope. */
+ return;
+ }
- if (idx >= cnt || vpn < tlb_vpns[idx]) {
- write_c0_entryhi(entryhi);
- write_c0_index(i);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- ent++;
- break;
- } else if (vpn == tlb_vpns[idx]) {
- ent++;
- } else {
+ /* VPN must be below the next wired entry. */
+ if (widx < sidx && vpn >= tlb_vpns[widx].vpn) {
+ vpn = max(vpn,
+ (tlb_vpns[widx].vpn +
+ (1ULL << tlb_vpns[widx].pagesz)));
+ asid = 0;
+ widx++;
+ continue;
+ }
+ /* VPN must be below the next global entry. */
+ if (gidx < lidx && vpn >= tlb_vpns[gidx].vpn) {
+ vpn = max(vpn,
+ (tlb_vpns[gidx].vpn +
+ (1ULL << tlb_vpns[gidx].pagesz)));
+ asid = 0;
+ gidx++;
+ continue;
+ }
+ /* Try to find a free ASID so as to conserve VPNs. */
+ if (idx < tlbsize && vpn == tlb_vpns[idx].vpn &&
+ asid == tlb_vpns[idx].asid) {
+ unsigned long long idx_pagesz;
+
+ idx_pagesz = tlb_vpns[idx].pagesz;
+ vpn_pagesz = max(vpn_pagesz, idx_pagesz);
+ do
+ idx++;
+ while (idx < tlbsize &&
+ vpn == tlb_vpns[idx].vpn &&
+ asid == tlb_vpns[idx].asid);
+ asid++;
+ if (asid > cpu_asid_mask(&current_cpu_data)) {
+ vpn += vpn_pagesz;
+ asid = 0;
+ vpn_pagesz = 0;
+ }
+ continue;
+ }
+ /* VPN mustn't be above the next regular entry. */
+ if (idx < tlbsize && vpn > tlb_vpns[idx].vpn) {
+ vpn = max(vpn,
+ (tlb_vpns[idx].vpn +
+ (1ULL << tlb_vpns[idx].pagesz)));
+ asid = 0;
idx++;
+ continue;
}
+ break;
}
+ entryhi = (vpn << VPN2_SHIFT) | asid;
+ write_c0_entryhi_native(entryhi);
+ write_c0_index(tlb_vpns[i].index);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+
+ tlb_vpns[i].asid = asid;
+ tlb_vpns[i].vpn = vpn;
+ tlb_vpns[i].pagesz = pagesz;
+
+ asid++;
+ if (asid > cpu_asid_mask(&current_cpu_data)) {
+ vpn += 1ULL << pagesz;
+ asid = 0;
+ }
+ }
+}
+
+/*
+ * Initialise all TLB entries with unique values that do not clash with
+ * what we have been handed over and what we'll be using ourselves.
+ */
+static void __ref r4k_tlb_uniquify(void)
+{
+ int tlbsize = current_cpu_data.tlbsize;
+ bool use_slab = slab_is_available();
+ phys_addr_t tlb_vpn_size;
+ struct tlbent *tlb_vpns;
+
+ tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
+ tlb_vpns = (use_slab ?
+ kmalloc(tlb_vpn_size, GFP_ATOMIC) :
+ memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
+ if (WARN_ON(!tlb_vpns))
+ return; /* Pray local_flush_tlb_all() is good enough. */
+
+ htw_stop();
+
+ r4k_tlb_uniquify_read(tlb_vpns, tlbsize);
+
+ sort(tlb_vpns, tlbsize, sizeof(*tlb_vpns), r4k_entry_cmp, NULL);
+
+ r4k_tlb_uniquify_write(tlb_vpns, tlbsize);
+
+ write_c0_pagemask(PM_DEFAULT_MASK);
+
tlbw_use_hazard();
htw_start();
flush_micro_tlb();
--
2.20.1