Linux-ARM-Kernel Archive on lore.kernel.org
 help / color / mirror / Atom feed
* [RFC PATCH] arm64: Bring back linear map randomization using PArange override
@ 2025-12-11  4:09 Ard Biesheuvel
  2025-12-16 18:13 ` Seth Jenkins
  2026-01-19 15:18 ` Will Deacon
  0 siblings, 2 replies; 5+ messages in thread
From: Ard Biesheuvel @ 2025-12-11  4:09 UTC (permalink / raw)
  To: linux-arm-kernel
  Cc: linux-hardening, mark.rutland, catalin.marinas, will, kees,
	Ard Biesheuvel, Liz Prucka, Seth Jenkins

Commit

  1db780bafa4ce ("arm64/mm: Remove randomization of the linear map")

removed linear map randomization from the arm64 port, on the basis that
a prior change to the logic rendered it non-functional on the majority
of relevant CPU implementations.

As has been reported numerous times now, the upshot of this is that the
virtual addresses of statically allocated kernel data structures are
highly predictable if the kernel is loaded at a known physical address.
Any bootloader that still adheres to the original arm64 boot protocol,
which stipulated that the kernel should be loaded at the lowest
available physical address, is affected by this.

So bring back the most recent version of linear map randomization, which
is based on the CPU's physical address range, but this time, allow this
PA range to be overridden on the kernel command line.

E.g., by passing

  id_aa64mmfr0.parange=1 # 36 bits
  id_aa64mmfr0.parange=2 # 40 bits

the CPU's supported physical range can be reduced to the point where
linear map randomization becomes feasible again. It also means that
nothing else is permitted to appear in that physical window, i.e.,
hotplug memory but also non-memory peripherals, or stage-2 mappings on
behalf of KVM guests.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
Cc: Liz Prucka <lizprucka@google.com>
Cc: Seth Jenkins <sethjenkins@google.com>

This is posted as an RFC because there are obvious shortcomings to this
approach. However, before I spend more time on this, I'd like to gauge
if there is any consensus that bringing this back is a good idea.

 arch/arm64/include/asm/cpufeature.h   | 13 +++++++++++++
 arch/arm64/kernel/image-vars.h        |  1 +
 arch/arm64/kernel/kaslr.c             |  2 ++
 arch/arm64/kernel/pi/idreg-override.c |  1 +
 arch/arm64/kernel/pi/kaslr_early.c    |  4 ++++
 arch/arm64/mm/init.c                  | 16 ++++++++++++++++
 6 files changed, 37 insertions(+)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 4de51f8d92cb..fdb1331c406d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -1078,6 +1078,19 @@ static inline bool cpu_has_lpa2(void)
 #endif
 }
 
+static inline u64 cpu_get_phys_range(void)
+{
+	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+
+	mmfr0 &= ~id_aa64mmfr0_override.mask;
+	mmfr0 |= id_aa64mmfr0_override.val;
+
+	int parange = cpuid_feature_extract_unsigned_field(mmfr0,
+						ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+	return BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
+}
+
 #endif /* __ASSEMBLER__ */
 
 #endif
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 85bc629270bd..263543ad6155 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -58,6 +58,7 @@ PI_EXPORT_SYM(id_aa64zfr0_override);
 PI_EXPORT_SYM(arm64_sw_feature_override);
 PI_EXPORT_SYM(arm64_use_ng_mappings);
 PI_EXPORT_SYM(_ctype);
+PI_EXPORT_SYM(memstart_offset_seed);
 
 PI_EXPORT_SYM(swapper_pg_dir);
 
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index c9503ed45a6c..1da3e25f9d9e 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -10,6 +10,8 @@
 #include <asm/cpufeature.h>
 #include <asm/memory.h>
 
+u16 __initdata memstart_offset_seed;
+
 bool __ro_after_init __kaslr_is_enabled = false;
 
 void __init kaslr_init(void)
diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index bc57b290e5e7..a8351ba70300 100644
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -43,6 +43,7 @@ static const struct ftr_set_desc mmfr0 __prel64_initconst = {
 	.override	= &id_aa64mmfr0_override,
 	.fields		= {
 		FIELD("ecv", ID_AA64MMFR0_EL1_ECV_SHIFT, NULL),
+		FIELD("parange", ID_AA64MMFR0_EL1_PARANGE_SHIFT, NULL),
 		{}
 	},
 };
diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
index e0e018046a46..0257b43819db 100644
--- a/arch/arm64/kernel/pi/kaslr_early.c
+++ b/arch/arm64/kernel/pi/kaslr_early.c
@@ -18,6 +18,8 @@
 
 #include "pi.h"
 
+extern u16 memstart_offset_seed;
+
 static u64 __init get_kaslr_seed(void *fdt, int node)
 {
 	static char const seed_str[] __initconst = "kaslr-seed";
@@ -51,6 +53,8 @@ u64 __init kaslr_early_init(void *fdt, int chosen)
 			return 0;
 	}
 
+	memstart_offset_seed = seed & U16_MAX;
+
 	/*
 	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
 	 * kernel image offset from the seed. Let's place the kernel in the
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 524d34a0e921..6c55eca6ccad 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -275,6 +275,22 @@ void __init arm64_memblock_init(void)
 		}
 	}
 
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		extern u16 memstart_offset_seed;
+		s64 range = linear_region_size - cpu_get_phys_range();
+
+		/*
+		 * If the size of the linear region exceeds, by a sufficient
+		 * margin, the size of the region that the physical memory can
+		 * span, randomize the linear region as well.
+		 */
+		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
+			range /= ARM64_MEMSTART_ALIGN;
+			memstart_addr -= ARM64_MEMSTART_ALIGN *
+					 ((range * memstart_offset_seed) >> 16);
+		}
+	}
+
 	/*
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
-- 
2.47.3



^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [RFC PATCH] arm64: Bring back linear map randomization using PArange override
  2025-12-11  4:09 [RFC PATCH] arm64: Bring back linear map randomization using PArange override Ard Biesheuvel
@ 2025-12-16 18:13 ` Seth Jenkins
  2026-01-08 13:33   ` Ard Biesheuvel
  2026-01-19 15:18 ` Will Deacon
  1 sibling, 1 reply; 5+ messages in thread
From: Seth Jenkins @ 2025-12-16 18:13 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: linux-arm-kernel, linux-hardening, mark.rutland, catalin.marinas,
	will, kees, Liz Prucka

> This is posted as an RFC because there are obvious shortcomings to this
> approach. However, before I spend more time on this, I'd like to gauge
> if there is any consensus that bringing this back is a good idea.
I may be a minority, but I do think bringing this back is a good idea.
It has now been 9+ years since hardware side channels for bypassing
KASLR were discovered, and it still appears that real-world attackers
overwhelmingly prefer alternative strategies - such as this one.

I'm under no illusion that if we just do this one thing KASLR will be
secure again, but I think this patch would still have a meaningful
real-world impact in terms of making exploits significantly harder to
write.


On Wed, Dec 10, 2025 at 11:10 PM Ard Biesheuvel <ardb@kernel.org> wrote:
>
> Commit
>
>   1db780bafa4ce ("arm64/mm: Remove randomization of the linear map")
>
> removed linear map randomization from the arm64 port, on the basis that
> a prior change to the logic rendered it non-functional on the majority
> of relevant CPU implementations.
>
> As has been reported numerous times now, the upshot of this is that the
> virtual addresses of statically allocated kernel data structures are
> highly predictable if the kernel is loaded at a known physical address.
> Any bootloader that still adheres to the original arm64 boot protocol,
> which stipulated that the kernel should be loaded at the lowest
> available physical address, is affected by this.
>
> So bring back the most recent version of linear map randomization, which
> is based on the CPU's physical address range, but this time, allow this
> PA range to be overridden on the kernel command line.
>
> E.g., by passing
>
>   id_aa64mmfr0.parange=1 # 36 bits
>   id_aa64mmfr0.parange=2 # 40 bits
>
> the CPU's supported physical range can be reduced to the point where
> linear map randomization becomes feasible again. It also means that
> nothing else is permitted to appear in that physical window, i.e.,
> hotplug memory but also non-memory peripherals, or stage-2 mappings on
> behalf of KVM guests.
>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Cc: Liz Prucka <lizprucka@google.com>
> Cc: Seth Jenkins <sethjenkins@google.com>
>
> This is posted as an RFC because there are obvious shortcomings to this
> approach. However, before I spend more time on this, I'd like to gauge
> if there is any consensus that bringing this back is a good idea.
>
>  arch/arm64/include/asm/cpufeature.h   | 13 +++++++++++++
>  arch/arm64/kernel/image-vars.h        |  1 +
>  arch/arm64/kernel/kaslr.c             |  2 ++
>  arch/arm64/kernel/pi/idreg-override.c |  1 +
>  arch/arm64/kernel/pi/kaslr_early.c    |  4 ++++
>  arch/arm64/mm/init.c                  | 16 ++++++++++++++++
>  6 files changed, 37 insertions(+)
>
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index 4de51f8d92cb..fdb1331c406d 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -1078,6 +1078,19 @@ static inline bool cpu_has_lpa2(void)
>  #endif
>  }
>
> +static inline u64 cpu_get_phys_range(void)
> +{
> +       u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
> +
> +       mmfr0 &= ~id_aa64mmfr0_override.mask;
> +       mmfr0 |= id_aa64mmfr0_override.val;
> +
> +       int parange = cpuid_feature_extract_unsigned_field(mmfr0,
> +                                               ID_AA64MMFR0_EL1_PARANGE_SHIFT);
> +
> +       return BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
> +}
> +
>  #endif /* __ASSEMBLER__ */
>
>  #endif
> diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
> index 85bc629270bd..263543ad6155 100644
> --- a/arch/arm64/kernel/image-vars.h
> +++ b/arch/arm64/kernel/image-vars.h
> @@ -58,6 +58,7 @@ PI_EXPORT_SYM(id_aa64zfr0_override);
>  PI_EXPORT_SYM(arm64_sw_feature_override);
>  PI_EXPORT_SYM(arm64_use_ng_mappings);
>  PI_EXPORT_SYM(_ctype);
> +PI_EXPORT_SYM(memstart_offset_seed);
>
>  PI_EXPORT_SYM(swapper_pg_dir);
>
> diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
> index c9503ed45a6c..1da3e25f9d9e 100644
> --- a/arch/arm64/kernel/kaslr.c
> +++ b/arch/arm64/kernel/kaslr.c
> @@ -10,6 +10,8 @@
>  #include <asm/cpufeature.h>
>  #include <asm/memory.h>
>
> +u16 __initdata memstart_offset_seed;
> +
>  bool __ro_after_init __kaslr_is_enabled = false;
>
>  void __init kaslr_init(void)
> diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
> index bc57b290e5e7..a8351ba70300 100644
> --- a/arch/arm64/kernel/pi/idreg-override.c
> +++ b/arch/arm64/kernel/pi/idreg-override.c
> @@ -43,6 +43,7 @@ static const struct ftr_set_desc mmfr0 __prel64_initconst = {
>         .override       = &id_aa64mmfr0_override,
>         .fields         = {
>                 FIELD("ecv", ID_AA64MMFR0_EL1_ECV_SHIFT, NULL),
> +               FIELD("parange", ID_AA64MMFR0_EL1_PARANGE_SHIFT, NULL),
>                 {}
>         },
>  };
> diff --git a/arch/arm64/kernel/pi/kaslr_early.c b/arch/arm64/kernel/pi/kaslr_early.c
> index e0e018046a46..0257b43819db 100644
> --- a/arch/arm64/kernel/pi/kaslr_early.c
> +++ b/arch/arm64/kernel/pi/kaslr_early.c
> @@ -18,6 +18,8 @@
>
>  #include "pi.h"
>
> +extern u16 memstart_offset_seed;
> +
>  static u64 __init get_kaslr_seed(void *fdt, int node)
>  {
>         static char const seed_str[] __initconst = "kaslr-seed";
> @@ -51,6 +53,8 @@ u64 __init kaslr_early_init(void *fdt, int chosen)
>                         return 0;
>         }
>
> +       memstart_offset_seed = seed & U16_MAX;
> +
>         /*
>          * OK, so we are proceeding with KASLR enabled. Calculate a suitable
>          * kernel image offset from the seed. Let's place the kernel in the
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 524d34a0e921..6c55eca6ccad 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -275,6 +275,22 @@ void __init arm64_memblock_init(void)
>                 }
>         }
>
> +       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
> +               extern u16 memstart_offset_seed;
> +               s64 range = linear_region_size - cpu_get_phys_range();
> +
> +               /*
> +                * If the size of the linear region exceeds, by a sufficient
> +                * margin, the size of the region that the physical memory can
> +                * span, randomize the linear region as well.
> +                */
> +               if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
> +                       range /= ARM64_MEMSTART_ALIGN;
> +                       memstart_addr -= ARM64_MEMSTART_ALIGN *
> +                                        ((range * memstart_offset_seed) >> 16);
> +               }
> +       }
> +
>         /*
>          * Register the kernel text, kernel data, initrd, and initial
>          * pagetables with memblock.
> --
> 2.47.3
>


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [RFC PATCH] arm64: Bring back linear map randomization using PArange override
  2025-12-16 18:13 ` Seth Jenkins
@ 2026-01-08 13:33   ` Ard Biesheuvel
  2026-01-08 17:59     ` Seth Jenkins
  0 siblings, 1 reply; 5+ messages in thread
From: Ard Biesheuvel @ 2026-01-08 13:33 UTC (permalink / raw)
  To: Seth Jenkins
  Cc: linux-arm-kernel, linux-hardening, mark.rutland, catalin.marinas,
	will, kees, Liz Prucka

On Tue, 16 Dec 2025 at 19:14, Seth Jenkins <sethjenkins@google.com> wrote:
>
> > This is posted as an RFC because there are obvious shortcomings to this
> > approach. However, before I spend more time on this, I'd like to gauge
> > if there is any consensus that bringing this back is a good idea.
> I may be a minority, but I do think bringing this back is a good idea.
> It has now been 9+ years since hardware side channels for bypassing
> KASLR were discovered, and it still appears that real-world attackers
> overwhelmingly prefer alternative strategies - such as this one.
>
> I'm under no illusion that if we just do this one thing KASLR will be
> secure again, but I think this patch would still have a meaningful
> real-world impact in terms of making exploits significantly harder to
> write.
>

Thanks for the feedback.

I was hoping to get some other views on this matter, but sending the
patch during plumbers and right before the holidays probably didn't
help in that regard, I'm afraid.

Any thoughts on the desirability of this feature wrt randomization of
the placement of vmlinux in physical memory? AIUI, the main issue
being addressed here is predictable placement of statically allocated
kernel objects in the virtual address space, which could be solved in
either way. Or are there other concerns here?

Just trying to elicit as much input as possible on the list, so the
maintainers can make an informed decision.

Thanks,
Ard.


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [RFC PATCH] arm64: Bring back linear map randomization using PArange override
  2026-01-08 13:33   ` Ard Biesheuvel
@ 2026-01-08 17:59     ` Seth Jenkins
  0 siblings, 0 replies; 5+ messages in thread
From: Seth Jenkins @ 2026-01-08 17:59 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: linux-arm-kernel, linux-hardening, mark.rutland, catalin.marinas,
	will, kees, Liz Prucka

> Any thoughts on the desirability of this feature wrt randomization of
> the placement of vmlinux in physical memory?
Even with vmlinux randomization in physical memory, dynamically
allocated memory ALSO ends up getting predictably placed. If I
allocate basically all physical memory, e.g. with mmap, I can predict
with >90% accuracy a PFN that was allocated by that mmap with the
caveat that my testing was soon after boot so that reliability may go
down over time in a real world scenario.

So I think linear map randomization is still relevant in making
real-world exploitation more difficult even if vmlinux is randomized
in physical memory.


On Thu, Jan 8, 2026 at 8:33 AM Ard Biesheuvel <ardb@kernel.org> wrote:
>
> On Tue, 16 Dec 2025 at 19:14, Seth Jenkins <sethjenkins@google.com> wrote:
> >
> > > This is posted as an RFC because there are obvious shortcomings to this
> > > approach. However, before I spend more time on this, I'd like to gauge
> > > if there is any consensus that bringing this back is a good idea.
> > I may be a minority, but I do think bringing this back is a good idea.
> > It has now been 9+ years since hardware side channels for bypassing
> > KASLR were discovered, and it still appears that real-world attackers
> > overwhelmingly prefer alternative strategies - such as this one.
> >
> > I'm under no illusion that if we just do this one thing KASLR will be
> > secure again, but I think this patch would still have a meaningful
> > real-world impact in terms of making exploits significantly harder to
> > write.
> >
>
> Thanks for the feedback.
>
> I was hoping to get some other views on this matter, but sending the
> patch during plumbers and right before the holidays probably didn't
> help in that regard, I'm afraid.
>
> Any thoughts on the desirability of this feature wrt randomization of
> the placement of vmlinux in physical memory? AIUI, the main issue
> being addressed here is predictable placement of statically allocated
> kernel objects in the virtual address space, which could be solved in
> either way. Or are there other concerns here?
>
> Just trying to elicit as much input as possible on the list, so the
> maintainers can make an informed decision.
>
> Thanks,
> Ard.


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [RFC PATCH] arm64: Bring back linear map randomization using PArange override
  2025-12-11  4:09 [RFC PATCH] arm64: Bring back linear map randomization using PArange override Ard Biesheuvel
  2025-12-16 18:13 ` Seth Jenkins
@ 2026-01-19 15:18 ` Will Deacon
  1 sibling, 0 replies; 5+ messages in thread
From: Will Deacon @ 2026-01-19 15:18 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: linux-arm-kernel, linux-hardening, mark.rutland, catalin.marinas,
	kees, Liz Prucka, Seth Jenkins

On Thu, Dec 11, 2025 at 05:09:36AM +0100, Ard Biesheuvel wrote:
> Commit
> 
>   1db780bafa4ce ("arm64/mm: Remove randomization of the linear map")
> 
> removed linear map randomization from the arm64 port, on the basis that
> a prior change to the logic rendered it non-functional on the majority
> of relevant CPU implementations.
> 
> As has been reported numerous times now, the upshot of this is that the
> virtual addresses of statically allocated kernel data structures are
> highly predictable if the kernel is loaded at a known physical address.
> Any bootloader that still adheres to the original arm64 boot protocol,
> which stipulated that the kernel should be loaded at the lowest
> available physical address, is affected by this.
> 
> So bring back the most recent version of linear map randomization, which
> is based on the CPU's physical address range, but this time, allow this
> PA range to be overridden on the kernel command line.
> 
> E.g., by passing
> 
>   id_aa64mmfr0.parange=1 # 36 bits
>   id_aa64mmfr0.parange=2 # 40 bits
> 
> the CPU's supported physical range can be reduced to the point where
> linear map randomization becomes feasible again. It also means that
> nothing else is permitted to appear in that physical window, i.e.,
> hotplug memory but also non-memory peripherals, or stage-2 mappings on
> behalf of KVM guests.
> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Cc: Liz Prucka <lizprucka@google.com>
> Cc: Seth Jenkins <sethjenkins@google.com>
> 
> This is posted as an RFC because there are obvious shortcomings to this
> approach. However, before I spend more time on this, I'd like to gauge
> if there is any consensus that bringing this back is a good idea.

I'm a bit worried about knock-on effects of restricting the parange,
especially for guests, although I admittedly haven't had a chance to
investigate it properly.

If we instead made the cmdline option specific to memory hotplug (e.g.
by providing a ceiling for the maximum physical address of memory that
can appear dynamically), would that give us what we need? For Android,
we could then pass a value of '0' in the default cmdline (effectively
disabling memory hotplug) and the onus would be on the few platforms
that care about hotplug to set the value correctly.

We already have "mem=", so maybe it's just a question of special-casing
0 for that (assuming that memory hotplug respects the memory limit).

Will


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2026-01-19 15:19 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-12-11  4:09 [RFC PATCH] arm64: Bring back linear map randomization using PArange override Ard Biesheuvel
2025-12-16 18:13 ` Seth Jenkins
2026-01-08 13:33   ` Ard Biesheuvel
2026-01-08 17:59     ` Seth Jenkins
2026-01-19 15:18 ` Will Deacon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox