linux-arm-kernel.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v7] arm64/module: Use text-poke API for late relocations.
@ 2025-06-03 22:34 Dylan Hatch
  2025-06-04 23:58 ` Toshiyuki Sato (Fujitsu)
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Dylan Hatch @ 2025-06-03 22:34 UTC (permalink / raw)
  To: Catalin Marinas, Will Deacon, Ard Biesheuvel, Sami Tolvanen,
	Geert Uytterhoeven, Song Liu
  Cc: linux-arm-kernel, linux-kernel, Dylan Hatch, Roman Gushchin,
	Toshiyuki Sato

To enable late module patching, livepatch modules need to be able to
apply some of their relocations well after being loaded. In this
scenario however, the livepatch module text and data is already RX-only,
so special treatment is needed to make the late relocations possible. To
do this, use the text-poking API for these late relocations.

This patch is partially based off commit 88fc078a7a8f6 ("x86/module: Use
text_poke() for late relocations").

Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
Acked-by: Song Liu <song@kernel.org>
---
 arch/arm64/kernel/module.c | 101 +++++++++++++++++++++----------------
 1 file changed, 57 insertions(+), 44 deletions(-)

diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 06bb680bfe975..5b793ec6f2e03 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -23,6 +23,7 @@
 #include <asm/insn.h>
 #include <asm/scs.h>
 #include <asm/sections.h>
+#include <asm/text-patching.h>
 
 enum aarch64_reloc_op {
 	RELOC_OP_NONE,
@@ -48,7 +49,17 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
 	return 0;
 }
 
-static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
+#define WRITE_PLACE(place, val, mod) do {				\
+	__typeof__(val) __val = (val);					\
+									\
+	if (mod->state == MODULE_STATE_UNFORMED)			\
+		*(place) = __val;					\
+	else								\
+		aarch64_insn_copy(place, &(__val), sizeof(*place));	\
+} while (0)
+
+static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len,
+		      struct module *me)
 {
 	s64 sval = do_reloc(op, place, val);
 
@@ -66,7 +77,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 
 	switch (len) {
 	case 16:
-		*(s16 *)place = sval;
+		WRITE_PLACE((s16 *)place, sval, me);
 		switch (op) {
 		case RELOC_OP_ABS:
 			if (sval < 0 || sval > U16_MAX)
@@ -82,7 +93,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 		}
 		break;
 	case 32:
-		*(s32 *)place = sval;
+		WRITE_PLACE((s32 *)place, sval, me);
 		switch (op) {
 		case RELOC_OP_ABS:
 			if (sval < 0 || sval > U32_MAX)
@@ -98,7 +109,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
 		}
 		break;
 	case 64:
-		*(s64 *)place = sval;
+		WRITE_PLACE((s64 *)place, sval, me);
 		break;
 	default:
 		pr_err("Invalid length (%d) for data relocation\n", len);
@@ -113,7 +124,8 @@ enum aarch64_insn_movw_imm_type {
 };
 
 static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
-			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
+			   int lsb, enum aarch64_insn_movw_imm_type imm_type,
+			   struct module *me)
 {
 	u64 imm;
 	s64 sval;
@@ -145,7 +157,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
 
 	/* Update the instruction with the new encoding. */
 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
-	*place = cpu_to_le32(insn);
+	WRITE_PLACE(place, cpu_to_le32(insn), me);
 
 	if (imm > U16_MAX)
 		return -ERANGE;
@@ -154,7 +166,8 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
 }
 
 static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
-			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
+			  int lsb, int len, enum aarch64_insn_imm_type imm_type,
+			  struct module *me)
 {
 	u64 imm, imm_mask;
 	s64 sval;
@@ -170,7 +183,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
 
 	/* Update the instruction's immediate field. */
 	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
-	*place = cpu_to_le32(insn);
+	WRITE_PLACE(place, cpu_to_le32(insn), me);
 
 	/*
 	 * Extract the upper value bits (including the sign bit) and
@@ -189,17 +202,17 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
 }
 
 static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
-			   __le32 *place, u64 val)
+			   __le32 *place, u64 val, struct module *me)
 {
 	u32 insn;
 
 	if (!is_forbidden_offset_for_adrp(place))
 		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
-				      AARCH64_INSN_IMM_ADR);
+				      AARCH64_INSN_IMM_ADR, me);
 
 	/* patch ADRP to ADR if it is in range */
 	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
-			    AARCH64_INSN_IMM_ADR)) {
+			    AARCH64_INSN_IMM_ADR, me)) {
 		insn = le32_to_cpu(*place);
 		insn &= ~BIT(31);
 	} else {
@@ -211,7 +224,7 @@ static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
 						   AARCH64_INSN_BRANCH_NOLINK);
 	}
 
-	*place = cpu_to_le32(insn);
+	WRITE_PLACE(place, cpu_to_le32(insn), me);
 	return 0;
 }
 
@@ -255,23 +268,23 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		/* Data relocations. */
 		case R_AARCH64_ABS64:
 			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64, me);
 			break;
 		case R_AARCH64_ABS32:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32, me);
 			break;
 		case R_AARCH64_ABS16:
-			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16, me);
 			break;
 		case R_AARCH64_PREL64:
 			overflow_check = false;
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64, me);
 			break;
 		case R_AARCH64_PREL32:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32, me);
 			break;
 		case R_AARCH64_PREL16:
-			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16, me);
 			break;
 
 		/* MOVW instruction relocations. */
@@ -280,88 +293,88 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			fallthrough;
 		case R_AARCH64_MOVW_UABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_UABS_G1_NC:
 			overflow_check = false;
 			fallthrough;
 		case R_AARCH64_MOVW_UABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_UABS_G2_NC:
 			overflow_check = false;
 			fallthrough;
 		case R_AARCH64_MOVW_UABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_UABS_G3:
 			/* We're using the top bits so we can't overflow. */
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_SABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_SABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_SABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G0_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G0:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G1_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G1:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G2_NC:
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVKZ);
+					      AARCH64_INSN_IMM_MOVKZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G2:
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 		case R_AARCH64_MOVW_PREL_G3:
 			/* We're using the top bits so we can't overflow. */
 			overflow_check = false;
 			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
-					      AARCH64_INSN_IMM_MOVNZ);
+					      AARCH64_INSN_IMM_MOVNZ, me);
 			break;
 
 		/* Immediate instruction relocations. */
 		case R_AARCH64_LD_PREL_LO19:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
+					     AARCH64_INSN_IMM_19, me);
 			break;
 		case R_AARCH64_ADR_PREL_LO21:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
-					     AARCH64_INSN_IMM_ADR);
+					     AARCH64_INSN_IMM_ADR, me);
 			break;
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
 			fallthrough;
 		case R_AARCH64_ADR_PREL_PG_HI21:
-			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
+			ovf = reloc_insn_adrp(me, sechdrs, loc, val, me);
 			if (ovf && ovf != -ERANGE)
 				return ovf;
 			break;
@@ -369,46 +382,46 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		case R_AARCH64_LDST8_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST16_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST32_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST64_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_LDST128_ABS_LO12_NC:
 			overflow_check = false;
 			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
-					     AARCH64_INSN_IMM_12);
+					     AARCH64_INSN_IMM_12, me);
 			break;
 		case R_AARCH64_TSTBR14:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
-					     AARCH64_INSN_IMM_14);
+					     AARCH64_INSN_IMM_14, me);
 			break;
 		case R_AARCH64_CONDBR19:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
-					     AARCH64_INSN_IMM_19);
+					     AARCH64_INSN_IMM_19, me);
 			break;
 		case R_AARCH64_JUMP26:
 		case R_AARCH64_CALL26:
 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
-					     AARCH64_INSN_IMM_26);
+					     AARCH64_INSN_IMM_26, me);
 			if (ovf == -ERANGE) {
 				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
 				if (!val)
 					return -ENOEXEC;
 				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
-						     26, AARCH64_INSN_IMM_26);
+						     26, AARCH64_INSN_IMM_26, me);
 			}
 			break;
 
-- 
2.49.0.1204.g71687c7c1d-goog



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* RE: [PATCH v7] arm64/module: Use text-poke API for late relocations.
  2025-06-03 22:34 [PATCH v7] arm64/module: Use text-poke API for late relocations Dylan Hatch
@ 2025-06-04 23:58 ` Toshiyuki Sato (Fujitsu)
  2025-06-09 17:59 ` Song Liu
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Toshiyuki Sato (Fujitsu) @ 2025-06-04 23:58 UTC (permalink / raw)
  To: 'Dylan Hatch'
  Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Sami Tolvanen,
	Geert Uytterhoeven, Song Liu,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, Roman Gushchin,
	Toshiyuki Sato (Fujitsu)

Hi Dylan,

> To enable late module patching, livepatch modules need to be able to
> apply some of their relocations well after being loaded. In this
> scenario however, the livepatch module text and data is already RX-only,
> so special treatment is needed to make the late relocations possible. To
> do this, use the text-poking API for these late relocations.
> 
> This patch is partially based off commit 88fc078a7a8f6 ("x86/module: Use
> text_poke() for late relocations").
> 
> Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
> Acked-by: Song Liu <song@kernel.org>
> ---
>  arch/arm64/kernel/module.c | 101 +++++++++++++++++++++----------------
>  1 file changed, 57 insertions(+), 44 deletions(-)
> 
> diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> index 06bb680bfe975..5b793ec6f2e03 100644
> --- a/arch/arm64/kernel/module.c
> +++ b/arch/arm64/kernel/module.c
> @@ -23,6 +23,7 @@
>  #include <asm/insn.h>
>  #include <asm/scs.h>
>  #include <asm/sections.h>
> +#include <asm/text-patching.h>
> 
>  enum aarch64_reloc_op {
>  	RELOC_OP_NONE,
> @@ -48,7 +49,17 @@ static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
>  	return 0;
>  }
> 
> -static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
> +#define WRITE_PLACE(place, val, mod) do {				\
> +	__typeof__(val) __val = (val);					\
> +									\
> +	if (mod->state == MODULE_STATE_UNFORMED)			\
> +		*(place) = __val;					\
> +	else								\
> +		aarch64_insn_copy(place, &(__val), sizeof(*place));	\
> +} while (0)
> +
> +static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len,
> +		      struct module *me)
>  {
>  	s64 sval = do_reloc(op, place, val);
> 
> @@ -66,7 +77,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
> 
>  	switch (len) {
>  	case 16:
> -		*(s16 *)place = sval;
> +		WRITE_PLACE((s16 *)place, sval, me);
>  		switch (op) {
>  		case RELOC_OP_ABS:
>  			if (sval < 0 || sval > U16_MAX)
> @@ -82,7 +93,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
>  		}
>  		break;
>  	case 32:
> -		*(s32 *)place = sval;
> +		WRITE_PLACE((s32 *)place, sval, me);
>  		switch (op) {
>  		case RELOC_OP_ABS:
>  			if (sval < 0 || sval > U32_MAX)
> @@ -98,7 +109,7 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
>  		}
>  		break;
>  	case 64:
> -		*(s64 *)place = sval;
> +		WRITE_PLACE((s64 *)place, sval, me);
>  		break;
>  	default:
>  		pr_err("Invalid length (%d) for data relocation\n", len);
> @@ -113,7 +124,8 @@ enum aarch64_insn_movw_imm_type {
>  };
> 
>  static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
> -			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
> +			   int lsb, enum aarch64_insn_movw_imm_type imm_type,
> +			   struct module *me)
>  {
>  	u64 imm;
>  	s64 sval;
> @@ -145,7 +157,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
> 
>  	/* Update the instruction with the new encoding. */
>  	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
> -	*place = cpu_to_le32(insn);
> +	WRITE_PLACE(place, cpu_to_le32(insn), me);
> 
>  	if (imm > U16_MAX)
>  		return -ERANGE;
> @@ -154,7 +166,8 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
>  }
> 
>  static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
> -			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
> +			  int lsb, int len, enum aarch64_insn_imm_type imm_type,
> +			  struct module *me)
>  {
>  	u64 imm, imm_mask;
>  	s64 sval;
> @@ -170,7 +183,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
> 
>  	/* Update the instruction's immediate field. */
>  	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
> -	*place = cpu_to_le32(insn);
> +	WRITE_PLACE(place, cpu_to_le32(insn), me);
> 
>  	/*
>  	 * Extract the upper value bits (including the sign bit) and
> @@ -189,17 +202,17 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
>  }
> 
>  static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
> -			   __le32 *place, u64 val)
> +			   __le32 *place, u64 val, struct module *me)
>  {
>  	u32 insn;
> 
>  	if (!is_forbidden_offset_for_adrp(place))
>  		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
> -				      AARCH64_INSN_IMM_ADR);
> +				      AARCH64_INSN_IMM_ADR, me);
> 
>  	/* patch ADRP to ADR if it is in range */
>  	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
> -			    AARCH64_INSN_IMM_ADR)) {
> +			    AARCH64_INSN_IMM_ADR, me)) {
>  		insn = le32_to_cpu(*place);
>  		insn &= ~BIT(31);
>  	} else {
> @@ -211,7 +224,7 @@ static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
>  						   AARCH64_INSN_BRANCH_NOLINK);
>  	}
> 
> -	*place = cpu_to_le32(insn);
> +	WRITE_PLACE(place, cpu_to_le32(insn), me);
>  	return 0;
>  }
> 
> @@ -255,23 +268,23 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
>  		/* Data relocations. */
>  		case R_AARCH64_ABS64:
>  			overflow_check = false;
> -			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
> +			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64, me);
>  			break;
>  		case R_AARCH64_ABS32:
> -			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
> +			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32, me);
>  			break;
>  		case R_AARCH64_ABS16:
> -			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
> +			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16, me);
>  			break;
>  		case R_AARCH64_PREL64:
>  			overflow_check = false;
> -			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
> +			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64, me);
>  			break;
>  		case R_AARCH64_PREL32:
> -			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
> +			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32, me);
>  			break;
>  		case R_AARCH64_PREL16:
> -			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
> +			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16, me);
>  			break;
> 
>  		/* MOVW instruction relocations. */
> @@ -280,88 +293,88 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
>  			fallthrough;
>  		case R_AARCH64_MOVW_UABS_G0:
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_UABS_G1_NC:
>  			overflow_check = false;
>  			fallthrough;
>  		case R_AARCH64_MOVW_UABS_G1:
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_UABS_G2_NC:
>  			overflow_check = false;
>  			fallthrough;
>  		case R_AARCH64_MOVW_UABS_G2:
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_UABS_G3:
>  			/* We're using the top bits so we can't overflow. */
>  			overflow_check = false;
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_SABS_G0:
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
>  		case R_AARCH64_MOVW_SABS_G1:
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
>  		case R_AARCH64_MOVW_SABS_G2:
>  			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G0_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G0:
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G1_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G1:
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G2_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
> -					      AARCH64_INSN_IMM_MOVKZ);
> +					      AARCH64_INSN_IMM_MOVKZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G2:
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
>  		case R_AARCH64_MOVW_PREL_G3:
>  			/* We're using the top bits so we can't overflow. */
>  			overflow_check = false;
>  			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
> -					      AARCH64_INSN_IMM_MOVNZ);
> +					      AARCH64_INSN_IMM_MOVNZ, me);
>  			break;
> 
>  		/* Immediate instruction relocations. */
>  		case R_AARCH64_LD_PREL_LO19:
>  			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
> -					     AARCH64_INSN_IMM_19);
> +					     AARCH64_INSN_IMM_19, me);
>  			break;
>  		case R_AARCH64_ADR_PREL_LO21:
>  			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
> -					     AARCH64_INSN_IMM_ADR);
> +					     AARCH64_INSN_IMM_ADR, me);
>  			break;
>  		case R_AARCH64_ADR_PREL_PG_HI21_NC:
>  			overflow_check = false;
>  			fallthrough;
>  		case R_AARCH64_ADR_PREL_PG_HI21:
> -			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
> +			ovf = reloc_insn_adrp(me, sechdrs, loc, val, me);
>  			if (ovf && ovf != -ERANGE)
>  				return ovf;
>  			break;
> @@ -369,46 +382,46 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
>  		case R_AARCH64_LDST8_ABS_LO12_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
> -					     AARCH64_INSN_IMM_12);
> +					     AARCH64_INSN_IMM_12, me);
>  			break;
>  		case R_AARCH64_LDST16_ABS_LO12_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
> -					     AARCH64_INSN_IMM_12);
> +					     AARCH64_INSN_IMM_12, me);
>  			break;
>  		case R_AARCH64_LDST32_ABS_LO12_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
> -					     AARCH64_INSN_IMM_12);
> +					     AARCH64_INSN_IMM_12, me);
>  			break;
>  		case R_AARCH64_LDST64_ABS_LO12_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
> -					     AARCH64_INSN_IMM_12);
> +					     AARCH64_INSN_IMM_12, me);
>  			break;
>  		case R_AARCH64_LDST128_ABS_LO12_NC:
>  			overflow_check = false;
>  			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
> -					     AARCH64_INSN_IMM_12);
> +					     AARCH64_INSN_IMM_12, me);
>  			break;
>  		case R_AARCH64_TSTBR14:
>  			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
> -					     AARCH64_INSN_IMM_14);
> +					     AARCH64_INSN_IMM_14, me);
>  			break;
>  		case R_AARCH64_CONDBR19:
>  			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
> -					     AARCH64_INSN_IMM_19);
> +					     AARCH64_INSN_IMM_19, me);
>  			break;
>  		case R_AARCH64_JUMP26:
>  		case R_AARCH64_CALL26:
>  			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
> -					     AARCH64_INSN_IMM_26);
> +					     AARCH64_INSN_IMM_26, me);
>  			if (ovf == -ERANGE) {
>  				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
>  				if (!val)
>  					return -ENOEXEC;
>  				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
> -						     26, AARCH64_INSN_IMM_26);
> +						     26, AARCH64_INSN_IMM_26, me);
>  			}
>  			break;
> 
> --
> 2.49.0.1204.g71687c7c1d-goog

Thanks for posting the new patch.

I ran kpatch's integration tests and no issues were detected.

The livepatch patches [1][2] (with arch/arm64/Kconfig manually adjusted) have been applied to the kernel (6.15.0).
The kpatch used is the same as in the previous test [3][4].

[1] https://lore.kernel.org/all/20250521111000.2237470-1-mark.rutland@arm.com/
[2] https://lore.kernel.org/all/20250320171559.3423224-3-song@kernel.org/
[3] https://lore.kernel.org/all/TY4PR01MB1377739F1CC08549A619C8635D7BA2@TY4PR01MB13777.jpnprd01.prod.outlook.com/
[4] https://github.com/dynup/kpatch/pull/1439

Tested-by: Toshiyuki Sato <fj6611ie@aa.jp.fujitsu.com>

Regards,
Toshiyuki Sato

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v7] arm64/module: Use text-poke API for late relocations.
  2025-06-03 22:34 [PATCH v7] arm64/module: Use text-poke API for late relocations Dylan Hatch
  2025-06-04 23:58 ` Toshiyuki Sato (Fujitsu)
@ 2025-06-09 17:59 ` Song Liu
  2025-06-12 15:18 ` Will Deacon
  2025-06-20 12:35 ` Catalin Marinas
  3 siblings, 0 replies; 7+ messages in thread
From: Song Liu @ 2025-06-09 17:59 UTC (permalink / raw)
  To: Dylan Hatch
  Cc: Catalin Marinas, Will Deacon, Ard Biesheuvel, Sami Tolvanen,
	Geert Uytterhoeven, linux-arm-kernel, linux-kernel,
	Roman Gushchin, Toshiyuki Sato

Hi Will and folks,

We (Meta) are hoping to test livepatch arm64 with our kernels soon.
In general, we strongly prefer patches that are already landed in the
subsystem's git tree, and being tested in linux-next.

Could you please help us review and land this set and [1] and [2] so
that we can test them in our kernels?

Thanks,
Song

[1] https://lore.kernel.org/linux-arm-kernel/20250320171559.3423224-1-song@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20250521111000.2237470-2-mark.rutland@arm.com/

On Tue, Jun 3, 2025 at 3:34 PM Dylan Hatch <dylanbhatch@google.com> wrote:
>
> To enable late module patching, livepatch modules need to be able to
> apply some of their relocations well after being loaded. In this
> scenario however, the livepatch module text and data is already RX-only,
> so special treatment is needed to make the late relocations possible. To
> do this, use the text-poking API for these late relocations.
>
> This patch is partially based off commit 88fc078a7a8f6 ("x86/module: Use
> text_poke() for late relocations").
>
> Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
> Acked-by: Song Liu <song@kernel.org>


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v7] arm64/module: Use text-poke API for late relocations.
  2025-06-03 22:34 [PATCH v7] arm64/module: Use text-poke API for late relocations Dylan Hatch
  2025-06-04 23:58 ` Toshiyuki Sato (Fujitsu)
  2025-06-09 17:59 ` Song Liu
@ 2025-06-12 15:18 ` Will Deacon
  2025-06-16 22:35   ` Song Liu
  2025-06-20 12:35 ` Catalin Marinas
  3 siblings, 1 reply; 7+ messages in thread
From: Will Deacon @ 2025-06-12 15:18 UTC (permalink / raw)
  To: Dylan Hatch
  Cc: Catalin Marinas, Ard Biesheuvel, Sami Tolvanen,
	Geert Uytterhoeven, Song Liu, linux-arm-kernel, linux-kernel,
	Roman Gushchin, Toshiyuki Sato

On Tue, Jun 03, 2025 at 10:34:17PM +0000, Dylan Hatch wrote:
> To enable late module patching, livepatch modules need to be able to
> apply some of their relocations well after being loaded. In this
> scenario however, the livepatch module text and data is already RX-only,
> so special treatment is needed to make the late relocations possible. To
> do this, use the text-poking API for these late relocations.
> 
> This patch is partially based off commit 88fc078a7a8f6 ("x86/module: Use
> text_poke() for late relocations").
> 
> Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
> Acked-by: Song Liu <song@kernel.org>
> ---
>  arch/arm64/kernel/module.c | 101 +++++++++++++++++++++----------------
>  1 file changed, 57 insertions(+), 44 deletions(-)

Thanks for sticking with this, Dylan:

Acked-by: Will Deacon <will@kernel.org>

Catalin, I assume you'll pick this up for 6.17.

Cheers,

Will


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v7] arm64/module: Use text-poke API for late relocations.
  2025-06-12 15:18 ` Will Deacon
@ 2025-06-16 22:35   ` Song Liu
  2025-06-17 13:28     ` Catalin Marinas
  0 siblings, 1 reply; 7+ messages in thread
From: Song Liu @ 2025-06-16 22:35 UTC (permalink / raw)
  To: Will Deacon
  Cc: Dylan Hatch, Catalin Marinas, Ard Biesheuvel, Sami Tolvanen,
	Geert Uytterhoeven, linux-arm-kernel, linux-kernel,
	Roman Gushchin, Toshiyuki Sato

Hi Will and Catalin,

What's our plan with this set and [1]? Once we land these two sets, I
will respin [2] which enables livepatch for arm64. We (Meta) are hoping
to land these in linux-next soon, and back port them to our kernels.

Thanks,
Song

[1] https://lore.kernel.org/linux-arm-kernel/20250521111000.2237470-1-mark.rutland@arm.com/
[2] https://lore.kernel.org/linux-arm-kernel/20250320171559.3423224-3-song@kernel.org/

On Thu, Jun 12, 2025 at 8:19 AM Will Deacon <will@kernel.org> wrote:
>
> On Tue, Jun 03, 2025 at 10:34:17PM +0000, Dylan Hatch wrote:
> > To enable late module patching, livepatch modules need to be able to
> > apply some of their relocations well after being loaded. In this
> > scenario however, the livepatch module text and data is already RX-only,
> > so special treatment is needed to make the late relocations possible. To
> > do this, use the text-poking API for these late relocations.
> >
> > This patch is partially based off commit 88fc078a7a8f6 ("x86/module: Use
> > text_poke() for late relocations").
> >
> > Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
> > Acked-by: Song Liu <song@kernel.org>
> > ---
> >  arch/arm64/kernel/module.c | 101 +++++++++++++++++++++----------------
> >  1 file changed, 57 insertions(+), 44 deletions(-)
>
> Thanks for sticking with this, Dylan:
>
> Acked-by: Will Deacon <will@kernel.org>
>
> Catalin, I assume you'll pick this up for 6.17.


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v7] arm64/module: Use text-poke API for late relocations.
  2025-06-16 22:35   ` Song Liu
@ 2025-06-17 13:28     ` Catalin Marinas
  0 siblings, 0 replies; 7+ messages in thread
From: Catalin Marinas @ 2025-06-17 13:28 UTC (permalink / raw)
  To: Song Liu
  Cc: Will Deacon, Dylan Hatch, Ard Biesheuvel, Sami Tolvanen,
	Geert Uytterhoeven, linux-arm-kernel, linux-kernel,
	Roman Gushchin, Toshiyuki Sato

On Mon, Jun 16, 2025 at 03:35:14PM -0700, Song Liu wrote:
> What's our plan with this set and [1]? Once we land these two sets, I
> will respin [2] which enables livepatch for arm64. We (Meta) are hoping
> to land these in linux-next soon, and back port them to our kernels.
[...] 
> [1] https://lore.kernel.org/linux-arm-kernel/20250521111000.2237470-1-mark.rutland@arm.com/
> [2] https://lore.kernel.org/linux-arm-kernel/20250320171559.3423224-3-song@kernel.org/

I plan to queue (for 6.17) this patch and Mark's reliable stacktrace
series sometime this week.

-- 
Catalin


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v7] arm64/module: Use text-poke API for late relocations.
  2025-06-03 22:34 [PATCH v7] arm64/module: Use text-poke API for late relocations Dylan Hatch
                   ` (2 preceding siblings ...)
  2025-06-12 15:18 ` Will Deacon
@ 2025-06-20 12:35 ` Catalin Marinas
  3 siblings, 0 replies; 7+ messages in thread
From: Catalin Marinas @ 2025-06-20 12:35 UTC (permalink / raw)
  To: Will Deacon, Ard Biesheuvel, Sami Tolvanen, Geert Uytterhoeven,
	Song Liu, Dylan Hatch
  Cc: linux-arm-kernel, linux-kernel, Roman Gushchin, Toshiyuki Sato

On Tue, 03 Jun 2025 22:34:17 +0000, Dylan Hatch wrote:
> To enable late module patching, livepatch modules need to be able to
> apply some of their relocations well after being loaded. In this
> scenario however, the livepatch module text and data is already RX-only,
> so special treatment is needed to make the late relocations possible. To
> do this, use the text-poking API for these late relocations.
> 
> This patch is partially based off commit 88fc078a7a8f6 ("x86/module: Use
> text_poke() for late relocations").
> 
> [...]

Applied to arm64 (for-next/livepatch), thanks!

[1/1] arm64/module: Use text-poke API for late relocations.
      https://git.kernel.org/arm64/c/91b89a634487

-- 
Catalin



^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2025-06-20 12:58 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-06-03 22:34 [PATCH v7] arm64/module: Use text-poke API for late relocations Dylan Hatch
2025-06-04 23:58 ` Toshiyuki Sato (Fujitsu)
2025-06-09 17:59 ` Song Liu
2025-06-12 15:18 ` Will Deacon
2025-06-16 22:35   ` Song Liu
2025-06-17 13:28     ` Catalin Marinas
2025-06-20 12:35 ` Catalin Marinas

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).