From mboxrd@z Thu Jan 1 00:00:00 1970
From: Catalin Marinas <catalin.marinas@arm.com>
Subject: [PATCH v2 27/31] arm64: Loadable modules
Date: Tue, 14 Aug 2012 18:52:28 +0100
Message-ID: <1344966752-16102-28-git-send-email-catalin.marinas@arm.com>
In-Reply-To: <1344966752-16102-1-git-send-email-catalin.marinas@arm.com>
References: <1344966752-16102-1-git-send-email-catalin.marinas@arm.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: linux-arch@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Cc: Will Deacon <will.deacon@arm.com>, linux-kernel@vger.kernel.org,
	Arnd Bergmann <arnd@arndb.de>
List-Id: linux-arch.vger.kernel.org

From: Will Deacon <will.deacon@arm.com>

This patch adds support for loadable modules. Loadable modules are
loaded 64MB below the kernel image due to branch relocation restrictions
(see Documentation/arm64/memory.txt).

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/module.h |   23 ++
 arch/arm64/kernel/module.c      |  456 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 479 insertions(+), 0 deletions(-)
 create mode 100644 arch/arm64/include/asm/module.h
 create mode 100644 arch/arm64/kernel/module.c

diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
new file mode 100644
index 0000000..e80e232
--- /dev/null
+++ b/arch/arm64/include/asm/module.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_MODULE_H
+#define __ASM_MODULE_H
+
+#include <asm-generic/module.h>
+
+#define MODULE_ARCH_VERMAGIC	"aarch64"
+
+#endif /* __ASM_MODULE_H */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
new file mode 100644
index 0000000..ca0e3d5
--- /dev/null
+++ b/arch/arm64/kernel/module.c
@@ -0,0 +1,456 @@
+/*
+ * AArch64 loadable module support.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/elf.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+
+void *module_alloc(unsigned long size)
+{
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				    GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				    __builtin_return_address(0));
+}
+
+enum aarch64_reloc_op {
+	RELOC_OP_NONE,
+	RELOC_OP_ABS,
+	RELOC_OP_PREL,
+	RELOC_OP_PAGE,
+};
+
+static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
+{
+	switch (reloc_op) {
+	case RELOC_OP_ABS:
+		return val;
+	case RELOC_OP_PREL:
+		return val - (u64)place;
+	case RELOC_OP_PAGE:
+		return (val & ~0xfff) - ((u64)place & ~0xfff);
+	case RELOC_OP_NONE:
+		return 0;
+	}
+
+	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
+	return 0;
+}
+
+static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
+{
+	u64 imm_mask = (1 << len) - 1;
+	s64 sval = do_reloc(op, place, val);
+
+	switch (len) {
+	case 16:
+		*(s16 *)place = sval;
+		break;
+	case 32:
+		*(s32 *)place = sval;
+		break;
+	case 64:
+		*(s64 *)place = sval;
+		break;
+	default:
+		pr_err("Invalid length (%d) for data relocation\n", len);
+		return 0;
+	}
+
+	/*
+	 * Extract the upper value bits (including the sign bit) and
+	 * shift them to bit 0.
+	 */
+	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+	/*
+	 * Overflow has occurred if the value is not representable in
+	 * len bits (i.e. the bottom len bits are not sign-extended and
+	 * the top bits are not all zero).
+	 */
+	if ((u64)(sval + 1) > 2)
+		return -ERANGE;
+
+	return 0;
+}
+
+enum aarch64_imm_type {
+	INSN_IMM_MOVNZ,
+	INSN_IMM_MOVK,
+	INSN_IMM_ADR,
+	INSN_IMM_26,
+	INSN_IMM_19,
+	INSN_IMM_16,
+	INSN_IMM_14,
+	INSN_IMM_12,
+	INSN_IMM_9,
+};
+
+static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
+{
+	u32 immlo, immhi, lomask, himask, mask;
+	int shift;
+
+	switch (type) {
+	case INSN_IMM_MOVNZ:
+		/*
+		 * For signed MOVW relocations, we have to manipulate the
+		 * instruction encoding depending on whether or not the
+		 * immediate is less than zero.
+		 */
+		insn &= ~(3 << 29);
+		if ((s64)imm >= 0) {
+			/* >=0: Set the instruction to MOVZ (opcode 10b). */
+			insn |= 2 << 29;
+		} else {
+			/*
+			 * <0: Set the instruction to MOVN (opcode 00b).
+			 * Since we've masked the opcode already, we
+			 * don't need to do anything other than
+			 * inverting the new immediate field.
+			 */
+			imm = ~imm;
+		}
+	case INSN_IMM_MOVK:
+		mask = BIT(16) - 1;
+		shift = 5;
+		break;
+	case INSN_IMM_ADR:
+		lomask = 0x3;
+		himask = 0x7ffff;
+		immlo = imm & lomask;
+		imm >>= 2;
+		immhi = imm & himask;
+		imm = (immlo << 24) | (immhi);
+		mask = (lomask << 24) | (himask);
+		shift = 5;
+		break;
+	case INSN_IMM_26:
+		mask = BIT(26) - 1;
+		shift = 0;
+		break;
+	case INSN_IMM_19:
+		mask = BIT(19) - 1;
+		shift = 5;
+		break;
+	case INSN_IMM_16:
+		mask = BIT(16) - 1;
+		shift = 5;
+		break;
+	case INSN_IMM_14:
+		mask = BIT(14) - 1;
+		shift = 5;
+		break;
+	case INSN_IMM_12:
+		mask = BIT(12) - 1;
+		shift = 10;
+		break;
+	case INSN_IMM_9:
+		mask = BIT(9) - 1;
+		shift = 12;
+		break;
+	default:
+		pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
+			type);
+		return 0;
+	}
+
+	/* Update the immediate field. */
+	insn &= ~(mask << shift);
+	insn |= (imm & mask) << shift;
+
+	return insn;
+}
+
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+			   int lsb, enum aarch64_imm_type imm_type)
+{
+	u64 imm, limit = 0;
+	s64 sval;
+	u32 insn = *(u32 *)place;
+
+	sval = do_reloc(op, place, val);
+	sval >>= lsb;
+	imm = sval & 0xffff;
+
+	/* Update the instruction with the new encoding. */
+	*(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+
+	/* Shift out the immediate field. */
+	sval >>= 16;
+
+	/*
+	 * For unsigned immediates, the overflow check is straightforward.
+	 * For signed immediates, the sign bit is actually the bit past the
+	 * most significant bit of the field.
+	 * The INSN_IMM_16 immediate type is unsigned.
+	 */
+	if (imm_type != INSN_IMM_16) {
+		sval++;
+		limit++;
+	}
+
+	/* Check the upper bits depending on the sign of the immediate. */
+	if ((u64)sval > limit)
+		return -ERANGE;
+
+	return 0;
+}
+
+static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
+			  int lsb, int len, enum aarch64_imm_type imm_type)
+{
+	u64 imm, imm_mask;
+	s64 sval;
+	u32 insn = *(u32 *)place;
+
+	/* Calculate the relocation value. */
+	sval = do_reloc(op, place, val);
+	sval >>= lsb;
+
+	/* Extract the value bits and shift them to bit 0. */
+	imm_mask = (BIT(lsb + len) - 1) >> lsb;
+	imm = sval & imm_mask;
+
+	/* Update the instruction's immediate field. */
+	*(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+
+	/*
+	 * Extract the upper value bits (including the sign bit) and
+	 * shift them to bit 0.
+	 */
+	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+	/*
+	 * Overflow has occurred if the upper bits are not all equal to
+	 * the sign bit of the value.
+	 */
+	if ((u64)(sval + 1) >= 2)
+		return -ERANGE;
+
+	return 0;
+}
+
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	unsigned int i;
+	int ovf;
+	bool overflow_check;
+	Elf64_Sym *sym;
+	void *loc;
+	u64 val;
+	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* loc corresponds to P in the AArch64 ELF document. */
+		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+
+		/* sym is the ELF symbol we're referring to. */
+		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+			+ ELF64_R_SYM(rel[i].r_info);
+
+		/* val corresponds to (S + A) in the AArch64 ELF document. */
+		val = sym->st_value + rel[i].r_addend;
+
+		/* Check for overflow by default. */
+		overflow_check = true;
+
+		/* Perform the static relocation. */
+		switch (ELF64_R_TYPE(rel[i].r_info)) {
+		/* Null relocations. */
+		case R_ARM_NONE:
+		case R_AARCH64_NONE:
+			ovf = 0;
+			break;
+
+		/* Data relocations. */
+		case R_AARCH64_ABS64:
+			overflow_check = false;
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
+			break;
+		case R_AARCH64_ABS32:
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
+			break;
+		case R_AARCH64_ABS16:
+			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
+			break;
+		case R_AARCH64_PREL64:
+			overflow_check = false;
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
+			break;
+		case R_AARCH64_PREL32:
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
+			break;
+		case R_AARCH64_PREL16:
+			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
+			break;
+
+		/* MOVW instruction relocations. */
+		case R_AARCH64_MOVW_UABS_G0_NC:
+			overflow_check = false;
+		case R_AARCH64_MOVW_UABS_G0:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+					      INSN_IMM_16);
+			break;
+		case R_AARCH64_MOVW_UABS_G1_NC:
+			overflow_check = false;
+		case R_AARCH64_MOVW_UABS_G1:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+					      INSN_IMM_16);
+			break;
+		case R_AARCH64_MOVW_UABS_G2_NC:
+			overflow_check = false;
+		case R_AARCH64_MOVW_UABS_G2:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+					      INSN_IMM_16);
+			break;
+		case R_AARCH64_MOVW_UABS_G3:
+			/* We're using the top bits so we can't overflow. */
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
+					      INSN_IMM_16);
+			break;
+		case R_AARCH64_MOVW_SABS_G0:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
+					      INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_SABS_G1:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
+					      INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_SABS_G2:
+			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
+					      INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G0_NC:
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+					      INSN_IMM_MOVK);
+			break;
+		case R_AARCH64_MOVW_PREL_G0:
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
+					      INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G1_NC:
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+					      INSN_IMM_MOVK);
+			break;
+		case R_AARCH64_MOVW_PREL_G1:
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
+					      INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G2_NC:
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+					      INSN_IMM_MOVK);
+			break;
+		case R_AARCH64_MOVW_PREL_G2:
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
+					      INSN_IMM_MOVNZ);
+			break;
+		case R_AARCH64_MOVW_PREL_G3:
+			/* We're using the top bits so we can't overflow. */
+			overflow_check = false;
+			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
+					      INSN_IMM_MOVNZ);
+			break;
+
+		/* Immediate instruction relocations. */
+		case R_AARCH64_LD_PREL_LO19:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+					     INSN_IMM_19);
+			break;
+		case R_AARCH64_ADR_PREL_LO21:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+					     INSN_IMM_ADR);
+			break;
+		case R_AARCH64_ADR_PREL_PG_HI21_NC:
+			overflow_check = false;
+		case R_AARCH64_ADR_PREL_PG_HI21:
+			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+					     INSN_IMM_ADR);
+			break;
+		case R_AARCH64_ADD_ABS_LO12_NC:
+		case R_AARCH64_LDST8_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
+					     INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST16_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
+					     INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST32_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
+					     INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST64_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
+					     INSN_IMM_12);
+			break;
+		case R_AARCH64_LDST128_ABS_LO12_NC:
+			overflow_check = false;
+			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
+					     INSN_IMM_12);
+			break;
+		case R_AARCH64_TSTBR14:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
+					     INSN_IMM_14);
+			break;
+		case R_AARCH64_CONDBR19:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
+					     INSN_IMM_19);
+			break;
+		case R_AARCH64_JUMP26:
+		case R_AARCH64_CALL26:
+			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
+					     INSN_IMM_26);
+			break;
+
+		default:
+			pr_err("module %s: unsupported RELA relocation: %llu\n",
+			       me->name, ELF64_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+
+		if (overflow_check && ovf == -ERANGE)
+			goto overflow;
+
+	}
+
+	return 0;
+
+overflow:
+	pr_err("module %s: overflow in relocation type %d val %Lx\n",
+	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
+	return -ENOEXEC;
+}
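
To see the mask-and-shift technique that encode_insn_immediate() uses in
isolation, here is a minimal, hypothetical user-space sketch (not part of
the patch; names are illustrative). It patches the 16-bit imm16 field,
which sits at bits 5-20 of an AArch64 MOVZ/MOVK instruction, the way the
INSN_IMM_16 case does:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t encode_imm16(uint32_t insn, uint64_t imm)
	{
		const uint32_t mask = (1U << 16) - 1;	/* 16-bit field */
		const int shift = 5;			/* imm16 at bits 5-20 */

		insn &= ~(mask << shift);		/* clear the old immediate */
		insn |= ((uint32_t)imm & mask) << shift;/* insert the new one */
		return insn;
	}

	int main(void)
	{
		/* 0xd2800000 is MOVZ x0, #0; relocate it to load 0xbeef. */
		uint32_t insn = 0xd2800000;

		insn = encode_imm16(insn, 0xbeef);
		printf("patched insn: 0x%08x\n", insn);	/* 0xd297dde0 */
		return 0;
	}

Any bits of the immediate above the field width are silently dropped here,
which is why the kernel code performs its overflow checks separately, on
the unshifted relocation value.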
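Similarly, the page-relative computation used for the ADRP-style
relocations (RELOC_OP_PAGE) and the sign-extension overflow test in
reloc_insn_imm() can be sketched stand-alone. This is again a
hypothetical user-space illustration, assuming an arithmetic right shift
of signed values, as the kernel code itself does; the addresses in main()
are made up:

	#include <stdint.h>
	#include <stdio.h>

	/* Page-relative distance, as for R_AARCH64_ADR_PREL_PG_HI21. */
	static int64_t page_delta(uint64_t sym, uint64_t place)
	{
		return (int64_t)((sym & ~0xfffULL) - (place & ~0xfffULL));
	}

	/* Nonzero if sval does not fit in a signed field of 'len' bits. */
	static int overflows(int64_t sval, int len)
	{
		uint64_t imm_mask = (1ULL << len) - 1;

		/* Keep the bits above the field, including its sign bit... */
		sval = (int64_t)(sval & ~(imm_mask >> 1)) >> (len - 1);
		/* ...which must now be all zeros (0) or all ones (-1). */
		return (uint64_t)(sval + 1) >= 2;
	}

	int main(void)
	{
		int64_t d = page_delta(0xffff000000100123ULL,
				       0xffff0000000fe456ULL);

		/* ADRP encodes the page offset as a signed 21-bit value. */
		printf("page delta %lld, overflows: %d\n",
		       (long long)(d >> 12), overflows(d >> 12, 21));
		return 0;
	}

Masking both addresses down to 4KB page boundaries before subtracting is
what lets ADRP ignore the low 12 bits, which are then supplied by the
paired ADD/LDR via the *_LO12 relocations handled above.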