* [PATCH v3 05/10] s390: Change s390_kernel_write() return type to match memcpy() [not found] <cover.1587812518.git.jpoimboe@redhat.com> @ 2020-04-25 11:07 ` Josh Poimboeuf 2020-04-25 11:07 ` [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations Josh Poimboeuf 1 sibling, 0 replies; 3+ messages in thread From: Josh Poimboeuf @ 2020-04-25 11:07 UTC (permalink / raw) To: live-patching Cc: linux-kernel, Peter Zijlstra, Jessica Yu, Joe Lawrence, linux-s390, heiko.carstens s390_kernel_write()'s function type is almost identical to memcpy(). Change its return type to "void *" so they can be used interchangeably. Cc: linux-s390@vger.kernel.org Cc: heiko.carstens@de.ibm.com Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com> --- arch/s390/include/asm/uaccess.h | 2 +- arch/s390/mm/maccess.c | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index a470f1fa9f2a..324438889fe1 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -276,6 +276,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo } int copy_to_user_real(void __user *dest, void *src, unsigned long count); -void s390_kernel_write(void *dst, const void *src, size_t size); +void *s390_kernel_write(void *dst, const void *src, size_t size); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index de7ca4b6718f..22a0be655f27 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -55,19 +55,22 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz */ static DEFINE_SPINLOCK(s390_kernel_write_lock); -void notrace s390_kernel_write(void *dst, const void *src, size_t size) +notrace void *s390_kernel_write(void *dst, const void *src, size_t size) { + void *tmp = dst; unsigned long flags; long copied; spin_lock_irqsave(&s390_kernel_write_lock, flags); while (size) { - copied 
= s390_kernel_write_odd(dst, src, size); - dst += copied; + copied = s390_kernel_write_odd(tmp, src, size); + tmp += copied; src += copied; size -= copied; } spin_unlock_irqrestore(&s390_kernel_write_lock, flags); + + return dst; } static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count) -- 2.21.1 ^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations [not found] <cover.1587812518.git.jpoimboe@redhat.com> 2020-04-25 11:07 ` [PATCH v3 05/10] s390: Change s390_kernel_write() return type to match memcpy() Josh Poimboeuf @ 2020-04-25 11:07 ` Josh Poimboeuf 2020-04-25 11:14 ` Josh Poimboeuf 1 sibling, 1 reply; 3+ messages in thread From: Josh Poimboeuf @ 2020-04-25 11:07 UTC (permalink / raw) To: live-patching Cc: linux-kernel, Peter Zijlstra, Jessica Yu, Joe Lawrence, linux-s390, Gerald Schaefer, Christian Borntraeger From: Peter Zijlstra <peterz@infradead.org> Because of late module patching, a livepatch module needs to be able to apply some of its relocations well after it has been loaded. Instead of playing games with module_{dis,en}able_ro(), use existing text poking mechanisms to apply relocations after module loading. So far only x86, s390 and Power have HAVE_LIVEPATCH but only the first two also have STRICT_MODULE_RWX. This will allow removal of the last module_disable_ro() usage in livepatch. The ultimate goal is to completely disallow making executable mappings writable. [ jpoimboe: Split up patches. Use mod state to determine whether memcpy() can be used. Test and add fixes. 
] Cc: linux-s390@vger.kernel.org Cc: Heiko Carstens heiko.carstens@de.ibm.com Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> --- arch/s390/kernel/module.c | 147 +++++++++++++++++++++++--------------- 1 file changed, 88 insertions(+), 59 deletions(-) diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index ba8f19bb438b..4055f1c49814 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -19,6 +19,7 @@ #include <linux/kasan.h> #include <linux/moduleloader.h> #include <linux/bug.h> +#include <linux/memory.h> #include <asm/alternative.h> #include <asm/nospec-branch.h> #include <asm/facility.h> @@ -174,10 +175,12 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, } static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, - int sign, int bits, int shift) + int sign, int bits, int shift, + void *(*write)(void *dest, const void *src, size_t len)) { unsigned long umax; long min, max; + void *dest = (void *)loc; if (val & ((1UL << shift) - 1)) return -ENOEXEC; @@ -194,26 +197,33 @@ static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, return -ENOEXEC; } - if (bits == 8) - *(unsigned char *) loc = val; - else if (bits == 12) - *(unsigned short *) loc = (val & 0xfff) | + if (bits == 8) { + unsigned char tmp = val; + write(dest, &tmp, 1); + } else if (bits == 12) { + unsigned short tmp = (val & 0xfff) | (*(unsigned short *) loc & 0xf000); - else if (bits == 16) - *(unsigned short *) loc = val; - else if (bits == 20) - *(unsigned int *) loc = (val & 0xfff) << 16 | - (val & 0xff000) >> 4 | - (*(unsigned int *) loc & 0xf00000ff); - else if (bits == 32) - *(unsigned int *) loc = val; - else if (bits == 64) - *(unsigned long *) loc = val; + write(dest, &tmp, 2); + } 
else if (bits == 16) { + unsigned short tmp = val; + write(dest, &tmp, 2); + } else if (bits == 20) { + unsigned int tmp = (val & 0xfff) << 16 | + (val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff); + write(dest, &tmp, 4); + } else if (bits == 32) { + unsigned int tmp = val; + write(dest, &tmp, 4); + } else if (bits == 64) { + unsigned long tmp = val; + write(dest, &tmp, 8); + } return 0; } static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, - const char *strtab, struct module *me) + const char *strtab, struct module *me, + void *(*write)(void *dest, const void *src, size_t len)) { struct mod_arch_syminfo *info; Elf_Addr loc, val; @@ -241,17 +251,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_64: /* Direct 64 bit. */ val += rela->r_addend; if (r_type == R_390_8) - rc = apply_rela_bits(loc, val, 0, 8, 0); + rc = apply_rela_bits(loc, val, 0, 8, 0, write); else if (r_type == R_390_12) - rc = apply_rela_bits(loc, val, 0, 12, 0); + rc = apply_rela_bits(loc, val, 0, 12, 0, write); else if (r_type == R_390_16) - rc = apply_rela_bits(loc, val, 0, 16, 0); + rc = apply_rela_bits(loc, val, 0, 16, 0, write); else if (r_type == R_390_20) - rc = apply_rela_bits(loc, val, 1, 20, 0); + rc = apply_rela_bits(loc, val, 1, 20, 0, write); else if (r_type == R_390_32) - rc = apply_rela_bits(loc, val, 0, 32, 0); + rc = apply_rela_bits(loc, val, 0, 32, 0, write); else if (r_type == R_390_64) - rc = apply_rela_bits(loc, val, 0, 64, 0); + rc = apply_rela_bits(loc, val, 0, 64, 0, write); break; case R_390_PC16: /* PC relative 16 bit. */ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ @@ -260,15 +270,15 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PC64: /* PC relative 64 bit. 
*/ val += rela->r_addend - loc; if (r_type == R_390_PC16) - rc = apply_rela_bits(loc, val, 1, 16, 0); + rc = apply_rela_bits(loc, val, 1, 16, 0, write); else if (r_type == R_390_PC16DBL) - rc = apply_rela_bits(loc, val, 1, 16, 1); + rc = apply_rela_bits(loc, val, 1, 16, 1, write); else if (r_type == R_390_PC32DBL) - rc = apply_rela_bits(loc, val, 1, 32, 1); + rc = apply_rela_bits(loc, val, 1, 32, 1, write); else if (r_type == R_390_PC32) - rc = apply_rela_bits(loc, val, 1, 32, 0); + rc = apply_rela_bits(loc, val, 1, 32, 0, write); else if (r_type == R_390_PC64) - rc = apply_rela_bits(loc, val, 1, 64, 0); + rc = apply_rela_bits(loc, val, 1, 64, 0, write); break; case R_390_GOT12: /* 12 bit GOT offset. */ case R_390_GOT16: /* 16 bit GOT offset. */ @@ -283,33 +293,33 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_GOTPLT64: /* 64 bit offset to jump slot. */ case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */ if (info->got_initialized == 0) { - Elf_Addr *gotent; + Elf_Addr *gotent = me->core_layout.base + + me->arch.got_offset + + info->got_offset; - gotent = me->core_layout.base + me->arch.got_offset + - info->got_offset; - *gotent = val; + write(gotent, &val, sizeof(*gotent)); info->got_initialized = 1; } val = info->got_offset + rela->r_addend; if (r_type == R_390_GOT12 || r_type == R_390_GOTPLT12) - rc = apply_rela_bits(loc, val, 0, 12, 0); + rc = apply_rela_bits(loc, val, 0, 12, 0, write); else if (r_type == R_390_GOT16 || r_type == R_390_GOTPLT16) - rc = apply_rela_bits(loc, val, 0, 16, 0); + rc = apply_rela_bits(loc, val, 0, 16, 0, write); else if (r_type == R_390_GOT20 || r_type == R_390_GOTPLT20) - rc = apply_rela_bits(loc, val, 1, 20, 0); + rc = apply_rela_bits(loc, val, 1, 20, 0, write); else if (r_type == R_390_GOT32 || r_type == R_390_GOTPLT32) - rc = apply_rela_bits(loc, val, 0, 32, 0); + rc = apply_rela_bits(loc, val, 0, 32, 0, write); else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) - rc = 
apply_rela_bits(loc, val, 0, 64, 0); + rc = apply_rela_bits(loc, val, 0, 64, 0, write); else if (r_type == R_390_GOTENT || r_type == R_390_GOTPLTENT) { val += (Elf_Addr) me->core_layout.base - loc; - rc = apply_rela_bits(loc, val, 1, 32, 1); + rc = apply_rela_bits(loc, val, 1, 32, 1, write); } break; case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ @@ -320,25 +330,29 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ if (info->plt_initialized == 0) { - unsigned int *ip; - ip = me->core_layout.base + me->arch.plt_offset + - info->plt_offset; - ip[0] = 0x0d10e310; /* basr 1,0 */ - ip[1] = 0x100a0004; /* lg 1,10(1) */ + unsigned int insn[5]; + unsigned int *ip = me->core_layout.base + + me->arch.plt_offset + + info->plt_offset; + + insn[0] = 0x0d10e310; /* basr 1,0 */ + insn[1] = 0x100a0004; /* lg 1,10(1) */ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { unsigned int *ij; ij = me->core_layout.base + me->arch.plt_offset + me->arch.plt_size - PLT_ENTRY_SIZE; - ip[2] = 0xa7f40000 + /* j __jump_r1 */ + insn[2] = 0xa7f40000 + /* j __jump_r1 */ (unsigned int)(u16) (((unsigned long) ij - 8 - (unsigned long) ip) / 2); } else { - ip[2] = 0x07f10000; /* br %r1 */ + insn[2] = 0x07f10000; /* br %r1 */ } - ip[3] = (unsigned int) (val >> 32); - ip[4] = (unsigned int) val; + insn[3] = (unsigned int) (val >> 32); + insn[4] = (unsigned int) val; + + write(ip, insn, sizeof(insn)); info->plt_initialized = 1; } if (r_type == R_390_PLTOFF16 || @@ -357,17 +371,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val += rela->r_addend - loc; } if (r_type == R_390_PLT16DBL) - rc = apply_rela_bits(loc, val, 1, 16, 1); + rc = apply_rela_bits(loc, val, 1, 16, 1, write); else if (r_type == R_390_PLTOFF16) - rc = apply_rela_bits(loc, val, 0, 16, 0); + rc = apply_rela_bits(loc, val, 0, 16, 0, write); else if (r_type == 
R_390_PLT32DBL) - rc = apply_rela_bits(loc, val, 1, 32, 1); + rc = apply_rela_bits(loc, val, 1, 32, 1, write); else if (r_type == R_390_PLT32 || r_type == R_390_PLTOFF32) - rc = apply_rela_bits(loc, val, 0, 32, 0); + rc = apply_rela_bits(loc, val, 0, 32, 0, write); else if (r_type == R_390_PLT64 || r_type == R_390_PLTOFF64) - rc = apply_rela_bits(loc, val, 0, 64, 0); + rc = apply_rela_bits(loc, val, 0, 64, 0, write); break; case R_390_GOTOFF16: /* 16 bit offset to GOT. */ case R_390_GOTOFF32: /* 32 bit offset to GOT. */ @@ -375,20 +389,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val = val + rela->r_addend - ((Elf_Addr) me->core_layout.base + me->arch.got_offset); if (r_type == R_390_GOTOFF16) - rc = apply_rela_bits(loc, val, 0, 16, 0); + rc = apply_rela_bits(loc, val, 0, 16, 0, write); else if (r_type == R_390_GOTOFF32) - rc = apply_rela_bits(loc, val, 0, 32, 0); + rc = apply_rela_bits(loc, val, 0, 32, 0, write); else if (r_type == R_390_GOTOFF64) - rc = apply_rela_bits(loc, val, 0, 64, 0); + rc = apply_rela_bits(loc, val, 0, 64, 0, write); break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) - rc = apply_rela_bits(loc, val, 1, 32, 0); + rc = apply_rela_bits(loc, val, 1, 32, 0, write); else if (r_type == R_390_GOTPCDBL) - rc = apply_rela_bits(loc, val, 1, 32, 1); + rc = apply_rela_bits(loc, val, 1, 32, 1, write); break; case R_390_COPY: case R_390_GLOB_DAT: /* Create GOT entry. 
*/ @@ -412,9 +426,10 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, return 0; } -int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, +static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, - struct module *me) + struct module *me, + void *(*write)(void *dest, const void *src, size_t len)) { Elf_Addr base; Elf_Sym *symtab; @@ -430,13 +445,27 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); for (i = 0; i < n; i++, rela++) { - rc = apply_rela(rela, base, symtab, strtab, me); + rc = apply_rela(rela, base, symtab, strtab, me, write); if (rc) return rc; } return 0; } +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + bool early = me->state == MODULE_STATE_UNFORMED; + void *(*write)(void *, const void *, size_t) = memcpy; + + if (!early) + write = s390_kernel_write; + + return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, + write); +} + int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) -- 2.21.1 ^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations 2020-04-25 11:07 ` [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations Josh Poimboeuf @ 2020-04-25 11:14 ` Josh Poimboeuf 0 siblings, 0 replies; 3+ messages in thread From: Josh Poimboeuf @ 2020-04-25 11:14 UTC (permalink / raw) To: live-patching Cc: linux-kernel, Peter Zijlstra, Jessica Yu, Joe Lawrence, linux-s390, Gerald Schaefer, Christian Borntraeger, Heiko Carstens On Sat, Apr 25, 2020 at 06:07:26AM -0500, Josh Poimboeuf wrote: > From: Peter Zijlstra <peterz@infradead.org> > > Because of late module patching, a livepatch module needs to be able to > apply some of its relocations well after it has been loaded. Instead of > playing games with module_{dis,en}able_ro(), use existing text poking > mechanisms to apply relocations after module loading. > > So far only x86, s390 and Power have HAVE_LIVEPATCH but only the first > two also have STRICT_MODULE_RWX. > > This will allow removal of the last module_disable_ro() usage in > livepatch. The ultimate goal is to completely disallow making > executable mappings writable. > > [ jpoimboe: Split up patches. Use mod state to determine whether > memcpy() can be used. Test and add fixes. 
] > > Cc: linux-s390@vger.kernel.org > Cc: Heiko Carstens heiko.carstens@de.ibm.com Gah, I somehow messed up the formatting on this Cc, should be Cc: Heiko Carstens <heiko.carstens@de.ibm.com> > Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com> > Cc: Christian Borntraeger <borntraeger@de.ibm.com> > Suggested-by: Josh Poimboeuf <jpoimboe@redhat.com> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> > Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com> > Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> > --- > arch/s390/kernel/module.c | 147 +++++++++++++++++++++++--------------- > 1 file changed, 88 insertions(+), 59 deletions(-) > > diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c > index ba8f19bb438b..4055f1c49814 100644 > --- a/arch/s390/kernel/module.c > +++ b/arch/s390/kernel/module.c > @@ -19,6 +19,7 @@ > #include <linux/kasan.h> > #include <linux/moduleloader.h> > #include <linux/bug.h> > +#include <linux/memory.h> > #include <asm/alternative.h> > #include <asm/nospec-branch.h> > #include <asm/facility.h> > @@ -174,10 +175,12 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, > } > > static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, > - int sign, int bits, int shift) > + int sign, int bits, int shift, > + void *(*write)(void *dest, const void *src, size_t len)) > { > unsigned long umax; > long min, max; > + void *dest = (void *)loc; > > if (val & ((1UL << shift) - 1)) > return -ENOEXEC; > @@ -194,26 +197,33 @@ static int apply_rela_bits(Elf_Addr loc, Elf_Addr val, > return -ENOEXEC; > } > > - if (bits == 8) > - *(unsigned char *) loc = val; > - else if (bits == 12) > - *(unsigned short *) loc = (val & 0xfff) | > + if (bits == 8) { > + unsigned char tmp = val; > + write(dest, &tmp, 1); > + } else if (bits == 12) { > + unsigned short tmp = (val & 0xfff) | > (*(unsigned short *) loc & 0xf000); > - else if (bits == 16) > - *(unsigned short *) loc = val; > - else if (bits == 20) > - *(unsigned int *) loc = 
(val & 0xfff) << 16 | > - (val & 0xff000) >> 4 | > - (*(unsigned int *) loc & 0xf00000ff); > - else if (bits == 32) > - *(unsigned int *) loc = val; > - else if (bits == 64) > - *(unsigned long *) loc = val; > + write(dest, &tmp, 2); > + } else if (bits == 16) { > + unsigned short tmp = val; > + write(dest, &tmp, 2); > + } else if (bits == 20) { > + unsigned int tmp = (val & 0xfff) << 16 | > + (val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff); > + write(dest, &tmp, 4); > + } else if (bits == 32) { > + unsigned int tmp = val; > + write(dest, &tmp, 4); > + } else if (bits == 64) { > + unsigned long tmp = val; > + write(dest, &tmp, 8); > + } > return 0; > } > > static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > - const char *strtab, struct module *me) > + const char *strtab, struct module *me, > + void *(*write)(void *dest, const void *src, size_t len)) > { > struct mod_arch_syminfo *info; > Elf_Addr loc, val; > @@ -241,17 +251,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > case R_390_64: /* Direct 64 bit. */ > val += rela->r_addend; > if (r_type == R_390_8) > - rc = apply_rela_bits(loc, val, 0, 8, 0); > + rc = apply_rela_bits(loc, val, 0, 8, 0, write); > else if (r_type == R_390_12) > - rc = apply_rela_bits(loc, val, 0, 12, 0); > + rc = apply_rela_bits(loc, val, 0, 12, 0, write); > else if (r_type == R_390_16) > - rc = apply_rela_bits(loc, val, 0, 16, 0); > + rc = apply_rela_bits(loc, val, 0, 16, 0, write); > else if (r_type == R_390_20) > - rc = apply_rela_bits(loc, val, 1, 20, 0); > + rc = apply_rela_bits(loc, val, 1, 20, 0, write); > else if (r_type == R_390_32) > - rc = apply_rela_bits(loc, val, 0, 32, 0); > + rc = apply_rela_bits(loc, val, 0, 32, 0, write); > else if (r_type == R_390_64) > - rc = apply_rela_bits(loc, val, 0, 64, 0); > + rc = apply_rela_bits(loc, val, 0, 64, 0, write); > break; > case R_390_PC16: /* PC relative 16 bit. */ > case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. 
*/ > @@ -260,15 +270,15 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > case R_390_PC64: /* PC relative 64 bit. */ > val += rela->r_addend - loc; > if (r_type == R_390_PC16) > - rc = apply_rela_bits(loc, val, 1, 16, 0); > + rc = apply_rela_bits(loc, val, 1, 16, 0, write); > else if (r_type == R_390_PC16DBL) > - rc = apply_rela_bits(loc, val, 1, 16, 1); > + rc = apply_rela_bits(loc, val, 1, 16, 1, write); > else if (r_type == R_390_PC32DBL) > - rc = apply_rela_bits(loc, val, 1, 32, 1); > + rc = apply_rela_bits(loc, val, 1, 32, 1, write); > else if (r_type == R_390_PC32) > - rc = apply_rela_bits(loc, val, 1, 32, 0); > + rc = apply_rela_bits(loc, val, 1, 32, 0, write); > else if (r_type == R_390_PC64) > - rc = apply_rela_bits(loc, val, 1, 64, 0); > + rc = apply_rela_bits(loc, val, 1, 64, 0, write); > break; > case R_390_GOT12: /* 12 bit GOT offset. */ > case R_390_GOT16: /* 16 bit GOT offset. */ > @@ -283,33 +293,33 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > case R_390_GOTPLT64: /* 64 bit offset to jump slot. */ > case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. 
*/ > if (info->got_initialized == 0) { > - Elf_Addr *gotent; > + Elf_Addr *gotent = me->core_layout.base + > + me->arch.got_offset + > + info->got_offset; > > - gotent = me->core_layout.base + me->arch.got_offset + > - info->got_offset; > - *gotent = val; > + write(gotent, &val, sizeof(*gotent)); > info->got_initialized = 1; > } > val = info->got_offset + rela->r_addend; > if (r_type == R_390_GOT12 || > r_type == R_390_GOTPLT12) > - rc = apply_rela_bits(loc, val, 0, 12, 0); > + rc = apply_rela_bits(loc, val, 0, 12, 0, write); > else if (r_type == R_390_GOT16 || > r_type == R_390_GOTPLT16) > - rc = apply_rela_bits(loc, val, 0, 16, 0); > + rc = apply_rela_bits(loc, val, 0, 16, 0, write); > else if (r_type == R_390_GOT20 || > r_type == R_390_GOTPLT20) > - rc = apply_rela_bits(loc, val, 1, 20, 0); > + rc = apply_rela_bits(loc, val, 1, 20, 0, write); > else if (r_type == R_390_GOT32 || > r_type == R_390_GOTPLT32) > - rc = apply_rela_bits(loc, val, 0, 32, 0); > + rc = apply_rela_bits(loc, val, 0, 32, 0, write); > else if (r_type == R_390_GOT64 || > r_type == R_390_GOTPLT64) > - rc = apply_rela_bits(loc, val, 0, 64, 0); > + rc = apply_rela_bits(loc, val, 0, 64, 0, write); > else if (r_type == R_390_GOTENT || > r_type == R_390_GOTPLTENT) { > val += (Elf_Addr) me->core_layout.base - loc; > - rc = apply_rela_bits(loc, val, 1, 32, 1); > + rc = apply_rela_bits(loc, val, 1, 32, 1, write); > } > break; > case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */ > @@ -320,25 +330,29 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ > case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. 
*/ > if (info->plt_initialized == 0) { > - unsigned int *ip; > - ip = me->core_layout.base + me->arch.plt_offset + > - info->plt_offset; > - ip[0] = 0x0d10e310; /* basr 1,0 */ > - ip[1] = 0x100a0004; /* lg 1,10(1) */ > + unsigned int insn[5]; > + unsigned int *ip = me->core_layout.base + > + me->arch.plt_offset + > + info->plt_offset; > + > + insn[0] = 0x0d10e310; /* basr 1,0 */ > + insn[1] = 0x100a0004; /* lg 1,10(1) */ > if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { > unsigned int *ij; > ij = me->core_layout.base + > me->arch.plt_offset + > me->arch.plt_size - PLT_ENTRY_SIZE; > - ip[2] = 0xa7f40000 + /* j __jump_r1 */ > + insn[2] = 0xa7f40000 + /* j __jump_r1 */ > (unsigned int)(u16) > (((unsigned long) ij - 8 - > (unsigned long) ip) / 2); > } else { > - ip[2] = 0x07f10000; /* br %r1 */ > + insn[2] = 0x07f10000; /* br %r1 */ > } > - ip[3] = (unsigned int) (val >> 32); > - ip[4] = (unsigned int) val; > + insn[3] = (unsigned int) (val >> 32); > + insn[4] = (unsigned int) val; > + > + write(ip, insn, sizeof(insn)); > info->plt_initialized = 1; > } > if (r_type == R_390_PLTOFF16 || > @@ -357,17 +371,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > val += rela->r_addend - loc; > } > if (r_type == R_390_PLT16DBL) > - rc = apply_rela_bits(loc, val, 1, 16, 1); > + rc = apply_rela_bits(loc, val, 1, 16, 1, write); > else if (r_type == R_390_PLTOFF16) > - rc = apply_rela_bits(loc, val, 0, 16, 0); > + rc = apply_rela_bits(loc, val, 0, 16, 0, write); > else if (r_type == R_390_PLT32DBL) > - rc = apply_rela_bits(loc, val, 1, 32, 1); > + rc = apply_rela_bits(loc, val, 1, 32, 1, write); > else if (r_type == R_390_PLT32 || > r_type == R_390_PLTOFF32) > - rc = apply_rela_bits(loc, val, 0, 32, 0); > + rc = apply_rela_bits(loc, val, 0, 32, 0, write); > else if (r_type == R_390_PLT64 || > r_type == R_390_PLTOFF64) > - rc = apply_rela_bits(loc, val, 0, 64, 0); > + rc = apply_rela_bits(loc, val, 0, 64, 0, write); > break; > case R_390_GOTOFF16: /* 
16 bit offset to GOT. */ > case R_390_GOTOFF32: /* 32 bit offset to GOT. */ > @@ -375,20 +389,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > val = val + rela->r_addend - > ((Elf_Addr) me->core_layout.base + me->arch.got_offset); > if (r_type == R_390_GOTOFF16) > - rc = apply_rela_bits(loc, val, 0, 16, 0); > + rc = apply_rela_bits(loc, val, 0, 16, 0, write); > else if (r_type == R_390_GOTOFF32) > - rc = apply_rela_bits(loc, val, 0, 32, 0); > + rc = apply_rela_bits(loc, val, 0, 32, 0, write); > else if (r_type == R_390_GOTOFF64) > - rc = apply_rela_bits(loc, val, 0, 64, 0); > + rc = apply_rela_bits(loc, val, 0, 64, 0, write); > break; > case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ > case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ > val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + > rela->r_addend - loc; > if (r_type == R_390_GOTPC) > - rc = apply_rela_bits(loc, val, 1, 32, 0); > + rc = apply_rela_bits(loc, val, 1, 32, 0, write); > else if (r_type == R_390_GOTPCDBL) > - rc = apply_rela_bits(loc, val, 1, 32, 1); > + rc = apply_rela_bits(loc, val, 1, 32, 1, write); > break; > case R_390_COPY: > case R_390_GLOB_DAT: /* Create GOT entry. 
*/ > @@ -412,9 +426,10 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, > return 0; > } > > -int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, > +static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, > unsigned int symindex, unsigned int relsec, > - struct module *me) > + struct module *me, > + void *(*write)(void *dest, const void *src, size_t len)) > { > Elf_Addr base; > Elf_Sym *symtab; > @@ -430,13 +445,27 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, > n = sechdrs[relsec].sh_size / sizeof(Elf_Rela); > > for (i = 0; i < n; i++, rela++) { > - rc = apply_rela(rela, base, symtab, strtab, me); > + rc = apply_rela(rela, base, symtab, strtab, me, write); > if (rc) > return rc; > } > return 0; > } > > +int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, > + unsigned int symindex, unsigned int relsec, > + struct module *me) > +{ > + bool early = me->state == MODULE_STATE_UNFORMED; > + void *(*write)(void *, const void *, size_t) = memcpy; > + > + if (!early) > + write = s390_kernel_write; > + > + return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me, > + write); > +} > + > int module_finalize(const Elf_Ehdr *hdr, > const Elf_Shdr *sechdrs, > struct module *me) > -- > 2.21.1 > -- Josh ^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads: [~2020-04-25 11:15 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
[not found] <cover.1587812518.git.jpoimboe@redhat.com>
2020-04-25 11:07 ` [PATCH v3 05/10] s390: Change s390_kernel_write() return type to match memcpy() Josh Poimboeuf
2020-04-25 11:07 ` [PATCH v3 06/10] s390/module: Use s390_kernel_write() for late relocations Josh Poimboeuf
2020-04-25 11:14 ` Josh Poimboeuf
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox