linux-arm-kernel.lists.infradead.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/2] ARM: kernel: module PLT optimizations
@ 2016-08-16 15:51 Ard Biesheuvel
  2016-08-16 15:51 ` [PATCH 1/2] ARM: kernel: merge core and init PLTs Ard Biesheuvel
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: Ard Biesheuvel @ 2016-08-16 15:51 UTC (permalink / raw)
  To: linux-arm-kernel

As reported by Jongsung, the O(n^2) search in the PLT allocation code may
disproportionately affect module load time for modules with a larger number
of relocations.

Since the existing routines rather naively take branch instructions into
account that are internal to the module, we can improve the situation
significantly by checking the symbol section index first, and disregarding
symbols that are defined in the same module.

Patch #1 merges the core and init PLTs, since the latter is virtually empty
anyway.

Patch #2 implements the optimization to only take SHN_UNDEF symbols into
account.

Ard Biesheuvel (2):
  ARM: kernel: merge core and init PLTs
  ARM: kernel: allocate PLT entries only for external symbols

 arch/arm/include/asm/module.h |   6 +-
 arch/arm/kernel/module-plts.c | 100 ++++++++++----------
 arch/arm/kernel/module.lds    |   3 +-
 3 files changed, 53 insertions(+), 56 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 1/2] ARM: kernel: merge core and init PLTs
  2016-08-16 15:51 [PATCH 0/2] ARM: kernel: module PLT optimizations Ard Biesheuvel
@ 2016-08-16 15:51 ` Ard Biesheuvel
  2016-08-16 15:51 ` [PATCH 2/2] ARM: kernel: allocate PLT entries only for external symbols Ard Biesheuvel
  2016-08-17 11:06 ` [PATCH 0/2] ARM: kernel: module PLT optimizations Dave Martin
  2 siblings, 0 replies; 5+ messages in thread
From: Ard Biesheuvel @ 2016-08-16 15:51 UTC (permalink / raw)
  To: linux-arm-kernel

The PLT code uses a separate .init.plt section to allocate PLT entries
for jump and call instructions in __init code. However, even for fairly
sizable modules like mac80211.ko, we only end up with a couple of PLT
entries in the .init section, and so we can simplify the code
significantly by emitting all PLT entries into the same section.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm/include/asm/module.h |  6 +-
 arch/arm/kernel/module-plts.c | 68 +++++++-------------
 arch/arm/kernel/module.lds    |  3 +-
 3 files changed, 25 insertions(+), 52 deletions(-)

diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index e358b7966c06..464748b9fd7d 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -23,10 +23,8 @@ struct mod_arch_specific {
 	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 #ifdef CONFIG_ARM_MODULE_PLTS
-	struct elf32_shdr   *core_plt;
-	struct elf32_shdr   *init_plt;
-	int		    core_plt_count;
-	int		    init_plt_count;
+	struct elf32_shdr   *plt;
+	int		    plt_count;
 #endif
 };
 
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 0c7efc3446c0..6832d1d6444e 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -30,28 +30,16 @@ struct plt_entries {
 	u32	lit[PLT_ENT_COUNT];
 };
 
-static bool in_init(const struct module *mod, u32 addr)
-{
-	return addr - (u32)mod->init_layout.base < mod->init_layout.size;
-}
-
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
 	struct plt_entries *plt, *plt_end;
-	int c, *count;
-
-	if (in_init(mod, loc)) {
-		plt = (void *)mod->arch.init_plt->sh_addr;
-		plt_end = (void *)plt + mod->arch.init_plt->sh_size;
-		count = &mod->arch.init_plt_count;
-	} else {
-		plt = (void *)mod->arch.core_plt->sh_addr;
-		plt_end = (void *)plt + mod->arch.core_plt->sh_size;
-		count = &mod->arch.core_plt_count;
-	}
+	int c;
+
+	plt = (void *)mod->arch.plt->sh_addr;
+	plt_end = (void *)plt + mod->arch.plt->sh_size;
 
 	/* Look for an existing entry pointing to 'val' */
-	for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
+	for (c = mod->arch.plt_count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
 		int i;
 
 		if (!c) {
@@ -60,13 +48,13 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 				{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
 				{ val, }
 			};
-			++*count;
+			mod->arch.plt_count++;
 			return (u32)plt->ldr;
 		}
 		for (i = 0; i < PLT_ENT_COUNT; i++) {
 			if (!plt->lit[i]) {
 				plt->lit[i] = val;
-				++*count;
+				mod->arch.plt_count++;
 			}
 			if (plt->lit[i] == val)
 				return (u32)&plt->ldr[i];
@@ -132,21 +120,19 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long core_plts = 0, init_plts = 0;
+	unsigned long plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
 
 	/*
 	 * To store the PLTs, we expand the .text section for core module code
-	 * and the .init.text section for initialization code.
+	 * and for initialization code.
 	 */
 	for (s = sechdrs; s < sechdrs_end; ++s)
-		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
-			mod->arch.core_plt = s;
-		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
-			mod->arch.init_plt = s;
+		if (strcmp(".plt", secstrings + s->sh_name) == 0)
+			mod->arch.plt = s;
 
-	if (!mod->arch.core_plt || !mod->arch.init_plt) {
-		pr_err("%s: sections missing\n", mod->name);
+	if (!mod->arch.plt) {
+		pr_err("%s: module PLT section missing\n", mod->name);
 		return -ENOEXEC;
 	}
 
@@ -158,26 +144,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (s->sh_type != SHT_REL)
 			continue;
 
-		if (strstr(secstrings + s->sh_name, ".init"))
-			init_plts += count_plts(dstsec->sh_addr, rels, numrels);
-		else
-			core_plts += count_plts(dstsec->sh_addr, rels, numrels);
+		plts += count_plts(dstsec->sh_addr, rels, numrels);
 	}
 
-	mod->arch.core_plt->sh_type = SHT_NOBITS;
-	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
-					       sizeof(struct plt_entries));
-	mod->arch.core_plt_count = 0;
-
-	mod->arch.init_plt->sh_type = SHT_NOBITS;
-	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
-					       sizeof(struct plt_entries));
-	mod->arch.init_plt_count = 0;
-	pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
-		 mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
+	mod->arch.plt->sh_type = SHT_NOBITS;
+	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+					  sizeof(struct plt_entries));
+	mod->arch.plt_count = 0;
+
+	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
 	return 0;
 }
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 3682fa107918..05881e2b414c 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,4 +1,3 @@
 SECTIONS {
-        .core.plt : { BYTE(0) }
-        .init.plt : { BYTE(0) }
+	.plt : { BYTE(0) }
 }
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 2/2] ARM: kernel: allocate PLT entries only for external symbols
  2016-08-16 15:51 [PATCH 0/2] ARM: kernel: module PLT optimizations Ard Biesheuvel
  2016-08-16 15:51 ` [PATCH 1/2] ARM: kernel: merge core and init PLTs Ard Biesheuvel
@ 2016-08-16 15:51 ` Ard Biesheuvel
  2016-08-17 11:06 ` [PATCH 0/2] ARM: kernel: module PLT optimizations Dave Martin
  2 siblings, 0 replies; 5+ messages in thread
From: Ard Biesheuvel @ 2016-08-16 15:51 UTC (permalink / raw)
  To: linux-arm-kernel

When CONFIG_ARM_MODULE_PLTS is enabled, jump and call instructions in
modules no longer need to be within 16 MB (8 MB for Thumb2) of their
targets. If they are further away, a PLT entry will be generated on the
fly for each of them, which extends the range to the entire 32-bit
address space.

However, since these PLT entries will become the branch targets of the
original jump and call instructions, the PLT itself needs to be in
range, or we end up in the same situation we started in. Since the PLT
is in a separate section, this essentially means that all jumps and calls
inside the same module must be resolvable without PLT entries.

The PLT allocation code executes before the module itself is loaded in
its final location, and so it has to use a worst-case estimate for
which jumps and calls will require an entry in the PLT at relocation
time. As an optimization, this code deduplicates entries pointing to
the same symbol, using a O(n^2) algorithm. However, it does not take
the above into account, i.e., that PLT entries will only be needed for
jump and call relocations against symbols that are not defined in the
module.

So disregard relocations against symbols that are defined in the module
itself.

As an additional minor optimization, ignore input sections that lack
the SHF_EXECINSTR flag. Since jump and call relocations operate on
executable instructions only, there is no need to look in sections that
do not contain executable code.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm/kernel/module-plts.c | 32 +++++++++++++++++---
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 6832d1d6444e..59d2fcaff7d2 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -88,7 +88,8 @@ static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
 }
 
 /* Count how many PLT entries we may need */
-static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
+static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+			       const Elf32_Rel *rel, int num)
 {
 	unsigned int ret = 0;
 	int i;
@@ -97,7 +98,17 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
 	 * Sure, this is order(n^2), but it's usually short, and not
 	 * time critical
 	 */
-	for (i = 0; i < num; i++)
+	for (i = 0; i < num; i++) {
+		/*
+		 * We only have to consider branch targets that resolve
+		 * to undefined symbols. This is not simply a heuristic,
+		 * it is a fundamental limitation, since the PLT itself
+		 * is part of the module, and needs to be within range
+		 * as well, so modules can never grow beyond that limit.
+		 */
+		if (syms[ELF32_R_SYM(rel[i].r_info)].st_shndx != SHN_UNDEF)
+			continue;
+
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 		case R_ARM_CALL:
 		case R_ARM_PC24:
@@ -114,6 +125,7 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
 				ret++;
 #endif
 		}
+	}
 	return ret;
 }
 
@@ -122,19 +134,27 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 {
 	unsigned long plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+	Elf32_Sym *syms = NULL;
 
 	/*
 	 * To store the PLTs, we expand the .text section for core module code
 	 * and for initialization code.
 	 */
-	for (s = sechdrs; s < sechdrs_end; ++s)
+	for (s = sechdrs; s < sechdrs_end; ++s) {
 		if (strcmp(".plt", secstrings + s->sh_name) == 0)
 			mod->arch.plt = s;
+		else if (s->sh_type == SHT_SYMTAB)
+			syms = (Elf32_Sym *)s->sh_addr;
+	}
 
 	if (!mod->arch.plt) {
 		pr_err("%s: module PLT section missing\n", mod->name);
 		return -ENOEXEC;
 	}
+	if (!syms) {
+		pr_err("%s: module symtab section missing\n", mod->name);
+		return -ENOEXEC;
+	}
 
 	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
 		const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
@@ -144,7 +164,11 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		if (s->sh_type != SHT_REL)
 			continue;
 
-		plts += count_plts(dstsec->sh_addr, rels, numrels);
+		/* ignore relocations that operate on non-exec sections */
+		if (!(dstsec->sh_flags & SHF_EXECINSTR))
+			continue;
+
+		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
 	}
 
 	mod->arch.plt->sh_type = SHT_NOBITS;
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH 0/2] ARM: kernel: module PLT optimizations
  2016-08-16 15:51 [PATCH 0/2] ARM: kernel: module PLT optimizations Ard Biesheuvel
  2016-08-16 15:51 ` [PATCH 1/2] ARM: kernel: merge core and init PLTs Ard Biesheuvel
  2016-08-16 15:51 ` [PATCH 2/2] ARM: kernel: allocate PLT entries only for external symbols Ard Biesheuvel
@ 2016-08-17 11:06 ` Dave Martin
  2016-08-17 11:10   ` Ard Biesheuvel
  2 siblings, 1 reply; 5+ messages in thread
From: Dave Martin @ 2016-08-17 11:06 UTC (permalink / raw)
  To: linux-arm-kernel

On Tue, Aug 16, 2016 at 05:51:44PM +0200, Ard Biesheuvel wrote:
> As reported by Jongsung, the O(n^2) search in the PLT allocation code may
> disproportionately affect module load time for modules with a larger number
> of relocations.
> 
> Since the existing routines rather naively take branch instructions into
> account that are internal to the module, we can improve the situation
> significantly by checking the symbol section index first, and disregarding
> symbols that are defined in the same module.
> 
> Patch #1 merge the core and init PLTs, since the latter is virtually empty
> anyway.
> 
> Patch #2 implements the optimization to only take SHN_UNDEF symbols into
> account.

Could we not sort the relocs on target address at the modpost stage?

The order of reloc entries in ELF is not significant, so we could sort
them offline before the module loader sees them.

The kernel code then just needs to merge adjacent relocs with the same
target to each PLT.  This won't be optimal for all cases, but it's
simple.


Alternatively, we could do the reloc->PLT grouping work at modpost time,
spitting out the result into some extra ELF section which the module loader
can consume.  This may be a better approach if we want to be able to
allocate a single PLT to relocs in multiple sections.

Either approach keeps the module loader logic simple and O(n), and
should be easily reused across arches (if someone needs that).

[...]

Cheers
---Dave

> 
> I quickly tested this with the module above:
> Before:
> 
> # insmod cfg80211.ko
> [   45.981587] Allocating 238 PLT entries for 3632 external
> jumps/calls (out of 3632 relocations)
> [   45.981967] Allocating 4 PLT entries for 10 external jumps/calls
> (out of 10 relocations)
> [   45.982386] Allocating 19 PLT entries for 37 external jumps/calls
> (out of 37 relocations)
> [   45.982895] Allocating 7 PLT entries for 11 external jumps/calls
> (out of 11 relocations)
> [   45.983409] Allocating 4 PLT entries for 16 external jumps/calls
> (out of 16 relocations)
> 
> # insmod mac80211.ko
> [   52.028863] Allocating 545 PLT entries for 5762 external
> jumps/calls (out of 5762 relocations)
> [   52.029207] Allocating 8 PLT entries for 16 external jumps/calls
> (out of 16 relocations)
> [   52.029431] Allocating 4 PLT entries for 4 external jumps/calls
> (out of 4 relocations)
> [   52.029676] Allocating 39 PLT entries for 107 external jumps/calls
> (out of 107 relocations)
> 
> (i.e., without the optimization, all jumps and calls are identified as
> potentially external)
> 
> After:
> 
> # insmod cfg80211.ko
> [   47.685451] Allocating 111 PLT entries for 2097 external
> jumps/calls (out of 3632 relocations)
> [   47.686016] Allocating 3 PLT entries for 5 external jumps/calls
> (out of 10 relocations)
> [   47.686440] Allocating 11 PLT entries for 11 external jumps/calls
> (out of 37 relocations)
> [   47.686837] Allocating 4 PLT entries for 4 external jumps/calls
> (out of 11 relocations)
> [   47.687098] Allocating 3 PLT entries for 13 external jumps/calls
> (out of 16 relocations)
> 
> # insmod mac80211.ko
> [   50.410922] Allocating 231 PLT entries for 2857 external
> jumps/calls (out of 5762 relocations)
> [   50.411277] Allocating 2 PLT entries for 2 external jumps/calls
> (out of 16 relocations)
> [   50.411562] Allocating 1 PLT entries for 1 external jumps/calls
> (out of 4 relocations)
> [   50.411918] Allocating 20 PLT entries for 43 external jumps/calls
> (out of 107 relocations)
> 
> Another thing to note is that the .init section hardly deserves its
> own PLT. In the example above the 3rd resp 2nd line refers to
> .init.text, and there is really no point in putting 11 resp 2 PLT
> entries (or 88 resp 16 bytes) into a separate section just so that we
> can release it again after init. So the next optimization is to simply
> merge them.
> 
> I will send out the patches separately, please tell me what you think.
> 
> Thanks,
> Ard.
> 
> 
> > Reported-by: Chanho Min <chanho.min@lge.com>
> > Suggested-by: Youngho Shin <youngho.shin@lge.com>
> > Signed-off-by: Jongsung Kim <neidhard.kim@lge.com>
> > Reviewed-by: Namhyung Kim <namhyung.kim@lge.com>
> > ---
> >  arch/arm/kernel/module-plts.c | 111 +++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 110 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
> > index 0c7efc3..dae7459 100644
> > --- a/arch/arm/kernel/module-plts.c
> > +++ b/arch/arm/kernel/module-plts.c
> > @@ -9,6 +9,8 @@
> >  #include <linux/elf.h>
> >  #include <linux/kernel.h>
> >  #include <linux/module.h>
> > +#include <linux/sched.h>
> > +#include <linux/vmalloc.h>
> >
> >  #include <asm/cache.h>
> >  #include <asm/opcodes.h>
> > @@ -25,11 +27,26 @@
> >                                                     (PLT_ENT_STRIDE - 8))
> >  #endif
> >
> > +#define PLT_HASH_SHIFT         10
> > +#define PLT_HASH_SIZE          (1 << PLT_HASH_SHIFT)
> > +#define PLT_HASH_MASK          (PLT_HASH_SIZE - 1)
> > +
> >  struct plt_entries {
> >         u32     ldr[PLT_ENT_COUNT];
> >         u32     lit[PLT_ENT_COUNT];
> >  };
> >
> > +struct plt_hash_entry {
> > +       struct plt_hash_entry *next;
> > +       Elf32_Rel const *plt;
> > +};
> > +
> > +struct plt_hash_table {
> > +       struct plt_hash_entry *table[PLT_HASH_SIZE];
> > +       size_t used;
> > +       struct plt_hash_entry entry[0];
> > +};
> > +
> >  static bool in_init(const struct module *mod, u32 addr)
> >  {
> >         return addr - (u32)mod->init_layout.base < mod->init_layout.size;
> > @@ -100,7 +117,7 @@ static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
> >  }
> >
> >  /* Count how many PLT entries we may need */
> > -static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
> > +static unsigned int _count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
> >  {
> >         unsigned int ret = 0;
> >         int i;
> > @@ -129,6 +146,98 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
> >         return ret;
> >  }
> >
> > +static unsigned int hash_plt(Elf32_Rel const *plt, Elf32_Addr base, u32 mask)
> > +{
> > +       u32 const *loc = (u32 *)(base + plt->r_offset);
> > +       u32 hash = (plt->r_info >> 8) ^ (*loc & mask);
> > +       return hash & PLT_HASH_MASK;
> > +}
> > +
> > +static bool
> > +same_plts(Elf32_Rel const *a, Elf32_Rel const *b, Elf32_Addr base, u32 mask)
> > +{
> > +       u32 const *loc1;
> > +       u32 const *loc2;
> > +
> > +       if (a->r_info != b->r_info)
> > +               return false;
> > +
> > +       loc1 = (u32 *)(base + a->r_offset);
> > +       loc2 = (u32 *)(base + b->r_offset);
> > +
> > +       return ((*loc1 ^ *loc2) & mask) == 0;
> > +}
> > +
> > +static int hash_insert_plt(struct plt_hash_table *table, Elf32_Rel const *plt,
> > +                          Elf32_Addr base, u32 mask)
> > +{
> > +       unsigned int hash = hash_plt(plt, base, mask);
> > +       struct plt_hash_entry *entry;
> > +
> > +       for (entry = table->table[hash]; entry; entry = entry->next)
> > +               if (same_plts(entry->plt, plt, base, mask))
> > +                       return 0;
> > +
> > +       entry = &table->entry[table->used++];
> > +       entry->next = table->table[hash];
> > +       entry->plt = plt;
> > +       table->table[hash] = entry;
> > +
> > +       return 1;
> > +}
> > +
> > +static size_t count_plts(Elf32_Addr base, Elf32_Rel const *rel, int num)
> > +{
> > +       struct plt_hash_table *table;
> > +       size_t plts;
> > +       u32 mask;
> > +       int i;
> > +
> > +       /* count PLTs first to optimize memory usage */
> > +       for (plts = i = 0; i < num; i++) {
> > +               switch (ELF32_R_TYPE(rel[i].r_info)) {
> > +               case R_ARM_CALL:
> > +               case R_ARM_PC24:
> > +               case R_ARM_JUMP24:
> > +#ifdef CONFIG_THUMB2_KERNEL
> > +               case R_ARM_THM_CALL:
> > +               case R_ARM_THM_JUMP24:
> > +#endif
> > +                       plts++;
> > +                       break;
> > +               }
> > +       }
> > +
> > +       table = vzalloc(sizeof(struct plt_hash_table) +
> > +                       sizeof(struct plt_hash_entry) * plts);
> > +       if (!table) {
> > +               /* fall-back to O(n^2) counting on memory shortage */
> > +               return _count_plts(base, rel, num);
> > +       }
> > +
> > +       for (plts = i = 0; i < num; i++) {
> > +               switch (ELF32_R_TYPE(rel[i].r_info)) {
> > +               case R_ARM_CALL:
> > +               case R_ARM_PC24:
> > +               case R_ARM_JUMP24:
> > +                       mask = __opcode_to_mem_arm(0x00ffffff);
> > +                       plts += hash_insert_plt(table, &rel[i], base, mask);
> > +                       break;
> > +#ifdef CONFIG_THUMB2_KERNEL
> > +               case R_ARM_THM_CALL:
> > +               case R_ARM_THM_JUMP24:
> > +                       mask = __opcode_to_mem_thumb32(0x07ff2fff);
> > +                       plts += hash_insert_plt(table, &rel[i], base, mask);
> > +                       break;
> > +#endif
> > +               }
> > +       }
> > +
> > +       vfree(table);
> > +
> > +       return plts;
> > +}
> > +
> >  int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
> >                               char *secstrings, struct module *mod)
> >  {
> > --
> > 2.7.4
> >
> 

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 0/2] ARM: kernel: module PLT optimizations
  2016-08-17 11:06 ` [PATCH 0/2] ARM: kernel: module PLT optimizations Dave Martin
@ 2016-08-17 11:10   ` Ard Biesheuvel
  0 siblings, 0 replies; 5+ messages in thread
From: Ard Biesheuvel @ 2016-08-17 11:10 UTC (permalink / raw)
  To: linux-arm-kernel

On 17 August 2016 at 13:06, Dave Martin <Dave.Martin@arm.com> wrote:
> On Tue, Aug 16, 2016 at 05:51:44PM +0200, Ard Biesheuvel wrote:
>> As reported by Jongsung, the O(n^2) search in the PLT allocation code may
>> disproportionately affect module load time for modules with a larger number
>> of relocations.
>>
>> Since the existing routines rather naively take branch instructions into
>> account that are internal to the module, we can improve the situation
>> significantly by checking the symbol section index first, and disregarding
>> symbols that are defined in the same module.
>>
>> Patch #1 merge the core and init PLTs, since the latter is virtually empty
>> anyway.
>>
>> Patch #2 implements the optimization to only take SHN_UNDEF symbols into
>> account.
>
> Could we not sort the relocs on target address at the modpost stage?
>
> The order of reloc entries in ELF is not significant, so we could sort
> them offline before the module loader sees them.
>

That is exactly what I implemented already for arm64, and I am
currently implementing the same for ARM.

Also, call/jump relocations against SHN_UNDEF symbols typically have a
zero addend, so there is no point in trying very hard to reduce the
worst-case upper bound by looking for duplicate relocations that only
differ in the addend.

I'll have a patch out soon.

Thanks,
Ard.


> The kernel code then just needs to merge adjacent relocs with the same
> target to each PLT.  This won't be optimal for all cases, but it's
> simple.
>
>
> Alternatively, we could do the reloc->PLT grouping work at modpost time,
> spitting out the result some extra ELF section which the module loader
> can consume.  This may be a better approach if we want to be able to
> allocate a single PLT to relocs in multiple sections.
>
> Either approach keeps the module loader logic simple and O(n), and
> should be easily reused across arches (if someone needs that).
>


>>
>> I quickly tested this with the module above:
>> Before:
>>
>> # insmod cfg80211.ko
>> [   45.981587] Allocating 238 PLT entries for 3632 external
>> jumps/calls (out of 3632 relocations)
>> [   45.981967] Allocating 4 PLT entries for 10 external jumps/calls
>> (out of 10 relocations)
>> [   45.982386] Allocating 19 PLT entries for 37 external jumps/calls
>> (out of 37 relocations)
>> [   45.982895] Allocating 7 PLT entries for 11 external jumps/calls
>> (out of 11 relocations)
>> [   45.983409] Allocating 4 PLT entries for 16 external jumps/calls
>> (out of 16 relocations)
>>
>> # insmod mac80211.ko
>> [   52.028863] Allocating 545 PLT entries for 5762 external
>> jumps/calls (out of 5762 relocations)
>> [   52.029207] Allocating 8 PLT entries for 16 external jumps/calls
>> (out of 16 relocations)
>> [   52.029431] Allocating 4 PLT entries for 4 external jumps/calls
>> (out of 4 relocations)
>> [   52.029676] Allocating 39 PLT entries for 107 external jumps/calls
>> (out of 107 relocations)
>>
>> (i.e., without the optimization, all jumps and calls are identified as
>> potentially external)
>>
>> After:
>>
>> # insmod cfg80211.ko
>> [   47.685451] Allocating 111 PLT entries for 2097 external
>> jumps/calls (out of 3632 relocations)
>> [   47.686016] Allocating 3 PLT entries for 5 external jumps/calls
>> (out of 10 relocations)
>> [   47.686440] Allocating 11 PLT entries for 11 external jumps/calls
>> (out of 37 relocations)
>> [   47.686837] Allocating 4 PLT entries for 4 external jumps/calls
>> (out of 11 relocations)
>> [   47.687098] Allocating 3 PLT entries for 13 external jumps/calls
>> (out of 16 relocations)
>>
>> # insmod mac80211.ko
>> [   50.410922] Allocating 231 PLT entries for 2857 external
>> jumps/calls (out of 5762 relocations)
>> [   50.411277] Allocating 2 PLT entries for 2 external jumps/calls
>> (out of 16 relocations)
>> [   50.411562] Allocating 1 PLT entries for 1 external jumps/calls
>> (out of 4 relocations)
>> [   50.411918] Allocating 20 PLT entries for 43 external jumps/calls
>> (out of 107 relocations)
>>
>> Another thing to note is that the .init section hardly deserves its
>> own PLT. In the example above the 3rd resp 2nd line refers to
>> .init.text, and there is really no point in putting 11 resp 2 PLT
>> entries (or 88 resp 16 bytes) into a separate section just so that we
>> can release it again after init. So the next optimization is to simply
>> merge them.
>>
>> I will send out the patches separately, please tell me what you think.
>>
>> Thanks,
>> Ard.
>>
>>
>> > Reported-by: Chanho Min <chanho.min@lge.com>
>> > Suggested-by: Youngho Shin <youngho.shin@lge.com>
>> > Signed-off-by: Jongsung Kim <neidhard.kim@lge.com>
>> > Reviewed-by: Namhyung Kim <namhyung.kim@lge.com>
>> > ---
>> >  arch/arm/kernel/module-plts.c | 111 +++++++++++++++++++++++++++++++++++++++++-
>> >  1 file changed, 110 insertions(+), 1 deletion(-)
>> >
>> > diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
>> > index 0c7efc3..dae7459 100644
>> > --- a/arch/arm/kernel/module-plts.c
>> > +++ b/arch/arm/kernel/module-plts.c
>> > @@ -9,6 +9,8 @@
>> >  #include <linux/elf.h>
>> >  #include <linux/kernel.h>
>> >  #include <linux/module.h>
>> > +#include <linux/sched.h>
>> > +#include <linux/vmalloc.h>
>> >
>> >  #include <asm/cache.h>
>> >  #include <asm/opcodes.h>
>> > @@ -25,11 +27,26 @@
>> >                                                     (PLT_ENT_STRIDE - 8))
>> >  #endif
>> >
>> > +#define PLT_HASH_SHIFT         10
>> > +#define PLT_HASH_SIZE          (1 << PLT_HASH_SHIFT)
>> > +#define PLT_HASH_MASK          (PLT_HASH_SIZE - 1)
>> > +
>> >  struct plt_entries {
>> >         u32     ldr[PLT_ENT_COUNT];
>> >         u32     lit[PLT_ENT_COUNT];
>> >  };
>> >
>> > +struct plt_hash_entry {
>> > +       struct plt_hash_entry *next;
>> > +       Elf32_Rel const *plt;
>> > +};
>> > +
>> > +struct plt_hash_table {
>> > +       struct plt_hash_entry *table[PLT_HASH_SIZE];
>> > +       size_t used;
>> > +       struct plt_hash_entry entry[0];
>> > +};
>> > +
>> >  static bool in_init(const struct module *mod, u32 addr)
>> >  {
>> >         return addr - (u32)mod->init_layout.base < mod->init_layout.size;
>> > @@ -100,7 +117,7 @@ static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
>> >  }
>> >
>> >  /* Count how many PLT entries we may need */
>> > -static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
>> > +static unsigned int _count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
>> >  {
>> >         unsigned int ret = 0;
>> >         int i;
>> > @@ -129,6 +146,98 @@ static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
>> >         return ret;
>> >  }
>> >
>> > +static unsigned int hash_plt(Elf32_Rel const *plt, Elf32_Addr base, u32 mask)
>> > +{
>> > +       u32 const *loc = (u32 *)(base + plt->r_offset);
>> > +       u32 hash = (plt->r_info >> 8) ^ (*loc & mask);
>> > +       return hash & PLT_HASH_MASK;
>> > +}
>> > +
>> > +static bool
>> > +same_plts(Elf32_Rel const *a, Elf32_Rel const *b, Elf32_Addr base, u32 mask)
>> > +{
>> > +       u32 const *loc1;
>> > +       u32 const *loc2;
>> > +
>> > +       if (a->r_info != b->r_info)
>> > +               return false;
>> > +
>> > +       loc1 = (u32 *)(base + a->r_offset);
>> > +       loc2 = (u32 *)(base + b->r_offset);
>> > +
>> > +       return ((*loc1 ^ *loc2) & mask) == 0;
>> > +}
>> > +
>> > +static int hash_insert_plt(struct plt_hash_table *table, Elf32_Rel const *plt,
>> > +                          Elf32_Addr base, u32 mask)
>> > +{
>> > +       unsigned int hash = hash_plt(plt, base, mask);
>> > +       struct plt_hash_entry *entry;
>> > +
>> > +       for (entry = table->table[hash]; entry; entry = entry->next)
>> > +               if (same_plts(entry->plt, plt, base, mask))
>> > +                       return 0;
>> > +
>> > +       entry = &table->entry[table->used++];
>> > +       entry->next = table->table[hash];
>> > +       entry->plt = plt;
>> > +       table->table[hash] = entry;
>> > +
>> > +       return 1;
>> > +}
>> > +
>> > +static size_t count_plts(Elf32_Addr base, Elf32_Rel const *rel, int num)
>> > +{
>> > +       struct plt_hash_table *table;
>> > +       size_t plts;
>> > +       u32 mask;
>> > +       int i;
>> > +
>> > +       /* count PLTs first to optimize memory usage */
>> > +       for (plts = i = 0; i < num; i++) {
>> > +               switch (ELF32_R_TYPE(rel[i].r_info)) {
>> > +               case R_ARM_CALL:
>> > +               case R_ARM_PC24:
>> > +               case R_ARM_JUMP24:
>> > +#ifdef CONFIG_THUMB2_KERNEL
>> > +               case R_ARM_THM_CALL:
>> > +               case R_ARM_THM_JUMP24:
>> > +#endif
>> > +                       plts++;
>> > +                       break;
>> > +               }
>> > +       }
>> > +
>> > +       table = vzalloc(sizeof(struct plt_hash_table) +
>> > +                       sizeof(struct plt_hash_entry) * plts);
>> > +       if (!table) {
>> > +               /* fall-back to O(n^2) counting on memory shortage */
>> > +               return _count_plts(base, rel, num);
>> > +       }
>> > +
>> > +       for (plts = i = 0; i < num; i++) {
>> > +               switch (ELF32_R_TYPE(rel[i].r_info)) {
>> > +               case R_ARM_CALL:
>> > +               case R_ARM_PC24:
>> > +               case R_ARM_JUMP24:
>> > +                       mask = __opcode_to_mem_arm(0x00ffffff);
>> > +                       plts += hash_insert_plt(table, &rel[i], base, mask);
>> > +                       break;
>> > +#ifdef CONFIG_THUMB2_KERNEL
>> > +               case R_ARM_THM_CALL:
>> > +               case R_ARM_THM_JUMP24:
>> > +                       mask = __opcode_to_mem_thumb32(0x07ff2fff);
>> > +                       plts += hash_insert_plt(table, &rel[i], base, mask);
>> > +                       break;
>> > +#endif
>> > +               }
>> > +       }
>> > +
>> > +       vfree(table);
>> > +
>> > +       return plts;
>> > +}
>> > +
>> >  int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
>> >                               char *secstrings, struct module *mod)
>> >  {
>> > --
>> > 2.7.4
>> >
>>

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2016-08-17 11:10 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-08-16 15:51 [PATCH 0/2] ARM: kernel: module PLT optimizations Ard Biesheuvel
2016-08-16 15:51 ` [PATCH 1/2] ARM: kernel: merge core and init PLTs Ard Biesheuvel
2016-08-16 15:51 ` [PATCH 2/2] ARM: kernel: allocate PLT entries only for external symbols Ard Biesheuvel
2016-08-17 11:06 ` [PATCH 0/2] ARM: kernel: module PLT optimizations Dave Martin
2016-08-17 11:10   ` Ard Biesheuvel

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).