Diffstat (limited to 'arch/powerpc/kernel/module_32.c')
-rw-r--r--  arch/powerpc/kernel/module_32.c  200
1 file changed, 84 insertions(+), 116 deletions(-)
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index e2c3c6a85f3..6cff040bf45 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -22,9 +22,11 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/ftrace.h>
#include <linux/cache.h>
-
-#include "setup.h"
+#include <linux/bug.h>
+#include <linux/sort.h>
+#include <asm/setup.h>
#if 0
#define DEBUGP printk
@@ -32,43 +34,67 @@
#define DEBUGP(fmt , ...)
#endif
-LIST_HEAD(module_bug_list);
-
-void *module_alloc(unsigned long size)
+/* Count how many different relocations (different symbol, different
+ addend) */
+static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
{
- if (size == 0)
- return NULL;
- return vmalloc(size);
+ unsigned int i, r_info, r_addend, _count_relocs;
+
+ _count_relocs = 0;
+ r_info = 0;
+ r_addend = 0;
+ for (i = 0; i < num; i++)
+ /* Only count 24-bit relocs, others don't need stubs */
+ if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
+ (r_info != ELF32_R_SYM(rela[i].r_info) ||
+ r_addend != rela[i].r_addend)) {
+ _count_relocs++;
+ r_info = ELF32_R_SYM(rela[i].r_info);
+ r_addend = rela[i].r_addend;
+ }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+ _count_relocs++; /* add one for ftrace_caller */
+#endif
+ return _count_relocs;
}
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+static int relacmp(const void *_x, const void *_y)
{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
+ const Elf32_Rela *x, *y;
+
+ y = (Elf32_Rela *)_x;
+ x = (Elf32_Rela *)_y;
+
+ /* Compare the entire r_info (as opposed to ELF32_R_SYM(r_info) only) to
+ * make the comparison cheaper/faster. It won't affect the sorting or
+ * the counting algorithms' performance
+ */
+ if (x->r_info < y->r_info)
+ return -1;
+ else if (x->r_info > y->r_info)
+ return 1;
+ else if (x->r_addend < y->r_addend)
+ return -1;
+ else if (x->r_addend > y->r_addend)
+ return 1;
+ else
+ return 0;
}
-/* Count how many different relocations (different symbol, different
- addend) */
-static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
+static void relaswap(void *_x, void *_y, int size)
{
- unsigned int i, j, ret = 0;
-
- /* Sure, this is order(n^2), but it's usually short, and not
- time critical */
- for (i = 0; i < num; i++) {
- for (j = 0; j < i; j++) {
- /* If this addend appeared before, it's
- already been counted */
- if (ELF32_R_SYM(rela[i].r_info)
- == ELF32_R_SYM(rela[j].r_info)
- && rela[i].r_addend == rela[j].r_addend)
- break;
- }
- if (j == i) ret++;
+ uint32_t *x, *y, tmp;
+ int i;
+
+ y = (uint32_t *)_x;
+ x = (uint32_t *)_y;
+
+ for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
+ tmp = x[i];
+ x[i] = y[i];
+ y[i] = tmp;
}
- return ret;
}
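
For readers unfamiliar with the trick used by the new count_relocs(): because the relocation table is sorted by (symbol, addend) before counting, duplicate entries sit next to each other and a single pass comparing against the previous key is enough. Below is a minimal user-space sketch of the same idea; the helper name and the sample entries are made up for illustration, and it only assumes glibc's <elf.h> definitions.

#include <elf.h>
#include <stdio.h>

/* one pass over an already-sorted table: count distinct (sym, addend) keys */
static unsigned int count_unique_rel24(const Elf32_Rela *rela, unsigned int num)
{
	unsigned int i, count = 0, prev_sym = 0;
	Elf32_Sword prev_addend = 0;

	for (i = 0; i < num; i++)
		/* only R_PPC_REL24 relocations may need a trampoline */
		if (ELF32_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (prev_sym != ELF32_R_SYM(rela[i].r_info) ||
		     prev_addend != rela[i].r_addend)) {
			count++;
			prev_sym = ELF32_R_SYM(rela[i].r_info);
			prev_addend = rela[i].r_addend;
		}
	return count;
}

int main(void)
{
	/* already sorted: three entries, but only two distinct (sym, addend) keys */
	Elf32_Rela r[] = {
		{ 0x10, ELF32_R_INFO(5, R_PPC_REL24), 0 },
		{ 0x20, ELF32_R_INFO(5, R_PPC_REL24), 0 },
		{ 0x30, ELF32_R_INFO(7, R_PPC_REL24), 0 },
	};

	printf("%u stubs needed\n", count_unique_rel24(r, 3));	/* prints "2 stubs needed" */
	return 0;
}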
/* Get the potential trampolines size required of the init and
@@ -99,6 +125,16 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
DEBUGP("Ptr: %p. Number: %u\n",
(void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela));
+
+ /* Sort the relocation information based on a symbol and
+ * addend key. This is a stable O(n*log n) complexity
+ * algorithm but it will reduce the complexity of
+ * count_relocs() to linear complexity O(n)
+ */
+ sort((void *)hdr + sechdrs[i].sh_offset,
+ sechdrs[i].sh_size / sizeof(Elf32_Rela),
+ sizeof(Elf32_Rela), relacmp, relaswap);
+
ret += count_relocs((void *)hdr
+ sechdrs[i].sh_offset,
sechdrs[i].sh_size
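
The sort step can be approximated in user space with qsort(3) and any comparator that places equal (r_info, r_addend) keys next to each other, which is all count_relocs() needs; the kernel's sort() additionally accepts the relaswap() callback so entries are exchanged word-at-a-time rather than byte-at-a-time. A small sketch with made-up sample data:

#include <elf.h>
#include <stdlib.h>
#include <stdio.h>

/* order by the whole r_info word, then by addend, in the spirit of relacmp() */
static int rela_key_cmp(const void *a, const void *b)
{
	const Elf32_Rela *x = a, *y = b;

	if (x->r_info != y->r_info)
		return x->r_info < y->r_info ? -1 : 1;
	if (x->r_addend != y->r_addend)
		return x->r_addend < y->r_addend ? -1 : 1;
	return 0;
}

int main(void)
{
	Elf32_Rela r[] = {
		{ 0x30, ELF32_R_INFO(7, R_PPC_REL24), 0 },
		{ 0x10, ELF32_R_INFO(5, R_PPC_REL24), 8 },
		{ 0x20, ELF32_R_INFO(5, R_PPC_REL24), 0 },
	};

	qsort(r, sizeof(r) / sizeof(r[0]), sizeof(r[0]), rela_key_cmp);

	for (unsigned i = 0; i < 3; i++)	/* prints sym 5/0, sym 5/8, sym 7/0 */
		printf("sym %u addend %d\n",
		       (unsigned)ELF32_R_SYM(r[i].r_info), (int)r[i].r_addend);
	return 0;
}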
@@ -137,21 +173,10 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *module)
-{
- printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n",
- module->name);
- return -ENOEXEC;
-}
-
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
- && entry->jump[1] == 0x396b0000 + (val & 0xffff))
+ if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+ && entry->jump[1] == 0x398c0000 + (val & 0xffff))
return 1;
return 0;
}
@@ -178,10 +203,9 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- /* Stolen from Paul Mackerras as well... */
- entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
- entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
- entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
+ entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+ entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
+ entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
entry->jump[3] = 0x4e800420; /* bctr */
DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
@@ -223,7 +247,12 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
/* Low half of the symbol */
*(uint16_t *)location = value;
break;
-
+
+ case R_PPC_ADDR16_HI:
+ /* Higher half of the symbol */
+ *(uint16_t *)location = (value >> 16);
+ break;
+
case R_PPC_ADDR16_HA:
/* Sign-adjusted lower 16 bits: PPC ELF ABI says:
(((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
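
The difference between the new R_PPC_ADDR16_HI case (raw upper half) and the existing R_PPC_ADDR16_HA case (upper half adjusted for the sign of the lower half) is easiest to see numerically; a small sketch with a made-up value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0x10009abc;		/* low half has bit 15 set */

	uint16_t hi = value >> 16;		/* 0x1000 */
	uint16_t ha = (value + 0x8000) >> 16;	/* 0x1001: +1 because 0x9abc is "negative" */
	uint16_t lo = value & 0xffff;		/* 0x9abc */

	printf("hi=0x%04x ha=0x%04x lo=0x%04x\n", hi, ha, lo);

	/* an addi/load-style instruction sign-extends lo, so it pairs with ha:
	 * (0x1001 << 16) + (int16_t)0x9abc == 0x10009abc, whereas hi pairs
	 * with an unsigned ori-style low half instead. */
	return 0;
}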
@@ -268,72 +297,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return -ENOEXEC;
}
}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ module->arch.tramp =
+ do_plt_call(module->module_core,
+ (unsigned long)ftrace_caller,
+ sechdrs, module);
+#endif
return 0;
}
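
A note on why a trampoline is reserved for ftrace_caller at all (a hedged sketch, not the kernel's actual ftrace code): an R_PPC_REL24 branch encodes a 26-bit signed displacement, so it can only reach roughly +/- 32 MB, and a module loaded farther than that from ftrace_caller has to bounce through the PLT entry recorded in module->arch.tramp. The helper below is hypothetical and only illustrates the range check.

#include <stdint.h>
#include <stdio.h>

/* can a REL24 branch at 'from' reach 'to' directly? (hypothetical helper) */
static int rel24_reachable(uint32_t from, uint32_t to)
{
	int32_t offset = (int32_t)(to - from);

	return offset >= -0x02000000 && offset < 0x02000000;
}

int main(void)
{
	printf("%d\n", rel24_reachable(0xc0000000, 0xc0100000));	/* 1: in range */
	printf("%d\n", rel24_reachable(0xc0000000, 0xd0000000));	/* 0: needs tramp */
	return 0;
}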
-
-static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- const char *name)
-{
- char *secstrings;
- unsigned int i;
-
- secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++)
- if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
- return &sechdrs[i];
- return NULL;
-}
-
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- const Elf_Shdr *sect;
-
- me->arch.bug_table = NULL;
- me->arch.num_bugs = 0;
-
- /* Find the __bug_table section, if present */
- sect = find_section(hdr, sechdrs, "__bug_table");
- if (sect != NULL) {
- me->arch.bug_table = (void *) sect->sh_addr;
- me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
- }
-
- /*
- * Strictly speaking this should have a spinlock to protect against
- * traversals, but since we only traverse on BUG()s, a spinlock
- * could potentially lead to deadlock and thus be counter-productive.
- */
- list_add(&me->arch.bug_list, &module_bug_list);
-
- /* Apply feature fixups */
- sect = find_section(hdr, sechdrs, "__ftr_fixup");
- if (sect != NULL)
- do_feature_fixups(cur_cpu_spec->cpu_features,
- (void *)sect->sh_addr,
- (void *)sect->sh_addr + sect->sh_size);
-
- return 0;
-}
-
-void module_arch_cleanup(struct module *mod)
-{
- list_del(&mod->arch.bug_list);
-}
-
-struct bug_entry *module_find_bug(unsigned long bugaddr)
-{
- struct mod_arch_specific *mod;
- unsigned int i;
- struct bug_entry *bug;
-
- list_for_each_entry(mod, &module_bug_list, bug_list) {
- bug = mod->bug_table;
- for (i = 0; i < mod->num_bugs; ++i, ++bug)
- if (bugaddr == bug->bug_addr)
- return bug;
- }
- return NULL;
-}