Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/module.c   | 43
-rw-r--r--   arch/mips/kernel/smp.c      |  4
-rw-r--r--   arch/mips/kernel/sync-r4k.c | 26
3 files changed, 47 insertions, 26 deletions
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index a5066b1c3de..4f8c3cba8c0 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -39,8 +39,6 @@ struct mips_hi16 {
 	Elf_Addr value;
 };
 
-static struct mips_hi16 *mips_hi16_list;
-
 static LIST_HEAD(dbe_list);
 static DEFINE_SPINLOCK(dbe_lock);
 
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
 
 	n->addr = (Elf_Addr *)location;
 	n->value = v;
-	n->next = mips_hi16_list;
-	mips_hi16_list = n;
+	n->next = me->arch.r_mips_hi16_list;
+	me->arch.r_mips_hi16_list = n;
 
 	return 0;
 }
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
 	return 0;
 }
 
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+	struct mips_hi16 *next;
+
+	while (l) {
+		next = l->next;
+		kfree(l);
+		l = next;
+	}
+}
+
 static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
 {
 	unsigned long insnlo = *location;
+	struct mips_hi16 *l;
 	Elf_Addr val, vallo;
 
 	/* Sign extend the addend we extract from the lo insn. */
 	vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
 
-	if (mips_hi16_list != NULL) {
-		struct mips_hi16 *l;
-
-		l = mips_hi16_list;
+	if (me->arch.r_mips_hi16_list != NULL) {
+		l = me->arch.r_mips_hi16_list;
 		while (l != NULL) {
 			struct mips_hi16 *next;
 			unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
 			l = next;
 		}
 
-		mips_hi16_list = NULL;
+		me->arch.r_mips_hi16_list = NULL;
 	}
 
 	/*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
 	return 0;
 
 out_danger:
+	free_relocation_chain(l);
+	me->arch.r_mips_hi16_list = NULL;
+
 	pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
 
 	return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
 	pr_debug("Applying relocate section %u to %u\n", relsec,
 	       sechdrs[relsec].sh_info);
 
+	me->arch.r_mips_hi16_list = NULL;
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 		/* This is where to make the change */
 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
 			return res;
 	}
 
+	/*
+	 * Normally the hi16 list should be deallocated at this point. A
+	 * malformed binary however could contain a series of R_MIPS_HI16
+	 * relocations not followed by a R_MIPS_LO16 relocation. In that
+	 * case, free up the list and return an error.
+	 */
+	if (me->arch.r_mips_hi16_list) {
+		free_relocation_chain(me->arch.r_mips_hi16_list);
+		me->arch.r_mips_hi16_list = NULL;
+
+		return -ENOEXEC;
+	}
+
 	return 0;
 }
 
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 31637d8c873..9005bf9fb85 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
 
 	cpu_set(cpu, cpu_callin_map);
 
-	synchronise_count_slave();
+	synchronise_count_slave(cpu);
 
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
@@ -173,7 +173,6 @@ void smp_send_stop(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	mp_ops->cpus_done();
-	synchronise_count_master();
 }
 
 /* called from main before smp_init() */
@@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
+	synchronise_count_master(cpu);
 	return 0;
 }
 
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 842d55e411f..7f1eca3858d 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
 #define COUNTON 100
 #define NR_LOOPS 5
 
-void __cpuinit synchronise_count_master(void)
+void __cpuinit synchronise_count_master(int cpu)
 {
 	int i;
 	unsigned long flags;
 	unsigned int initcount;
-	int nslaves;
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	/*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
 	return;
 #endif
 
-	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
-	       num_online_cpus());
+	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
 
 	local_irq_save(flags);
 
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
 	 * Notify the slaves that it's time to start
 	 */
 	atomic_set(&count_reference, read_c0_count());
-	atomic_set(&count_start_flag, 1);
+	atomic_set(&count_start_flag, cpu);
 	smp_wmb();
 
 	/* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
 	 * two CPUs.
 	 */
 
-	nslaves = num_online_cpus()-1;
 	for (i = 0; i < NR_LOOPS; i++) {
-		/* slaves loop on '!= ncpus' */
-		while (atomic_read(&count_count_start) != nslaves)
+		/* slaves loop on '!= 2' */
+		while (atomic_read(&count_count_start) != 1)
 			mb();
 		atomic_set(&count_count_stop, 0);
 		smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
 		/*
 		 * Wait for all slaves to leave the synchronization point:
 		 */
-		while (atomic_read(&count_count_stop) != nslaves)
+		while (atomic_read(&count_count_stop) != 1)
 			mb();
 		atomic_set(&count_count_start, 0);
 		smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
+	atomic_set(&count_start_flag, 0);
 
 	local_irq_restore(flags);
 
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
 	printk("done.\n");
 }
 
-void __cpuinit synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(int cpu)
 {
 	int i;
 	unsigned int initcount;
-	int ncpus;
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	/*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
 	 * so we first wait for the master to say everyone is ready
 	 */
 
-	while (!atomic_read(&count_start_flag))
+	while (atomic_read(&count_start_flag) != cpu)
 		mb();
 
 	/* Count will be initialised to next expire for all CPU's */
 	initcount = atomic_read(&count_reference);
 
-	ncpus = num_online_cpus();
 	for (i = 0; i < NR_LOOPS; i++) {
 		atomic_inc(&count_count_start);
-		while (atomic_read(&count_count_start) != ncpus)
+		while (atomic_read(&count_count_start) != 2)
 			mb();
 
 		/*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
 		write_c0_count(initcount);
 
 		atomic_inc(&count_count_stop);
-		while (atomic_read(&count_count_stop) != ncpus)
+		while (atomic_read(&count_count_stop) != 2)
 			mb();
 	}
 	/* Arrange for an interrupt in a short while */
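
The module.c hunks above replace the global mips_hi16_list with a chain hung off the module's own per-architecture state (me->arch.r_mips_hi16_list), so concurrent module loads no longer share one list. The field itself has to be added to struct mod_arch_specific in arch/mips/include/asm/module.h, which is outside this diffstat (limited to arch/mips/kernel). A minimal sketch of what that assumed header change looks like:

/* arch/mips/include/asm/module.h -- sketch only, not part of this diff */
struct mips_hi16;			/* type is defined in arch/mips/kernel/module.c */

struct mod_arch_specific {
	/* ... existing DBE exception-table fields ... */
	struct mips_hi16 *r_mips_hi16_list;	/* pending R_MIPS_HI16 relocations */
};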
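The smp.c and sync-r4k.c hunks switch count-register synchronisation from a single all-CPUs rendezvous in smp_cpus_done() to a per-CPU rendezvous driven from __cpu_up(), so both helpers now take the CPU being brought online. The matching declarations live in arch/mips/include/asm/r4k-timer.h, likewise not shown here; they are assumed to end up roughly as:

/* arch/mips/include/asm/r4k-timer.h -- sketch only, not part of this diff */
#ifdef CONFIG_SYNC_R4K

extern void synchronise_count_master(int cpu);
extern void synchronise_count_slave(int cpu);

#else

static inline void synchronise_count_master(int cpu)
{
}

static inline void synchronise_count_slave(int cpu)
{
}

#endif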
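The new '!= 1' / '!= 2' constants in sync-r4k.c encode a strictly two-party rendezvous: count_start_flag now carries the number of the one CPU being synchronised (CPU 0 is the boot CPU and is never a slave, so 0 doubles as the idle value), the master spins until that single slave has bumped count_count_start to 1, and the slave spins until both parties have pushed it to 2. A stand-alone user-space sketch of the same handshake, using C11 atomics and pthreads in place of the kernel's atomic_t and mb(); names and structure are illustrative only:

/* rendezvous.c -- build with: cc -pthread rendezvous.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count_count_start;	/* starts at 0, like the kernel's atomic_t */

static void *master(void *arg)
{
	(void)arg;
	/* master waits until the single slave has checked in (value 1) */
	while (atomic_load(&count_count_start) != 1)
		;
	puts("master: slave checked in, proceeding");
	/* master's own increment (to 2) releases the slave */
	atomic_fetch_add(&count_count_start, 1);
	return NULL;
}

static void *slave(void *arg)
{
	(void)arg;
	/* slave checks in first ... */
	atomic_fetch_add(&count_count_start, 1);
	/* ... then waits until both parties have checked in (value 2) */
	while (atomic_load(&count_count_start) != 2)
		;
	puts("slave: master checked in, proceeding");
	return NULL;
}

int main(void)
{
	pthread_t m, s;

	pthread_create(&m, NULL, master, NULL);
	pthread_create(&s, NULL, slave, NULL);
	pthread_join(m, NULL);
	pthread_join(s, NULL);
	return 0;
}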