Diffstat (limited to 'arch/parisc')
27 files changed, 375 insertions, 294 deletions
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 5ddad7bd60a..0d428278356 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -77,7 +77,7 @@ libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
 drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
-PALO := $(shell if which palo; then : ; \
+PALO := $(shell if (which palo 2>&1); then : ; \
     elif [ -x /sbin/palo ]; then echo /sbin/palo; \
     fi)
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f88b252e419..2121d99f836 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 unifdef-y += pdc.h
+unifdef-y += swab.h
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 57fcc4a5ebb..edbfe25c5fc 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -155,14 +155,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 #endif
-/* Note that we need not lock read accesses - aligned word writes/reads
- * are atomic, so a reader never sees unconsistent values.
- *
- * Cache-line alignment would conflict with, for example, linux/module.h
+/*
+ * Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees inconsistent values.
  */
-typedef struct { volatile int counter; } atomic_t;
-
 /* It's possible to reduce all atomic operations to either
  * __atomic_add_return, atomic_set and atomic_read (the latter
  * is there only for consistency).
@@ -260,8 +257,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #ifdef CONFIG_64BIT
-typedef struct { volatile s64 counter; } atomic64_t;
-
 #define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
 static __inline__ int
diff --git a/arch/parisc/include/asm/byteorder.h b/arch/parisc/include/asm/byteorder.h
index db148313de5..da66029c4cb 100644
--- a/arch/parisc/include/asm/byteorder.h
+++ b/arch/parisc/include/asm/byteorder.h
@@ -1,82 +1,7 @@
 #ifndef _PARISC_BYTEORDER_H
 #define _PARISC_BYTEORDER_H
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
-{
-    __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
-        "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
-        : "=r" (x)
-        : "0" (x));
-    return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
-{
-    __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
-        "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
-        "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
-        : "=r" (x)
-        : "0" (x));
-    return x;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
-{
-    unsigned int temp;
-    __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
-        "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
-        "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
-        : "=r" (x), "=&r" (temp)
-        : "0" (x));
-    return x;
-}
-
-
-#if BITS_PER_LONG > 32
-/*
-** From "PA-RISC 2.0 Architecture", HP Professional Books.
-** See Appendix I page 8 , "Endian Byte Swapping".
-**
-** Pretty cool algorithm: (* == zero'd bits)
-** PERMH 01234567 -> 67452301 into %0
-** HSHL 67452301 -> 7*5*3*1* into %1
-** HSHR 67452301 -> *6*4*2*0 into %0
-** OR %0 | %1 -> 76543210 into %0 (all done!)
-*/
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
-    __u64 temp;
-    __asm__("permh,3210 %0, %0\n\t"
-        "hshl %0, 8, %1\n\t"
-        "hshr,u %0, 8, %0\n\t"
-        "or %1, %0, %0"
-        : "=r" (x), "=&r" (temp)
-        : "0" (x));
-    return x;
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#elif !defined(__STRICT_ANSI__)
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
-    __u32 t1 = ___arch__swab32((__u32) x);
-    __u32 t2 = ___arch__swab32((__u32) (x >> 32));
-    return (((__u64) t1 << 32) | t2);
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#endif
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab24(x) ___arch__swab24(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#endif /* __GNUC__ */
-
+#include <asm/swab.h>
 #include <linux/byteorder/big_endian.h>
 #endif /* _PARISC_BYTEORDER_H */
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index e9639ccc3fc..c84b2fcb18a 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 #endif
     : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
     : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
-    : "r19", "r20", "r21", "r22");
+    : "r19", "r20", "r21", "r22", "memory");
     return csum_fold(sum);
 }
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 55ddb184210..d3031d1f9d0 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -4,12 +4,6 @@
 #include <linux/types.h>
 #include <asm/pgtable.h>
-extern unsigned long parisc_vmerge_boundary;
-extern unsigned long parisc_vmerge_max_size;
-
-#define BIO_VMERGE_BOUNDARY parisc_vmerge_boundary
-#define BIO_VMERGE_MAX_SIZE parisc_vmerge_max_size
-
 #define virt_to_phys(a) ((unsigned long)__pa(a))
 #define phys_to_virt(a) __va(a)
 #define virt_to_bus virt_to_phys
@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add
 /* readb can never be const, so use __fswab instead of le*_to_cpu */
 #define readb(addr) __raw_readb(addr)
-#define readw(addr) __fswab16(__raw_readw(addr))
-#define readl(addr) __fswab32(__raw_readl(addr))
-#define readq(addr) __fswab64(__raw_readq(addr))
+#define readw(addr) le16_to_cpu(__raw_readw(addr))
+#define readl(addr) le32_to_cpu(__raw_readl(addr))
+#define readq(addr) le64_to_cpu(__raw_readq(addr))
 #define writeb(b, addr) __raw_writeb(b, addr)
 #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
 #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 85856c74ad1..354b2aca990 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm)
     mm->context = 0;
 }
-static inline void load_context(mm_context_t context)
+static inline unsigned long __space_to_prot(mm_context_t context)
 {
-    mtsp(context, 3);
 #if SPACEID_SHIFT == 0
-    mtctl(context << 1,8);
+    return context << 1;
 #else
-    mtctl(context >> (SPACEID_SHIFT - 1),8);
+    return context >> (SPACEID_SHIFT - 1);
 #endif
 }
+static inline void load_context(mm_context_t context)
+{
+    mtsp(context, 3);
+    mtctl(__space_to_prot(context), 8);
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {
diff --git a/arch/parisc/include/asm/module.h b/arch/parisc/include/asm/module.h
index c2cb49e934c..1f4123427ea 100644
--- a/arch/parisc/include/asm/module.h
+++ b/arch/parisc/include/asm/module.h
@@ -23,8 +23,10 @@ struct mod_arch_specific
 {
     unsigned long got_offset, got_count, got_max;
     unsigned long fdesc_offset, fdesc_count, fdesc_max;
-    unsigned long stub_offset, stub_count, stub_max;
-    unsigned long init_stub_offset, init_stub_count, init_stub_max;
+    struct {
+        unsigned long stub_offset;
+        unsigned int stub_entries;
+    } *section;
     int unwind_section;
     struct unwind_table *unwind;
 };
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 3c9d34844c8..9d64df8754b 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,6 +17,7 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/system.h>
+#include <asm/percpu.h>
 #endif /* __ASSEMBLY__ */
 #define KERNEL_STACK_SIZE (4*PAGE_SIZE)
@@ -109,8 +110,7 @@ struct cpuinfo_parisc {
 };
 extern struct system_cpuinfo_parisc boot_cpu_data;
-extern struct cpuinfo_parisc cpu_data[NR_CPUS];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
diff --git a/arch/parisc/include/asm/swab.h b/arch/parisc/include/asm/swab.h
new file mode 100644
index 00000000000..3ff16c5a335
--- /dev/null
+++ b/arch/parisc/include/asm/swab.h
@@ -0,0 +1,66 @@
+#ifndef _PARISC_SWAB_H
+#define _PARISC_SWAB_H
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#define __SWAB_64_THRU_32__
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+    __asm__("dep %0, 15, 8, %0\n\t" /* deposit 00ab -> 0bab */
+        "shd %%r0, %0, 8, %0" /* shift 000000ab -> 00ba */
+        : "=r" (x)
+        : "0" (x));
+    return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
+{
+    __asm__("shd %0, %0, 8, %0\n\t" /* shift xabcxabc -> cxab */
+        "dep %0, 15, 8, %0\n\t" /* deposit cxab -> cbab */
+        "shd %%r0, %0, 8, %0" /* shift 0000cbab -> 0cba */
+        : "=r" (x)
+        : "0" (x));
+    return x;
+}
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+    unsigned int temp;
+    __asm__("shd %0, %0, 16, %1\n\t" /* shift abcdabcd -> cdab */
+        "dep %1, 15, 8, %1\n\t" /* deposit cdab -> cbab */
+        "shd %0, %1, 8, %0" /* shift abcdcbab -> dcba */
+        : "=r" (x), "=&r" (temp)
+        : "0" (x));
+    return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#if BITS_PER_LONG > 32
+/*
+** From "PA-RISC 2.0 Architecture", HP Professional Books.
+** See Appendix I page 8 , "Endian Byte Swapping".
+**
+** Pretty cool algorithm: (* == zero'd bits)
+** PERMH 01234567 -> 67452301 into %0
+** HSHL 67452301 -> 7*5*3*1* into %1
+** HSHR 67452301 -> *6*4*2*0 into %0
+** OR %0 | %1 -> 76543210 into %0 (all done!)
+*/
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
+    __u64 temp;
+    __asm__("permh,3210 %0, %0\n\t"
+        "hshl %0, 8, %1\n\t"
+        "hshr,u %0, 8, %0\n\t"
+        "or %1, %0, %0"
+        : "=r" (x), "=&r" (temp)
+        : "0" (x));
+    return x;
+}
+#define __arch_swab64 __arch_swab64
+#endif /* BITS_PER_LONG > 32 */
+
+#endif /* _PARISC_SWAB_H */
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4878b9501f2..1c6dbb6f6e5 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
+int fixup_exception(struct pt_regs *regs);
+
 #endif /* __PARISC_UACCESS_H */
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 884b7ce16a3..994bcd98090 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
     return match_device(to_parisc_driver(drv), to_parisc_device(dev));
 }
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+    const struct parisc_device *padev = to_parisc_device(dev);
+    const struct parisc_device_id *id = &padev->id;
+
+    return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+        (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+        (u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+    const struct parisc_device *padev;
+    char modalias[40];
+
+    if (!dev)
+        return -ENODEV;
+
+    padev = to_parisc_device(dev);
+    if (!padev)
+        return -ENODEV;
+
+    if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+        return -ENOMEM;
+
+    make_modalias(dev, modalias);
+    if (add_uevent_var(env, "MODALIAS=%s", modalias))
+        return -ENOMEM;
+
+    return 0;
+}
+
 #define pa_dev_attr(name, field, format_string) \
 static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
 { \
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-    struct parisc_device *padev = to_parisc_device(dev);
-    struct parisc_device_id *id = &padev->id;
-
-    return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
-        (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
-        (u32)id->sversion);
+    return make_modalias(dev, buf);
 }
 static struct device_attribute parisc_device_attrs[] = {
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = {
 struct bus_type parisc_bus_type = {
     .name = "parisc",
     .match = parisc_generic_match,
+    .uevent = parisc_uevent,
     .dev_attrs = parisc_device_attrs,
     .probe = parisc_driver_probe,
     .remove = parisc_driver_remove,
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b3ef1..5595a2f3118 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -80,6 +80,7 @@ END(hpmc_pim_data)
     .import intr_save, code
 ENTRY(os_hpmc)
+.os_hpmc:
     /*
      * registers modified:
@@ -295,5 +296,10 @@ os_hpmc_6:
     b .
     nop
 ENDPROC(os_hpmc)
-ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */
+.os_hpmc_end:
     nop
+.data
+.align 4
+    .export os_hpmc_size
+os_hpmc_size:
+    .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 4cea935e2f9..ac2c822928c 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
     irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 #endif
-    return cpu_data[cpu].txn_addr;
+    return per_cpu(cpu_data, cpu).txn_addr;
 }
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
     next_cpu++; /* assign to "next" CPU we want this bugger on */
     /* validate entry */
-    while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
-        !cpu_online(next_cpu)))
+    while ((next_cpu < NR_CPUS) &&
+        (!per_cpu(cpu_data, next_cpu).txn_addr ||
+         !cpu_online(next_cpu)))
         next_cpu++;
     if (next_cpu >= NR_CPUS)
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
         printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
             irq, smp_processor_id(), cpu);
         gsc_writel(irq + CPU_IRQ_BASE,
-            cpu_data[cpu].hpa);
+            per_cpu(cpu_data, cpu).hpa);
         goto set_out;
     }
 #endif
@@ -421,5 +422,5 @@ void __init init_IRQ(void)
 void ack_bad_irq(unsigned int irq)
 {
-    printk("unexpected IRQ %d\n", irq);
+    printk(KERN_WARNING "unexpected IRQ %d\n", irq);
 }
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 44138c3e6ea..9013243cecc 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -6,6 +6,7 @@
  *
  *    Linux/PA-RISC Project (http://www.parisc-linux.org/)
  *    Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ *    Copyright (C) 2008 Helge Deller <deller@gmx.de>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -24,6 +25,19 @@
  *
  *
  *    Notes:
+ *    - PLT stub handling
+ *      On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ *      ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ *      fail to reach their PLT stub if we only create one big stub array for
+ *      all sections at the beginning of the core or init section.
+ *      Instead we now insert individual PLT stub entries directly in front of
+ *      of the code sections where the stubs are actually called.
+ *      This reduces the distance between the PCREL location and the stub entry
+ *      so that the relocations can be fulfilled.
+ *      While calculating the final layout of the kernel module in memory, the
+ *      kernel module loader calls arch_mod_section_prepend() to request the
+ *      to be reserved amount of memory in front of each individual section.
+ *
  *    - SEGREL32 handling
  *      We are not doing SEGREL32 handling correctly. According to the ABI, we
  *      should do a value offset, like this:
@@ -58,9 +72,13 @@
 #define DEBUGP(fmt...)
 #endif
+#define RELOC_REACHABLE(val, bits) \
+	(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
+	     ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ?	\
+	0 : 1)
+
 #define CHECK_RELOC(val, bits) \
-	if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
-	     ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
+	if (!RELOC_REACHABLE(val, bits)) { \
 		printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
 		me->name, strtab + sym->st_name, (unsigned long)val, bits); \
 		return -ENOEXEC; \
 	}
@@ -92,13 +110,6 @@ static inline int in_local(struct module *me, void *loc)
     return in_init(me, loc) || in_core(me, loc);
 }
-static inline int in_local_section(struct module *me, void *loc, void *dot)
-{
-    return (in_init(me, loc) && in_init(me, dot)) ||
-        (in_core(me, loc) && in_core(me, dot));
-}
-
-
 #ifndef CONFIG_64BIT
 struct got_entry {
     Elf32_Addr addr;
@@ -258,23 +269,42 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
 /* Free memory returned from module_alloc */
 void module_free(struct module *mod, void *module_region)
 {
+    kfree(mod->arch.section);
+    mod->arch.section = NULL;
+
     vfree(module_region);
     /* FIXME: If module_region == mod->init_region, trim exception
        table entries. */
 }
+/* Additional bytes needed in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod,
+        unsigned int section)
+{
+    /* size needed for all stubs of this section (including
+     * one additional for correct alignment of the stubs) */
+    return (mod->arch.section[section].stub_entries + 1)
+        * sizeof(struct stub_entry);
+}
+
 #define CONST
 int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
     CONST Elf_Shdr *sechdrs,
    CONST char *secstrings,
    struct module *me)
 {
-    unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
+    unsigned long gots = 0, fdescs = 0, len;
     unsigned int i;
+    len = hdr->e_shnum * sizeof(me->arch.section[0]);
+    me->arch.section = kzalloc(len, GFP_KERNEL);
+    if (!me->arch.section)
+        return -ENOMEM;
+
     for (i = 1; i < hdr->e_shnum; i++) {
-        const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
+        const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
         unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+        unsigned int count, s;
         if (strncmp(secstrings + sechdrs[i].sh_name,
             ".PARISC.unwind", 14) == 0)
@@ -290,11 +320,23 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
         */
        gots += count_gots(rels, nrels);
        fdescs += count_fdescs(rels, nrels);
-        if(strncmp(secstrings + sechdrs[i].sh_name,
-            ".rela.init", 10) == 0)
-            init_stubs += count_stubs(rels, nrels);
-        else
-            stubs += count_stubs(rels, nrels);
+
+        /* XXX: By sorting the relocs and finding duplicate entries
+         * we could reduce the number of necessary stubs and save
+         * some memory. */
+        count = count_stubs(rels, nrels);
+        if (!count)
+            continue;
+
+        /* so we need relocation stubs. reserve necessary memory. */
+        /* sh_info gives the section for which we need to add stubs. */
+        s = sechdrs[i].sh_info;
+
+        /* each code section should only have one relocation section */
+        WARN_ON(me->arch.section[s].stub_entries);
+
+        /* store number of stubs we need for this section */
+        me->arch.section[s].stub_entries += count;
     }
     /* align things a bit */
@@ -306,18 +348,8 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
     me->arch.fdesc_offset = me->core_size;
     me->core_size += fdescs * sizeof(Elf_Fdesc);
-    me->core_size = ALIGN(me->core_size, 16);
-    me->arch.stub_offset = me->core_size;
-    me->core_size += stubs * sizeof(struct stub_entry);
-
-    me->init_size = ALIGN(me->init_size, 16);
-    me->arch.init_stub_offset = me->init_size;
-    me->init_size += init_stubs * sizeof(struct stub_entry);
-
     me->arch.got_max = gots;
     me->arch.fdesc_max = fdescs;
-    me->arch.stub_max = stubs;
-    me->arch.init_stub_max = init_stubs;
     return 0;
 }
@@ -380,23 +412,27 @@ enum elf_stub_type {
 };
 static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
-    enum elf_stub_type stub_type, int init_section)
+    enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
 {
-    unsigned long i;
     struct stub_entry *stub;
-    if(init_section) {
-        i = me->arch.init_stub_count++;
-        BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
-        stub = me->module_init + me->arch.init_stub_offset +
-            i * sizeof(struct stub_entry);
-    } else {
-        i = me->arch.stub_count++;
-        BUG_ON(me->arch.stub_count > me->arch.stub_max);
-        stub = me->module_core + me->arch.stub_offset +
-            i * sizeof(struct stub_entry);
+    /* initialize stub_offset to point in front of the section */
+    if (!me->arch.section[targetsec].stub_offset) {
+        loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+                sizeof(struct stub_entry);
+        /* get correct alignment for the stubs */
+        loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+        me->arch.section[targetsec].stub_offset = loc0;
     }
+    /* get address of stub entry */
+    stub = (void *) me->arch.section[targetsec].stub_offset;
+    me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+    /* do not write outside available stub area */
+    BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
+
 #ifndef CONFIG_64BIT
     /* for 32-bit the stub looks like this:
      * ldil L'XXX,%r1
@@ -489,15 +525,19 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
     Elf32_Addr val;
     Elf32_Sword addend;
     Elf32_Addr dot;
+    Elf_Addr loc0;
+    unsigned int targetsec = sechdrs[relsec].sh_info;
     //unsigned long dp = (unsigned long)$global$;
     register unsigned long dp asm ("r27");
     DEBUGP("Applying relocate section %u to %u\n", relsec,
-        sechdrs[relsec].sh_info);
+        targetsec);
     for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
         /* This is where to make the change */
-        loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+        loc = (void *)sechdrs[targetsec].sh_addr
              + rel[i].r_offset;
+        /* This is the start of the target section */
+        loc0 = sechdrs[targetsec].sh_addr;
         /* This is the symbol it is referring to */
         sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
             + ELF32_R_SYM(rel[i].r_info);
@@ -569,19 +609,32 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
             break;
         case R_PARISC_PCREL17F:
             /* 17-bit PC relative address */
-            val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
+            /* calculate direct call offset */
+            val += addend;
             val = (val - dot - 8)/4;
-            CHECK_RELOC(val, 17)
+            if (!RELOC_REACHABLE(val, 17)) {
+                /* direct distance too far, create
+                 * stub entry instead */
+                val = get_stub(me, sym->st_value, addend,
+                    ELF_STUB_DIRECT, loc0, targetsec);
+                val = (val - dot - 8)/4;
+                CHECK_RELOC(val, 17);
+            }
             *loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
             break;
         case R_PARISC_PCREL22F:
             /* 22-bit PC relative address; only defined for pa20 */
-            val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
-            DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
-                strtab + sym->st_name, (unsigned long)loc, addend,
-                val)
+            /* calculate direct call offset */
+            val += addend;
             val = (val - dot - 8)/4;
-            CHECK_RELOC(val, 22);
+            if (!RELOC_REACHABLE(val, 22)) {
+                /* direct distance too far, create
+                 * stub entry instead */
+                val = get_stub(me, sym->st_value, addend,
+                    ELF_STUB_DIRECT, loc0, targetsec);
+                val = (val - dot - 8)/4;
+                CHECK_RELOC(val, 22);
+            }
             *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
             break;
@@ -610,13 +663,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
     Elf64_Addr val;
     Elf64_Sxword addend;
     Elf64_Addr dot;
+    Elf_Addr loc0;
+    unsigned int targetsec = sechdrs[relsec].sh_info;
     DEBUGP("Applying relocate section %u to %u\n", relsec,
-        sechdrs[relsec].sh_info);
+        targetsec);
     for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
         /* This is where to make the change */
-        loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+        loc = (void *)sechdrs[targetsec].sh_addr
              + rel[i].r_offset;
+        /* This is the start of the target section */
+        loc0 = sechdrs[targetsec].sh_addr;
         /* This is the symbol it is referring to */
         sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
             + ELF64_R_SYM(rel[i].r_info);
@@ -672,42 +729,40 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
             DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
                 strtab + sym->st_name, loc, val);
+            val += addend;
             /* can we reach it locally? */
-            if(!in_local_section(me, (void *)val, (void *)dot)) {
-
-                if (in_local(me, (void *)val))
-                    /* this is the case where the
-                     * symbol is local to the
-                     * module, but in a different
-                     * section, so stub the jump
-                     * in case it's more than 22
-                     * bits away */
-                    val = get_stub(me, val, addend, ELF_STUB_DIRECT,
-                        in_init(me, loc));
-                else if (strncmp(strtab + sym->st_name, "$$", 2)
+            if (in_local(me, (void *)val)) {
+                /* this is the case where the symbol is local
+                 * to the module, but in a different section,
+                 * so stub the jump in case it's more than 22
+                 * bits away */
+                val = (val - dot - 8)/4;
+                if (!RELOC_REACHABLE(val, 22)) {
+                    /* direct distance too far, create
+                     * stub entry instead */
+                    val = get_stub(me, sym->st_value,
+                        addend, ELF_STUB_DIRECT,
+                        loc0, targetsec);
+                } else {
+                    /* Ok, we can reach it directly. */
+                    val = sym->st_value;
+                    val += addend;
+                }
+            } else {
+                val = sym->st_value;
+                if (strncmp(strtab + sym->st_name, "$$", 2)
                     == 0)
                     val = get_stub(me, val, addend, ELF_STUB_MILLI,
-                        in_init(me, loc));
+                        loc0, targetsec);
                 else
                     val = get_stub(me, val, addend, ELF_STUB_GOT,
-                        in_init(me, loc));
            }
            DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
                strtab + sym->st_name, loc, sym->st_value,
                addend, val);
-            /* FIXME: local symbols work as long as the
-             * core and init pieces aren't separated too
-             * far.  If this is ever broken, you will trip
-             * the check below.  The way to fix it would
-             * be to generate local stubs to go between init
-             * and core */
-            if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
-               (Elf64_Sxword)(val - dot - 8) < -0x800000) {
-                printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
-                    me->name, strtab + sym->st_name);
-                return -ENOEXEC;
-            }
             val = (val - dot - 8)/4;
+            CHECK_RELOC(val, 22);
             *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
             break;
         case R_PARISC_DIR64:
@@ -794,12 +849,8 @@ int module_finalize(const Elf_Ehdr *hdr,
         addr = (u32 *)entry->addr;
         printk("INSNS: %x %x %x %x\n",
             addr[0], addr[1], addr[2], addr[3]);
-        printk("stubs used %ld, stubs max %ld\n"
-            "init_stubs used %ld, init stubs max %ld\n"
-            "got entries used %ld, gots max %ld\n"
+        printk("got entries used %ld, gots max %ld\n"
             "fdescs used %ld, fdescs max %ld\n",
-            me->arch.stub_count, me->arch.stub_max,
-            me->arch.init_stub_count, me->arch.init_stub_max,
            me->arch.got_count, me->arch.got_max,
            me->arch.fdesc_count, me->arch.fdesc_max);
 #endif
@@ -829,7 +880,10 @@ int module_finalize(const Elf_Ehdr *hdr,
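
For reference, the shd/dep asm sequences moved into the new asm/swab.h perform plain byte reversal, and __SWAB_64_THRU_32__ tells the generic byteorder code to build a 64-bit swap out of two 32-bit swaps when no 64-bit helper is available. A minimal plain-C sketch of what those helpers compute (the reference-only names swab32_ref/swab64_ref are illustrative and not kernel API):

#include <stdint.h>

/* Plain-C equivalent of the 32-bit swap done by the shd/dep sequence:
 * bytes abcd -> dcba. */
static inline uint32_t swab32_ref(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

/* What __SWAB_64_THRU_32__ implies: the 64-bit swap is two 32-bit swaps
 * with the halves exchanged. */
static inline uint64_t swab64_ref(uint64_t x)
{
	uint32_t hi = (uint32_t)(x >> 32);
	uint32_t lo = (uint32_t)x;

	return ((uint64_t)swab32_ref(lo) << 32) | swab32_ref(hi);
}

On a big-endian machine like PA-RISC this is exactly what le32_to_cpu()/le64_to_cpu() boil down to, which is why the io.h readw/readl/readq macros above can switch from the __fswab helpers to the generic le*_to_cpu() wrappers without changing behaviour.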
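
The module loader rework above hinges on one check: a PC-relative branch field of N bits (17 for R_PARISC_PCREL17F, 22 for R_PARISC_PCREL22F) can only encode a limited signed word displacement, and the new RELOC_REACHABLE() macro decides whether the direct distance fits before falling back to a PLT stub drawn from the per-section area reserved by arch_mod_section_prepend(). A minimal user-space sketch of that range test, assuming made-up addresses purely for illustration (fits_in_bits is a hypothetical helper, not the kernel macro, which works on already word-divided offsets):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does a signed word displacement fit in an N-bit branch field?
 * This mirrors the decision RELOC_REACHABLE() makes for N = 17 or 22. */
static bool fits_in_bits(int64_t disp, unsigned int bits)
{
	int64_t max = ((int64_t)1 << (bits - 1)) - 1;
	int64_t min = -((int64_t)1 << (bits - 1));

	return disp >= min && disp <= max;
}

int main(void)
{
	/* Hypothetical call site at 0x1000 targeting 0x250000; the
	 * displacement is expressed in words, minus the 8-byte delay
	 * slot offset, as in the apply_relocate_add() code above. */
	int64_t words = (0x250000 - 0x1000 - 8) / 4;

	printf("reachable with 17 bits: %d\n", fits_in_bits(words, 17));
	printf("reachable with 22 bits: %d\n", fits_in_bits(words, 22));
	return 0;
}

A 17-bit signed word field covers on the order of ±256 KiB of code, which is why large modules such as xfs or ipv6 could not reach a single stub array at the start of the module; placing the stubs directly in front of each code section keeps the displacement small enough for the relocation to resolve.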