Diffstat (limited to 'arch/parisc/kernel')
29 files changed, 693 insertions, 527 deletions
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 66ee3f12df5..ff87b4603e3 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -29,7 +29,9 @@ obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o obj-$(CONFIG_STACKTRACE)+= stacktrace.o +obj-$(CONFIG_AUDIT) += audit.o +obj64-$(CONFIG_AUDIT) += compat_audit.o # only supported for PCX-W/U in 64-bit mode at the moment -obj-$(CONFIG_64BIT) += perf.o perf_asm.o +obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y) obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c new file mode 100644 index 00000000000..eb64a6148c8 --- /dev/null +++ b/arch/parisc/kernel/audit.c @@ -0,0 +1,81 @@ +#include <linux/init.h> +#include <linux/types.h> +#include <linux/audit.h> +#include <asm/unistd.h> + +static unsigned dir_class[] = { +#include <asm-generic/audit_dir_write.h> +~0U +}; + +static unsigned read_class[] = { +#include <asm-generic/audit_read.h> +~0U +}; + +static unsigned write_class[] = { +#include <asm-generic/audit_write.h> +~0U +}; + +static unsigned chattr_class[] = { +#include <asm-generic/audit_change_attr.h> +~0U +}; + +static unsigned signal_class[] = { +#include <asm-generic/audit_signal.h> +~0U +}; + +int audit_classify_arch(int arch) +{ +#ifdef CONFIG_COMPAT + if (arch == AUDIT_ARCH_PARISC) + return 1; +#endif + return 0; +} + +int audit_classify_syscall(int abi, unsigned syscall) +{ +#ifdef CONFIG_COMPAT + extern int parisc32_classify_syscall(unsigned); + if (abi == AUDIT_ARCH_PARISC) + return parisc32_classify_syscall(syscall); +#endif + switch (syscall) { + case __NR_open: + return 2; + case __NR_openat: + return 3; + case __NR_execve: + return 5; + default: + return 0; + } +} + +static int __init audit_classes_init(void) +{ +#ifdef CONFIG_COMPAT + extern __u32 parisc32_dir_class[]; + extern __u32 parisc32_write_class[]; + extern __u32 parisc32_read_class[]; + extern __u32 parisc32_chattr_class[]; + extern __u32 parisc32_signal_class[]; + audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class); + audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class); + audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class); +#endif + audit_register_class(AUDIT_CLASS_WRITE, write_class); + audit_register_class(AUDIT_CLASS_READ, read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); + audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); + return 0; +} + +__initcall(audit_classes_init); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 65fb4cbc3a0..f6448c7c62b 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -71,18 +71,27 @@ flush_cache_all_local(void) } EXPORT_SYMBOL(flush_cache_all_local); +/* Virtual address of pfn. */ +#define pfn_va(pfn) __va(PFN_PHYS(pfn)) + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - struct page *page = pte_page(*ptep); + unsigned long pfn = pte_pfn(*ptep); + struct page *page; - if (pfn_valid(page_to_pfn(page)) && page_mapping(page) && - test_bit(PG_dcache_dirty, &page->flags)) { + /* We don't have pte special. 
As a result, we can be called with + an invalid pfn and we don't need to flush the kernel dcache page. + This occurs with FireGL card in C8000. */ + if (!pfn_valid(pfn)) + return; - flush_kernel_dcache_page(page); + page = pfn_to_page(pfn); + if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) { + flush_kernel_dcache_page_addr(pfn_va(pfn)); clear_bit(PG_dcache_dirty, &page->flags); } else if (parisc_requires_coherency()) - flush_kernel_dcache_page(page); + flush_kernel_dcache_page_addr(pfn_va(pfn)); } void @@ -314,7 +323,8 @@ void flush_dcache_page(struct page *page) * specifically accesses it, of course) */ flush_tlb_page(mpnt, addr); - if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) { + if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) + != (addr & (SHM_COLOUR - 1))) { __flush_cache_page(mpnt, addr, page_to_phys(page)); if (old_addr) printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)"); @@ -379,41 +389,20 @@ void flush_kernel_dcache_page_addr(void *addr) } EXPORT_SYMBOL(flush_kernel_dcache_page_addr); -void clear_user_page(void *vto, unsigned long vaddr, struct page *page) -{ - clear_page_asm(vto); - if (!parisc_requires_coherency()) - flush_kernel_dcache_page_asm(vto); -} -EXPORT_SYMBOL(clear_user_page); - void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg) { - /* Copy using kernel mapping. No coherency is needed - (all in kmap/kunmap) on machines that don't support - non-equivalent aliasing. However, the `from' page - needs to be flushed before it can be accessed through - the kernel mapping. */ + /* Copy using kernel mapping. No coherency is needed (all in + kunmap) for the `to' page. However, the `from' page needs to + be flushed through a mapping equivalent to the user mapping + before it can be accessed through the kernel mapping. */ preempt_disable(); flush_dcache_page_asm(__pa(vfrom), vaddr); preempt_enable(); copy_page_asm(vto, vfrom); - if (!parisc_requires_coherency()) - flush_kernel_dcache_page_asm(vto); } EXPORT_SYMBOL(copy_user_page); -#ifdef CONFIG_PA8X00 - -void kunmap_parisc(void *addr) -{ - if (parisc_requires_coherency()) - flush_kernel_dcache_page_addr(addr); -} -EXPORT_SYMBOL(kunmap_parisc); -#endif - void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) { unsigned long flags; @@ -440,8 +429,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, else { unsigned long flags; - mtsp(sid, 1); purge_tlb_start(flags); + mtsp(sid, 1); if (split_tlb) { while (npages--) { pdtlb(start); @@ -495,44 +484,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr) void flush_cache_mm(struct mm_struct *mm) { + struct vm_area_struct *vma; + pgd_t *pgd; + /* Flushing the whole cache on each cpu takes forever on rp3440, etc. So, avoid it if the mm isn't too big. 
*/ - if (mm_total_size(mm) < parisc_cache_flush_threshold) { - struct vm_area_struct *vma; - - if (mm->context == mfsp(3)) { - for (vma = mm->mmap; vma; vma = vma->vm_next) { - flush_user_dcache_range_asm(vma->vm_start, - vma->vm_end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm( - vma->vm_start, vma->vm_end); - } - } else { - pgd_t *pgd = mm->pgd; - - for (vma = mm->mmap; vma; vma = vma->vm_next) { - unsigned long addr; - - for (addr = vma->vm_start; addr < vma->vm_end; - addr += PAGE_SIZE) { - pte_t *ptep = get_ptep(pgd, addr); - if (ptep != NULL) { - pte_t pte = *ptep; - __flush_cache_page(vma, addr, - page_to_phys(pte_page(pte))); - } - } - } + if (mm_total_size(mm) >= parisc_cache_flush_threshold) { + flush_cache_all(); + return; + } + + if (mm->context == mfsp(3)) { + for (vma = mm->mmap; vma; vma = vma->vm_next) { + flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); + if ((vma->vm_flags & VM_EXEC) == 0) + continue; + flush_user_icache_range_asm(vma->vm_start, vma->vm_end); } return; } -#ifdef CONFIG_SMP - flush_cache_all(); -#else - flush_cache_all_local(); -#endif + pgd = mm->pgd; + for (vma = mm->mmap; vma; vma = vma->vm_next) { + unsigned long addr; + + for (addr = vma->vm_start; addr < vma->vm_end; + addr += PAGE_SIZE) { + unsigned long pfn; + pte_t *ptep = get_ptep(pgd, addr); + if (!ptep) + continue; + pfn = pte_pfn(*ptep); + if (!pfn_valid(pfn)) + continue; + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + } + } } void @@ -556,33 +543,32 @@ flush_user_icache_range(unsigned long start, unsigned long end) void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { + unsigned long addr; + pgd_t *pgd; + BUG_ON(!vma->vm_mm->context); - if ((end - start) < parisc_cache_flush_threshold) { - if (vma->vm_mm->context == mfsp(3)) { - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); - } else { - unsigned long addr; - pgd_t *pgd = vma->vm_mm->pgd; - - for (addr = start & PAGE_MASK; addr < end; - addr += PAGE_SIZE) { - pte_t *ptep = get_ptep(pgd, addr); - if (ptep != NULL) { - pte_t pte = *ptep; - flush_cache_page(vma, - addr, pte_pfn(pte)); - } - } - } - } else { -#ifdef CONFIG_SMP + if ((end - start) >= parisc_cache_flush_threshold) { flush_cache_all(); -#else - flush_cache_all_local(); -#endif + return; + } + + if (vma->vm_mm->context == mfsp(3)) { + flush_user_dcache_range_asm(start, end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(start, end); + return; + } + + pgd = vma->vm_mm->pgd; + for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { + unsigned long pfn; + pte_t *ptep = get_ptep(pgd, addr); + if (!ptep) + continue; + pfn = pte_pfn(*ptep); + if (pfn_valid(pfn)) + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); } } @@ -591,71 +577,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long { BUG_ON(!vma->vm_mm->context); - flush_tlb_page(vma, vmaddr); - __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn))); - -} - -#ifdef CONFIG_PARISC_TMPALIAS - -void clear_user_highpage(struct page *page, unsigned long vaddr) -{ - void *vto; - unsigned long flags; - - /* Clear using TMPALIAS region. The page doesn't need to - be flushed but the kernel mapping needs to be purged. 
*/ - - vto = kmap_atomic(page); - - /* The PA-RISC 2.0 Architecture book states on page F-6: - "Before a write-capable translation is enabled, *all* - non-equivalently-aliased translations must be removed - from the page table and purged from the TLB. (Note - that the caches are not required to be flushed at this - time.) Before any non-equivalent aliased translation - is re-enabled, the virtual address range for the writeable - page (the entire page) must be flushed from the cache, - and the write-capable translation removed from the page - table and purged from the TLB." */ - - purge_kernel_dcache_page_asm((unsigned long)vto); - purge_tlb_start(flags); - pdtlb_kernel(vto); - purge_tlb_end(flags); - preempt_disable(); - clear_user_page_asm(vto, vaddr); - preempt_enable(); - - pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ -} - -void copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma) -{ - void *vfrom, *vto; - unsigned long flags; - - /* Copy using TMPALIAS region. This has the advantage - that the `from' page doesn't need to be flushed. However, - the `to' page must be flushed in copy_user_page_asm since - it can be used to bring in executable code. */ - - vfrom = kmap_atomic(from); - vto = kmap_atomic(to); - - purge_kernel_dcache_page_asm((unsigned long)vto); - purge_tlb_start(flags); - pdtlb_kernel(vto); - pdtlb_kernel(vfrom); - purge_tlb_end(flags); - preempt_disable(); - copy_user_page_asm(vto, vfrom, vaddr); - flush_dcache_page_asm(__pa(vto), vaddr); - preempt_enable(); - - pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */ - pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ + if (pfn_valid(pfn)) { + flush_tlb_page(vma, vmaddr); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + } } - -#endif /* CONFIG_PARISC_TMPALIAS */ diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c new file mode 100644 index 00000000000..c74478f6bc7 --- /dev/null +++ b/arch/parisc/kernel/compat_audit.c @@ -0,0 +1,40 @@ +#include <asm/unistd.h> + +unsigned int parisc32_dir_class[] = { +#include <asm-generic/audit_dir_write.h> +~0U +}; + +unsigned int parisc32_chattr_class[] = { +#include <asm-generic/audit_change_attr.h> +~0U +}; + +unsigned int parisc32_write_class[] = { +#include <asm-generic/audit_write.h> +~0U +}; + +unsigned int parisc32_read_class[] = { +#include <asm-generic/audit_read.h> +~0U +}; + +unsigned int parisc32_signal_class[] = { +#include <asm-generic/audit_signal.h> +~0U +}; + +int parisc32_classify_syscall(unsigned syscall) +{ + switch (syscall) { + case __NR_open: + return 2; + case __NR_openat: + return 3; + case __NR_execve: + return 5; + default: + return 1; + } +} diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 14285caec71..dba508fe168 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -282,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type) return NULL; } -#ifdef CONFIG_PCI -static inline int is_pci_dev(struct device *dev) -{ - return dev->bus == &pci_bus_type; -} -#else -static inline int is_pci_dev(struct device *dev) -{ - return 0; -} -#endif - /* * get_node_path fills in @path with the firmware path to the device. * Note that if @node is a parisc device, we don't fill in the 'mod' field. 
@@ -306,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path) int i = 5; memset(&path->bc, -1, 6); - if (is_pci_dev(dev)) { + if (dev_is_pci(dev)) { unsigned int devfn = to_pci_dev(dev)->devfn; path->mod = PCI_FUNC(devfn); path->bc[i--] = PCI_SLOT(devfn); @@ -314,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path) } while (dev != &root) { - if (is_pci_dev(dev)) { + if (dev_is_pci(dev)) { unsigned int devfn = to_pci_dev(dev)->devfn; path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5); } else if (dev->bus == &parisc_bus_type) { @@ -695,7 +683,7 @@ static int check_parent(struct device * dev, void * data) if (dev->bus == &parisc_bus_type) { if (match_parisc_device(dev, d->index, d->modpath)) d->dev = dev; - } else if (is_pci_dev(dev)) { + } else if (dev_is_pci(dev)) { if (match_pci_device(dev, d->index, d->modpath)) d->dev = dev; } else if (dev->bus == NULL) { @@ -753,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath) if (!parent) return NULL; } - if (is_pci_dev(parent)) /* pci devices already parse MOD */ + if (dev_is_pci(parent)) /* pci devices already parse MOD */ return parent; else return parse_tree_node(parent, 6, modpath); @@ -772,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path) padev = to_parisc_device(dev); get_node_path(dev->parent, path); path->mod = padev->hw_path; - } else if (is_pci_dev(dev)) { + } else if (dev_is_pci(dev)) { get_node_path(dev, path); } } diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index f65fa480c90..22395901d47 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -150,7 +150,7 @@ static void convert_to_wide(unsigned long *addr) } #ifdef CONFIG_64BIT -void __cpuinit set_firmware_width_unlocked(void) +void set_firmware_width_unlocked(void) { int ret; @@ -167,7 +167,7 @@ void __cpuinit set_firmware_width_unlocked(void) * This function must be called before any pdc_* function that uses the * convert_to_wide function. */ -void __cpuinit set_firmware_width(void) +void set_firmware_width(void) { unsigned long flags; spin_lock_irqsave(&pdc_lock, flags); @@ -175,11 +175,13 @@ void __cpuinit set_firmware_width(void) spin_unlock_irqrestore(&pdc_lock, flags); } #else -void __cpuinit set_firmware_width_unlocked(void) { +void set_firmware_width_unlocked(void) +{ return; } -void __cpuinit set_firmware_width(void) { +void set_firmware_width(void) +{ return; } #endif /*CONFIG_64BIT*/ @@ -301,7 +303,7 @@ int pdc_chassis_warn(unsigned long *warn) return retval; } -int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) +int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) { int ret; @@ -322,7 +324,7 @@ int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) * This PDC call returns the presence and status of all the coprocessors * attached to the processor. */ -int __cpuinit pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) +int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) { int ret; unsigned long flags; diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index 9e2d2e40852..af3bc359dc7 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c @@ -36,6 +36,9 @@ * HP PARISC Hardware Database * Access to this database is only possible during bootup * so don't reference this table after starting the init process + * + * NOTE: Product names which are listed here and ends with a '?' 
+ * are guessed. If you know the correct name, please let us know. */ static struct hp_hardware hp_hardware_list[] = { @@ -222,7 +225,7 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, - {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"}, + {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"}, {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, @@ -276,9 +279,11 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"}, {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"}, {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"}, + {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"}, {HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"}, {HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"}, {HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"}, + {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"}, {HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"}, {HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"}, {HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"}, @@ -1205,6 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = { {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, + {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, @@ -1366,7 +1373,7 @@ const char *parisc_hardware_description(struct parisc_device_id *id) /* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */ -enum cpu_type __cpuinit +enum cpu_type parisc_get_cpu_type(unsigned long hversion) { struct hp_cpu_type_mask *ptr; diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index 37aabd772fb..d4dc588c0dc 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -41,9 +41,7 @@ END(boot_args) .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ .import $global$ /* forward declaration */ #endif /*!CONFIG_64BIT*/ - .export _stext,data /* Kernel want it this way! */ -_stext: -ENTRY(stext) +ENTRY(parisc_kernel_start) .proc .callinfo @@ -195,6 +193,8 @@ common_stext: ldw MEM_PDC_HI(%r0),%r6 depd %r6, 31, 32, %r3 /* move to upper word */ + mfctl %cr30,%r6 /* PCX-W2 firmware bug */ + ldo PDC_PSW(%r0),%arg0 /* 21 */ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */ @@ -203,6 +203,8 @@ common_stext: copy %r0,%arg3 stext_pdc_ret: + mtctl %r6,%cr30 /* restore task thread info */ + /* restore rfi target address*/ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10 tophys_r1 %r10 @@ -343,7 +345,7 @@ smp_slave_stext: .procend #endif /* CONFIG_SMP */ -ENDPROC(stext) +ENDPROC(parisc_kernel_start) #ifndef CONFIG_64BIT .section .data..read_mostly diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 3295ef4a185..f0b6722fc70 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index) /* REVISIT: who is the consumer of this? not sure yet... 
*/ dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */ dev->pmod_loc = pa_pdc_cell->mod_location; + dev->mod0 = pa_pdc_cell->mod[0]; register_parisc_device(dev); /* advertise device */ diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 2e6443b1e92..cfe056fe7f5 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -117,7 +117,7 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) return -EINVAL; /* whatever mask they set, we just allow one CPU */ - cpu_dest = first_cpu(*dest); + cpu_dest = cpumask_first_and(dest, cpu_online_mask); return cpu_dest; } @@ -179,10 +179,6 @@ int arch_show_interrupts(struct seq_file *p, int prec) for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); seq_puts(p, " Rescheduling interrupts\n"); - seq_printf(p, "%*s: ", prec, "CAL"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); - seq_puts(p, " Function call interrupts\n"); #endif seq_printf(p, "%*s: ", prec, "UAH"); for_each_online_cpu(j) @@ -499,22 +495,9 @@ static void execute_on_irq_stack(void *func, unsigned long param1) *irq_stack_in_use = 1; } -asmlinkage void do_softirq(void) +void do_softirq_own_stack(void) { - __u32 pending; - unsigned long flags; - - if (in_interrupt()) - return; - - local_irq_save(flags); - - pending = local_softirq_pending(); - - if (pending) - execute_on_irq_stack(__do_softirq, 0); - - local_irq_restore(flags); + execute_on_irq_stack(__do_softirq, 0); } #endif /* CONFIG_IRQSTACKS */ diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 2a625fb063e..50dfafc3f2c 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -219,7 +219,7 @@ void *module_alloc(unsigned long size) * init_data correctly */ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, GFP_KERNEL | __GFP_HIGHMEM, - PAGE_KERNEL_RWX, -1, + PAGE_KERNEL_RWX, NUMA_NO_NODE, __builtin_return_address(0)); } diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 36d7f402e48..b743a80eaba 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm) #endif ldil L%dcache_stride, %r1 - ldw R%dcache_stride(%r1), %r1 + ldw R%dcache_stride(%r1), r31 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, %r1, %r25 - - -1: fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) - fdc,m %r1(%r28) + sub %r25, r31, %r25 + + +1: fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) + fdc,m r31(%r28) cmpb,COND(<<) %r28, %r25,1b - fdc,m %r1(%r28) + fdc,m r31(%r28) sync @@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm) #endif ldil L%icache_stride, %r1 - ldw R%icache_stride(%r1), %r1 + ldw R%icache_stride(%r1), %r31 #ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 @@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm) depwi,z 1, 31-PAGE_SHIFT,1, %r25 #endif add %r28, %r25, %r25 - sub %r25, %r1, %r25 + sub %r25, %r31, %r25 /* fic only has the type 26 form on PA1.1, 
requiring an * explicit space specification, so use %sr4 */ -1: fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) - fic,m %r1(%sr4,%r28) +1: fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) + fic,m %r31(%sr4,%r28) cmpb,COND(<<) %r28, %r25,1b - fic,m %r1(%sr4,%r28) + fic,m %r31(%sr4,%r28) sync diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 60309051875..64f2764a8ce 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } +int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + unsigned long prot; + + /* + * I/O space can be accessed via normal processor loads and stores on + * this platform but for now we elect not to do this and portable + * drivers should not do this anyway. + */ + if (mmap_state == pci_mmap_io) + return -EINVAL; + + if (write_combine) + return -EINVAL; + + /* + * Ignore write-combine; for now only return uncached mappings. + */ + prot = pgprot_val(vma->vm_page_prot); + prot |= _PAGE_NO_CACHE; + vma->vm_page_prot = __pgprot(prot); + + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot); +} + /* * A driver is enabling the device. We make sure that all the appropriate * bits are set to allow the device to operate as the driver is expecting. 
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 55f92b61418..0bbbf0d3f60 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -13,7 +13,7 @@ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org> * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org> * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> - * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org> + * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * * @@ -49,6 +49,7 @@ #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/rcupdate.h> +#include <linux/random.h> #include <asm/io.h> #include <asm/asm-offsets.h> @@ -286,3 +287,21 @@ void *dereference_function_descriptor(void *ptr) return ptr; } #endif + +static inline unsigned long brk_rnd(void) +{ + /* 8MB for 32bit, 1GB for 64bit */ + if (is_32bit_task()) + return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; + else + return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); + + if (ret < mm->brk) + return mm->brk; + return ret; +} diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index c8fb61ed32f..b68d977ce30 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -73,7 +73,7 @@ extern int update_cr16_clocksource(void); /* from time.c */ * * FIXME: doesn't do much yet... */ -static void __cpuinit +static void init_percpu_prof(unsigned long cpunum) { struct cpuinfo_parisc *p; @@ -92,7 +92,7 @@ init_percpu_prof(unsigned long cpunum) * (return 1). If so, initialize the chip and tell other partners in crime * they have work to do. */ -static int __cpuinit processor_probe(struct parisc_device *dev) +static int processor_probe(struct parisc_device *dev) { unsigned long txn_addr; unsigned long cpuid; @@ -299,7 +299,7 @@ void __init collect_boot_cpu_data(void) * * o Enable CPU profiling hooks. 
*/ -int __cpuinit init_per_cpu(int cpunum) +int init_per_cpu(int cpunum) { int ret; struct pdc_coproc_cfg coproc_cfg; @@ -371,10 +371,23 @@ show_cpuinfo (struct seq_file *m, void *v) seq_printf(m, "capabilities\t:"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32) - seq_printf(m, " os32"); + seq_puts(m, " os32"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64) - seq_printf(m, " os64"); - seq_printf(m, "\n"); + seq_puts(m, " os64"); + if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) + seq_puts(m, " iopdir_fdc"); + switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) { + case PDC_MODEL_NVA_SUPPORTED: + seq_puts(m, " nva_supported"); + break; + case PDC_MODEL_NVA_SLOW: + seq_puts(m, " nva_slow"); + break; + case PDC_MODEL_NVA_UNSUPPORTED: + seq_puts(m, " needs_equivalent_aliasing"); + break; + } + seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities); seq_printf(m, "model\t\t: %s\n" "model name\t: %s\n", diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 534abd4936e..e842ee233db 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -19,6 +19,7 @@ #include <linux/security.h> #include <linux/compat.h> #include <linux/signal.h> +#include <linux/audit.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -267,11 +268,28 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { + long ret = 0; + if (test_thread_flag(TIF_SYSCALL_TRACE) && tracehook_report_syscall_entry(regs)) - return -1L; - - return regs->gr[20]; + ret = -1L; + +#ifdef CONFIG_64BIT + if (!is_compat_task()) + audit_syscall_entry(AUDIT_ARCH_PARISC64, + regs->gr[20], + regs->gr[26], regs->gr[25], + regs->gr[24], regs->gr[23]); + else +#endif + audit_syscall_entry(AUDIT_ARCH_PARISC, + regs->gr[20] & 0xffffffff, + regs->gr[26] & 0xffffffff, + regs->gr[25] & 0xffffffff, + regs->gr[24] & 0xffffffff, + regs->gr[23] & 0xffffffff); + + return ret ? : regs->gr[20]; } void do_syscall_trace_exit(struct pt_regs *regs) @@ -279,6 +297,8 @@ void do_syscall_trace_exit(struct pt_regs *regs) int stepping = test_thread_flag(TIF_SINGLESTEP) || test_thread_flag(TIF_BLOCKSTEP); + audit_syscall_exit(regs); + if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, stepping); } diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 1e95b2000ce..72a3c658ad7 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -156,7 +156,7 @@ void __init setup_arch(char **cmdline_p) #endif #if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE) - conswitchp = &dummy_con; /* we use take_over_console() later ! */ + conswitchp = &dummy_con; /* we use do_take_over_console() later ! 
*/ #endif } @@ -318,8 +318,12 @@ static int __init parisc_init(void) pdc_stable_write(0x40, &osid, sizeof(osid)); processor_init(); - printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n", - num_present_cpus(), +#ifdef CONFIG_SMP + pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", + num_online_cpus(), num_present_cpus(), +#else + pr_info("CPU(s): 1 x %s at %d.%06d MHz\n", +#endif boot_cpu_data.cpu_name, boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 940188d1942..1cba8f29bb4 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -56,13 +56,6 @@ #define A(__x) ((unsigned long)(__x)) /* - * Atomically swap in the new signal mask, and wait for a signal. - */ -#ifdef CONFIG_64BIT -#include "sys32.h" -#endif - -/* * Do a signal return - restore sigcontext. */ @@ -85,7 +78,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq)); err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq)); err |= __get_user(regs->sar, &sc->sc_sar); - DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n", + DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n", regs->iaoq[0],regs->iaoq[1]); DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]); return err; diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index 33eca1b0492..984abbee71c 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -34,7 +34,6 @@ #include <asm/uaccess.h> #include "signal32.h" -#include "sys32.h" #define DEBUG_COMPAT_SIG 0 #define DEBUG_COMPAT_SIG_LEVEL 2 @@ -320,7 +319,7 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) } int -copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from) +copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from) { compat_uptr_t addr; compat_int_t val; diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h index 72ab41a51f3..af51d4ccee4 100644 --- a/arch/parisc/kernel/signal32.h +++ b/arch/parisc/kernel/signal32.h @@ -34,7 +34,7 @@ struct compat_ucontext { /* ELF32 signal handling */ -int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from); +int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from); int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from); /* In a deft move of uber-hackery, we decide to carry the top half of all diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index e3614fb343e..ceda229ea6c 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -62,9 +62,9 @@ static int smp_debug_lvl = 0; volatile struct task_struct *smp_init_current_idle_task; /* track which CPU is booting */ -static volatile int cpu_now_booting __cpuinitdata; +static volatile int cpu_now_booting; -static int parisc_max_cpus __cpuinitdata = 1; +static int parisc_max_cpus = 1; static DEFINE_PER_CPU(spinlock_t, ipi_lock); @@ -72,7 +72,6 @@ enum ipi_message_type { IPI_NOP=0, IPI_RESCHEDULE=1, IPI_CALL_FUNC, - IPI_CALL_FUNC_SINGLE, IPI_CPU_START, IPI_CPU_STOP, IPI_CPU_TEST @@ -126,11 +125,6 @@ ipi_interrupt(int irq, void *dev_id) unsigned long ops; unsigned long flags; - /* Count this now; we may make a call that never returns. */ - inc_irq_stat(irq_call_count); - - mb(); /* Order interrupt and bit testing. 
*/ - for (;;) { spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); spin_lock_irqsave(lock, flags); @@ -164,11 +158,6 @@ ipi_interrupt(int irq, void *dev_id) generic_smp_call_function_interrupt(); break; - case IPI_CALL_FUNC_SINGLE: - smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu); - generic_smp_call_function_single_interrupt(); - break; - case IPI_CPU_START: smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu); break; @@ -260,7 +249,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) void arch_send_call_function_single_ipi(int cpu) { - send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); + send_IPI_single(cpu, IPI_CALL_FUNC); } /* @@ -328,7 +317,7 @@ void __init smp_callin(void) /* * Bring one cpu online. */ -int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle) +int smp_boot_one_cpu(int cpuid, struct task_struct *idle) { const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); long timeout; @@ -424,7 +413,7 @@ void smp_cpus_done(unsigned int cpu_max) } -int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { if (cpu != 0 && cpu < parisc_max_cpus) smp_boot_one_cpu(cpu, tidle); diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h deleted file mode 100644 index 60dd470f39f..00000000000 --- a/arch/parisc/kernel/sys32.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org> - * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org> - * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef _PARISC64_KERNEL_SYS32_H -#define _PARISC64_KERNEL_SYS32_H - -#include <linux/compat.h> - -/* Call a kernel syscall which will use kernel space instead of user - * space for its copy_to/from_user. - */ -#define KERNEL_SYSCALL(ret, syscall, args...) 
\ -{ \ - mm_segment_t old_fs = get_fs(); \ - set_fs(KERNEL_DS); \ - ret = syscall(args); \ - set_fs (old_fs); \ -} - -#endif diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 5dfd248e3f1..e1ffea2f9a0 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -5,6 +5,7 @@ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> + * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de> * * * This program is free software; you can redistribute it and/or modify @@ -23,6 +24,7 @@ */ #include <asm/uaccess.h> +#include <asm/elf.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/linkage.h> @@ -32,73 +34,230 @@ #include <linux/syscalls.h> #include <linux/utsname.h> #include <linux/personality.h> +#include <linux/random.h> -static unsigned long get_unshared_area(unsigned long addr, unsigned long len) +/* we construct an artificial offset for the mapping based on the physical + * address of the kernel mapping variable */ +#define GET_LAST_MMAP(filp) \ + (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL) +#define SET_LAST_MMAP(filp, val) \ + { /* nothing */ } + +static int get_offset(unsigned int last_mmap) { - struct vm_unmapped_area_info info; + return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT; +} - info.flags = 0; - info.length = len; - info.low_limit = PAGE_ALIGN(addr); - info.high_limit = TASK_SIZE; - info.align_mask = 0; - info.align_offset = 0; - return vm_unmapped_area(&info); +static unsigned long shared_align_offset(unsigned int last_mmap, + unsigned long pgoff) +{ + return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT; +} + +static inline unsigned long COLOR_ALIGN(unsigned long addr, + unsigned int last_mmap, unsigned long pgoff) +{ + unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1); + unsigned long off = (SHM_COLOUR-1) & + (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT); + + return base + off; } /* - * We need to know the offset to use. Old scheme was to look for - * existing mapping and use the same offset. New scheme is to use the - * address of the kernel data structure as the seed for the offset. - * We'll see how that works... - * - * The mapping is cacheline aligned, so there's no information in the bottom - * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's - * drop the bottom 8 bits and use bits 8-17. + * Top of mmap area (just below the process stack). 
*/ -static int get_offset(struct address_space *mapping) + +static unsigned long mmap_upper_limit(void) { - return (unsigned long) mapping >> 8; + unsigned long stack_base; + + /* Limit stack size - see setup_arg_pages() in fs/exec.c */ + stack_base = rlimit_max(RLIMIT_STACK); + if (stack_base > STACK_SIZE_MAX) + stack_base = STACK_SIZE_MAX; + + return PAGE_ALIGN(STACK_TOP - stack_base); } -static unsigned long get_shared_area(struct address_space *mapping, - unsigned long addr, unsigned long len, unsigned long pgoff) + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) { + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; + int do_color_align, last_mmap; struct vm_unmapped_area_info info; + if (len > task_size) + return -ENOMEM; + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + last_mmap = GET_LAST_MMAP(filp); + + if (flags & MAP_FIXED) { + if ((flags & MAP_SHARED) && last_mmap && + (addr - shared_align_offset(last_mmap, pgoff)) + & (SHM_COLOUR - 1)) + return -EINVAL; + goto found_addr; + } + + if (addr) { + if (do_color_align && last_mmap) + addr = COLOR_ALIGN(addr, last_mmap, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vma->vm_start)) + goto found_addr; + } + info.flags = 0; info.length = len; - info.low_limit = PAGE_ALIGN(addr); - info.high_limit = TASK_SIZE; - info.align_mask = PAGE_MASK & (SHMLBA - 1); - info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT; - return vm_unmapped_area(&info); + info.low_limit = mm->mmap_legacy_base; + info.high_limit = mmap_upper_limit(); + info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; + info.align_offset = shared_align_offset(last_mmap, pgoff); + addr = vm_unmapped_area(&info); + +found_addr: + if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK)) + SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); + + return addr; } -unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) { + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + int do_color_align, last_mmap; + struct vm_unmapped_area_info info; + +#ifdef CONFIG_64BIT + /* This should only ever run for 32-bit processes. 
*/ + BUG_ON(!test_thread_flag(TIF_32BIT)); +#endif + + /* requested length too big for entire address space */ if (len > TASK_SIZE) return -ENOMEM; + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + last_mmap = GET_LAST_MMAP(filp); + if (flags & MAP_FIXED) { - if ((flags & MAP_SHARED) && - (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) + if ((flags & MAP_SHARED) && last_mmap && + (addr - shared_align_offset(last_mmap, pgoff)) + & (SHM_COLOUR - 1)) return -EINVAL; - return addr; + goto found_addr; } - if (!addr) - addr = TASK_UNMAPPED_BASE; - if (filp) { - addr = get_shared_area(filp->f_mapping, addr, len, pgoff); - } else if(flags & MAP_SHARED) { - addr = get_shared_area(NULL, addr, len, pgoff); - } else { - addr = get_unshared_area(addr, len); + /* requesting a specific address */ + if (addr) { + if (do_color_align && last_mmap) + addr = COLOR_ALIGN(addr, last_mmap, pgoff); + else + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + goto found_addr; } + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; + info.align_offset = shared_align_offset(last_mmap, pgoff); + addr = vm_unmapped_area(&info); + if (!(addr & ~PAGE_MASK)) + goto found_addr; + VM_BUG_ON(addr != -ENOMEM); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + return arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + +found_addr: + if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK)) + SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT)); + return addr; } +static int mmap_is_legacy(void) +{ + if (current->personality & ADDR_COMPAT_LAYOUT) + return 1; + + /* parisc stack always grows up - so a unlimited stack should + * not be an indicator to use the legacy memory layout. 
+ * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) + * return 1; + */ + + return sysctl_legacy_va_layout; +} + +static unsigned long mmap_rnd(void) +{ + unsigned long rnd = 0; + + /* + * 8 bits of randomness in 32bit mmaps, 20 address space bits + * 28 bits of randomness in 64bit mmaps, 40 address space bits + */ + if (current->flags & PF_RANDOMIZE) { + if (is_32bit_task()) + rnd = get_random_int() % (1<<8); + else + rnd = get_random_int() % (1<<28); + } + return rnd << PAGE_SHIFT; +} + +static unsigned long mmap_legacy_base(void) +{ + return TASK_UNMAPPED_BASE + mmap_rnd(); +} + +/* + * This function, called very early during the creation of a new + * process VM image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + mm->mmap_legacy_base = mmap_legacy_base(); + mm->mmap_base = mmap_upper_limit(); + + if (mmap_is_legacy()) { + mm->mmap_base = mm->mmap_legacy_base; + mm->get_unmapped_area = arch_get_unmapped_area; + } else { + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + } +} + + asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index a134ff4da12..93c1963d76f 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c @@ -4,6 +4,7 @@ * Copyright (C) 2000-2001 Hewlett Packard Company * Copyright (C) 2000 John Marvin * Copyright (C) 2001 Matthew Wilcox + * Copyright (C) 2014 Helge Deller <deller@gmx.de> * * These routines maintain argument size conversion between 32bit and 64bit * environment. Based heavily on sys_ia32.c and sys_sparc32.c. @@ -11,46 +12,8 @@ #include <linux/compat.h> #include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/fs.h> -#include <linux/mm.h> -#include <linux/file.h> -#include <linux/signal.h> -#include <linux/resource.h> -#include <linux/times.h> -#include <linux/time.h> -#include <linux/smp.h> -#include <linux/sem.h> -#include <linux/shm.h> -#include <linux/slab.h> -#include <linux/uio.h> -#include <linux/ncp_fs.h> -#include <linux/poll.h> -#include <linux/personality.h> -#include <linux/stat.h> -#include <linux/highmem.h> -#include <linux/highuid.h> -#include <linux/mman.h> -#include <linux/binfmts.h> -#include <linux/namei.h> -#include <linux/vfs.h> -#include <linux/ptrace.h> -#include <linux/swap.h> #include <linux/syscalls.h> -#include <asm/types.h> -#include <asm/uaccess.h> -#include <asm/mmu_context.h> - -#include "sys32.h" - -#undef DEBUG - -#ifdef DEBUG -#define DBG(x) printk x -#else -#define DBG(x) -#endif asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, int r22, int r21, int r20) @@ -59,3 +22,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } + +asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, + compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, + const char __user * pathname) +{ + return sys_fanotify_mark(fanotify_fd, flags, + ((__u64)mask1 << 32) | mask0, + dfd, pathname); +} diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index e767ab733e3..83878601103 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -589,10 +589,13 @@ cas_nocontend: # endif /* ENABLE_LWS_DEBUG */ + rsm PSW_SM_I, %r0 /* Disable interrupts */ + /* COW breaks can cause contention on UP systems */ LDCW 
0(%sr2,%r20), %r28 /* Try to acquire the lock */ cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */ cas_wouldblock: ldo 2(%r0), %r28 /* 2nd case */ + ssm PSW_SM_I, %r0 b lws_exit /* Contended... */ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ @@ -619,15 +622,17 @@ cas_action: stw %r1, 4(%sr2,%r20) #endif /* The load and store could fail */ -1: ldw 0(%sr3,%r26), %r28 +1: ldw,ma 0(%sr3,%r26), %r28 sub,<> %r28, %r25, %r0 -2: stw %r24, 0(%sr3,%r26) +2: stw,ma %r24, 0(%sr3,%r26) /* Free lock */ - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) #endif + /* Enable interrupts */ + ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ b lws_exit copy %r0, %r21 @@ -639,6 +644,7 @@ cas_action: #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) #endif + ssm PSW_SM_I, %r0 b lws_exit ldo -EFAULT(%r0),%r21 /* set errno */ nop @@ -649,10 +655,8 @@ cas_action: /* Two exception table entries, one for the load, the other for the store. Either return -EFAULT. Each of the entries must be relocated. */ - .section __ex_table,"aw" - ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page) - ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page) - .previous + ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) /* Make sure nothing else is placed on this page */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 0c9107285e6..84c5d3a58fa 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -392,7 +392,7 @@ ENTRY_COMP(vmsplice) ENTRY_COMP(move_pages) /* 295 */ ENTRY_SAME(getcpu) - ENTRY_SAME(epoll_pwait) + ENTRY_COMP(epoll_pwait) ENTRY_COMP(statfs64) ENTRY_COMP(fstatfs64) ENTRY_COMP(kexec_load) /* 300 */ @@ -418,7 +418,7 @@ ENTRY_SAME(accept4) /* 320 */ ENTRY_SAME(prlimit64) ENTRY_SAME(fanotify_init) - ENTRY_COMP(fanotify_mark) + ENTRY_DIFF(fanotify_mark) ENTRY_COMP(clock_adjtime) ENTRY_SAME(name_to_handle_at) /* 325 */ ENTRY_COMP(open_by_handle_at) @@ -429,6 +429,10 @@ ENTRY_COMP(process_vm_writev) ENTRY_SAME(kcmp) ENTRY_SAME(finit_module) + ENTRY_SAME(sched_setattr) + ENTRY_SAME(sched_getattr) /* 335 */ + ENTRY_COMP(utimes) + ENTRY_SAME(renameat2) /* Nothing yet */ diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 04e47c6a456..47ee620d15d 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/console.h> #include <linux/bug.h> +#include <linux/ratelimit.h> #include <asm/assembly.h> #include <asm/uaccess.h> @@ -42,9 +43,6 @@ #include "../math-emu/math-emu.h" /* for handle_fpe() */ -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ - /* dumped to the console via printk) */ - #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) DEFINE_SPINLOCK(pa_dbit_lock); #endif @@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs) } } +static DEFINE_RATELIMIT_STATE(_hppa_rs, + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); + +#define parisc_printk_ratelimited(critical, regs, fmt, ...) 
{ \ + if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \ + printk(fmt, ##__VA_ARGS__); \ + show_regs(regs); \ + } \ +} + + static void do_show_stack(struct unwind_frame_info *info) { int i = 1; @@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) if (err == 0) return; /* STFU */ - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", + parisc_printk_ratelimited(1, regs, + KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); -#ifdef PRINT_USER_FAULTS - /* XXX for debugging only */ - show_regs(regs); -#endif + return; } @@ -291,11 +298,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) do_exit(SIGSEGV); } -int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs) -{ - return syscall(regs); -} - /* gdb uses break 4,8 */ #define GDB_BREAK_INSN 0x10004 static void handle_gdb_break(struct pt_regs *regs, int wot) @@ -326,14 +328,11 @@ static void handle_break(struct pt_regs *regs) (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0); } -#ifdef PRINT_USER_FAULTS - if (unlikely(iir != GDB_BREAK_INSN)) { - printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", + if (unlikely(iir != GDB_BREAK_INSN)) + parisc_printk_ratelimited(0, regs, + KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", iir & 31, (iir>>13) & ((1<<13)-1), task_pid_nr(current), current->comm); - show_regs(regs); - } -#endif /* send standard GDB signal */ handle_gdb_break(regs, TRAP_BRKPT); @@ -763,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs) default: if (user_mode(regs)) { -#ifdef PRINT_USER_FAULTS - printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", - task_pid_nr(current), current->comm); - show_regs(regs); -#endif + parisc_printk_ratelimited(0, regs, KERN_DEBUG + "handle_interruption() pid=%d command='%s'\n", + task_pid_nr(current), current->comm); /* SIGBUS, for lack of a better one. */ si.si_signo = SIGBUS; si.si_code = BUS_OBJERR; @@ -784,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (user_mode(regs)) { if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { -#ifdef PRINT_USER_FAULTS - if (fault_space == 0) - printk(KERN_DEBUG "User Fault on Kernel Space "); - else - printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", - code); - printk(KERN_CONT "pid=%d command='%s'\n", - task_pid_nr(current), current->comm); - show_regs(regs); -#endif + parisc_printk_ratelimited(0, regs, KERN_DEBUG + "User fault %d on space 0x%08lx, pid=%d command='%s'\n", + code, fault_space, + task_pid_nr(current), current->comm); si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SEGV_MAPERR; @@ -805,14 +796,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs) else { /* - * The kernel should never fault on its own address space. + * The kernel should never fault on its own address space, + * unless pagefault_disable() was called before. 
*/ - if (fault_space == 0) + if (fault_space == 0 && !in_atomic()) { pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); parisc_terminate("Kernel Fault", regs, code, fault_address); - } } diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index 76ed62ed785..ddd988b267a 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -168,7 +168,7 @@ void unwind_table_remove(struct unwind_table *table) } /* Called from setup_arch to import the kernel unwind info */ -int unwind_init(void) +int __init unwind_init(void) { long start, stop; register unsigned long gp __asm__ ("r27"); @@ -233,7 +233,6 @@ static void unwind_frame_regs(struct unwind_frame_info *info) e = find_unwind_entry(info->ip); if (e == NULL) { unsigned long sp; - extern char _stext[], _etext[]; dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); @@ -281,8 +280,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info) break; info->prev_ip = tmp; sp = info->prev_sp; - } while (info->prev_ip < (unsigned long)_stext || - info->prev_ip > (unsigned long)_etext); + } while (!kernel_text_address(info->prev_ip)); info->rp = 0; @@ -435,9 +433,8 @@ unsigned long return_address(unsigned int level) do { if (unwind_once(&info) < 0 || info.ip == 0) return 0; - if (!__kernel_text_address(info.ip)) { + if (!kernel_text_address(info.ip)) return 0; - } } while (info.ip && level--); return info.ip; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 4bb095a2f6f..0dacc5ca555 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -6,24 +6,19 @@ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org> - * Copyright (C) 2006 Helge Deller <deller@gmx.de> - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de> + */ + +/* + * Put page table entries (swapper_pg_dir) as the first thing in .bss. This + * will ensure that it has .bss alignment (PAGE_SIZE). */ +#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \ + *(.data..vm0.pgd) \ + *(.data..vm0.pte) + #include <asm-generic/vmlinux.lds.h> + /* needed for the processor specific cache alignment size */ #include <asm/cache.h> #include <asm/page.h> @@ -39,7 +34,7 @@ OUTPUT_FORMAT("elf64-hppa-linux") OUTPUT_ARCH(hppa:hppa2.0w) #endif -ENTRY(_stext) +ENTRY(parisc_kernel_start) #ifndef CONFIG_64BIT jiffies = jiffies_64 + 4; #else @@ -49,11 +44,29 @@ SECTIONS { . = KERNEL_BINARY_TEXT_START; + __init_begin = .; + HEAD_TEXT_SECTION + INIT_TEXT_SECTION(8) + + . 
= ALIGN(PAGE_SIZE); + INIT_DATA_SECTION(PAGE_SIZE) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + PERCPU_SECTION(8) + . = ALIGN(PAGE_SIZE); + __init_end = .; + /* freed after init ends here */ + _text = .; /* Text and read-only data */ - .head ALIGN(16) : { - HEAD_TEXT - } = 0 - .text ALIGN(16) : { + _stext = .; + .text ALIGN(PAGE_SIZE) : { TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -68,21 +81,28 @@ SECTIONS *(.lock.text) /* out-of-line lock text */ *(.gnu.warning) } - /* End of text section */ + . = ALIGN(PAGE_SIZE); _etext = .; + /* End of text section */ /* Start of data section */ _sdata = .; - RODATA + RO_DATA_SECTION(8) - /* writeable */ - /* Make sure this is page aligned so - * that we can properly leave these - * as writable - */ - . = ALIGN(PAGE_SIZE); - data_start = .; +#ifdef CONFIG_64BIT + . = ALIGN(16); + /* Linkage tables */ + .opd : { + *(.opd) + } PROVIDE (__gp = .); + .plt : { + *(.plt) + } + .dlt : { + *(.dlt) + } +#endif /* unwind info */ .PARISC.unwind : { @@ -91,7 +111,15 @@ SECTIONS __stop___unwind = .; } - EXCEPTION_TABLE(16) + /* writeable */ + /* Make sure this is page aligned so + * that we can properly leave these + * as writable + */ + . = ALIGN(PAGE_SIZE); + data_start = .; + + EXCEPTION_TABLE(8) NOTES /* Data */ @@ -107,54 +135,8 @@ SECTIONS _edata = .; /* BSS */ - __bss_start = .; - /* page table entries need to be PAGE_SIZE aligned */ - . = ALIGN(PAGE_SIZE); - .data..vmpages : { - *(.data..vm0.pmd) - *(.data..vm0.pgd) - *(.data..vm0.pte) - } - .bss : { - *(.bss) - *(COMMON) - } - __bss_stop = .; - -#ifdef CONFIG_64BIT - . = ALIGN(16); - /* Linkage tables */ - .opd : { - *(.opd) - } PROVIDE (__gp = .); - .plt : { - *(.plt) - } - .dlt : { - *(.dlt) - } -#endif + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) - /* reserve space for interrupt stack by aligning __init* to 16k */ - . = ALIGN(16384); - __init_begin = .; - INIT_TEXT_SECTION(16384) - . = ALIGN(PAGE_SIZE); - INIT_DATA_SECTION(16) - /* we have to discard exit text and such at runtime, not link time */ - .exit.text : - { - EXIT_TEXT - } - .exit.data : - { - EXIT_DATA - } - - PERCPU_SECTION(L1_CACHE_BYTES) - . = ALIGN(PAGE_SIZE); - __init_end = .; - /* freed after init ends here */ _end = . ; STABS_DEBUG |
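
Note on the mmap colouring change: the new arch_get_unmapped_area()/arch_get_unmapped_area_topdown() in sys_parisc.c above pick shared-mapping addresses so that every mapping of a given file page lands at the same offset within a cache-colour granule (SHM_COLOUR), which keeps VIPT aliases equivalent. The following is a minimal, self-contained user-space sketch of that arithmetic only; the 4 MB SHM_COLOUR value, PAGE_SHIFT = 12, and the placeholder last_mmap token are assumptions for illustration and are not taken from the patch.

/*
 * Minimal user-space sketch (not kernel code) of the cache-colour
 * arithmetic introduced in sys_parisc.c above.  SHM_COLOUR = 4 MB and
 * PAGE_SHIFT = 12 are assumed values for illustration; the real
 * constants come from the kernel headers.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define SHM_COLOUR	0x00400000UL	/* assumed 4 MB colour granule */

/* mirrors get_offset(): colour slot encoded in the per-file token */
static unsigned long get_offset(unsigned long last_mmap)
{
	return (last_mmap & (SHM_COLOUR - 1)) >> PAGE_SHIFT;
}

/* mirrors shared_align_offset(): byte offset handed to vm_unmapped_area() */
static unsigned long shared_align_offset(unsigned long last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long last_mmap = 0x12345678UL;	/* stand-in for GET_LAST_MMAP(filp) */
	unsigned long pgoff = 3;		/* file offset of the mapping, in pages */

	/* Two candidate base addresses for two mappings of the same file page. */
	unsigned long base_a = 0x40000000UL;
	unsigned long base_b = 0x71000000UL;

	unsigned long off = shared_align_offset(last_mmap, pgoff) & (SHM_COLOUR - 1);
	unsigned long addr_a = ((base_a + SHM_COLOUR - 1) & ~(SHM_COLOUR - 1)) + off;
	unsigned long addr_b = ((base_b + SHM_COLOUR - 1) & ~(SHM_COLOUR - 1)) + off;

	/* Both addresses end up at the same offset within a colour granule,
	 * so the virtually-indexed caches see equivalent aliases. */
	printf("colour offset        = %#lx\n", off);
	printf("addr_a %% SHM_COLOUR  = %#lx\n", addr_a & (SHM_COLOUR - 1));
	printf("addr_b %% SHM_COLOUR  = %#lx\n", addr_b & (SHM_COLOUR - 1));
	return 0;
}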
