Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/.gitignore      |    1
-rw-r--r--  arch/parisc/kernel/Makefile        |   26
-rw-r--r--  arch/parisc/kernel/asm-offsets.c   |   31
-rw-r--r--  arch/parisc/kernel/audit.c         |   81
-rw-r--r--  arch/parisc/kernel/binfmt_elf32.c  |    3
-rw-r--r--  arch/parisc/kernel/cache.c         |  328
-rw-r--r--  arch/parisc/kernel/compat_audit.c  |   40
-rw-r--r--  arch/parisc/kernel/drivers.c       |   69
-rw-r--r--  arch/parisc/kernel/entry.S         |  936
-rw-r--r--  arch/parisc/kernel/firmware.c      |  105
-rw-r--r--  arch/parisc/kernel/ftrace.c        |  185
-rw-r--r--  arch/parisc/kernel/hardware.c      |   17
-rw-r--r--  arch/parisc/kernel/head.S          |   24
-rw-r--r--  arch/parisc/kernel/hpmc.S          |   12
-rw-r--r--  arch/parisc/kernel/init_task.c     |   76
-rw-r--r--  arch/parisc/kernel/inventory.c     |   48
-rw-r--r--  arch/parisc/kernel/irq.c           |  317
-rw-r--r--  arch/parisc/kernel/module.c        |  304
-rw-r--r--  arch/parisc/kernel/pacache.S       |  651
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c  |   19
-rw-r--r--  arch/parisc/kernel/pci-dma.c       |   46
-rw-r--r--  arch/parisc/kernel/pci.c           |  136
-rw-r--r--  arch/parisc/kernel/pdc_chassis.c   |   47
-rw-r--r--  arch/parisc/kernel/pdc_cons.c      |  139
-rw-r--r--  arch/parisc/kernel/perf.c          |    6
-rw-r--r--  arch/parisc/kernel/perf_asm.S      |    2
-rw-r--r--  arch/parisc/kernel/process.c       |  219
-rw-r--r--  arch/parisc/kernel/processor.c     |  115
-rw-r--r--  arch/parisc/kernel/ptrace.c        |  491
-rw-r--r--  arch/parisc/kernel/real2.S         |   12
-rw-r--r--  arch/parisc/kernel/semaphore.c     |  102
-rw-r--r--  arch/parisc/kernel/setup.c         |   53
-rw-r--r--  arch/parisc/kernel/signal.c        |  160
-rw-r--r--  arch/parisc/kernel/signal32.c      |  150
-rw-r--r--  arch/parisc/kernel/signal32.h      |   77
-rw-r--r--  arch/parisc/kernel/smp.c           |  255
-rw-r--r--  arch/parisc/kernel/stacktrace.c    |   63
-rw-r--r--  arch/parisc/kernel/sys32.h         |   48
-rw-r--r--  arch/parisc/kernel/sys_parisc.c    |  311
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c  |  463
-rw-r--r--  arch/parisc/kernel/syscall.S       |  180
-rw-r--r--  arch/parisc/kernel/syscall_table.S |  100
-rw-r--r--  arch/parisc/kernel/time.c          |  157
-rw-r--r--  arch/parisc/kernel/topology.c      |    4
-rw-r--r--  arch/parisc/kernel/traps.c         |  172
-rw-r--r--  arch/parisc/kernel/unaligned.c     |   21
-rw-r--r--  arch/parisc/kernel/unwind.c        |   64
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S   |  242
48 files changed, 3471 insertions(+), 3637 deletions(-)
diff --git a/arch/parisc/kernel/.gitignore b/arch/parisc/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/parisc/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 27827bc3717..ff87b4603e3 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -2,22 +2,36 @@
# Makefile for arch/parisc/kernel
#
-extra-y := init_task.o head.o vmlinux.lds
-
-AFLAGS_entry.o := -traditional
-AFLAGS_pacache.o := -traditional
+extra-y := head.o vmlinux.lds
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
- ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+ ptrace.o hardware.o inventory.o drivers.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
topology.o
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_cache.o = -pg
+CFLAGS_REMOVE_irq.o = -pg
+CFLAGS_REMOVE_pacache.o = -pg
+CFLAGS_REMOVE_perf.o = -pg
+CFLAGS_REMOVE_traps.o = -pg
+CFLAGS_REMOVE_unaligned.o = -pg
+CFLAGS_REMOVE_unwind.o = -pg
+endif
+
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
+obj-$(CONFIG_STACKTRACE)+= stacktrace.o
+obj-$(CONFIG_AUDIT) += audit.o
+obj64-$(CONFIG_AUDIT) += compat_audit.o
# only supported for PCX-W/U in 64-bit mode at the moment
-obj-$(CONFIG_64BIT) += perf.o perf_asm.o
+obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index eaa79bc14d9..dcd55103a4b 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -32,6 +32,7 @@
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
+#include <linux/kbuild.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -39,18 +40,17 @@
#include <asm/pdc.h>
#include <asm/uaccess.h>
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
#ifdef CONFIG_64BIT
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64
#endif
+#define FRAME_ALIGN 64
-#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+/* Add FRAME_SIZE to the size x and align it to y. All definitions
+ * that use align_frame will include space for a frame.
+ */
+#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
int main(void)
{
@@ -150,7 +150,8 @@ int main(void)
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(TASK_SZ, sizeof(struct task_struct));
- DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+ /* TASK_SZ_ALGN includes space for a stack frame. */
+ DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
BLANK();
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -237,7 +238,8 @@ int main(void)
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
- DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+ /* PT_SZ_ALGN includes space for a stack frame. */
+ DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -246,10 +248,8 @@ int main(void)
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
- DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
- BLANK();
- DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
- DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
+ /* THREAD_SZ_ALGN includes space for a stack frame. */
+ DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
@@ -274,8 +274,8 @@ int main(void)
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
- DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT);
- DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT);
+ DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
+ DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
@@ -294,5 +294,8 @@ int main(void)
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+ BLANK();
+ DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
+ BLANK();
return 0;
}
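Note on align_frame(): it adds FRAME_SIZE first and then rounds up to the requested alignment, so every *_SZ_ALGN constant reserves room for one stack frame on top of the structure. A standalone sketch of the arithmetic (assuming the 32-bit FRAME_SIZE of 64; not kernel code):

    #include <assert.h>
    #include <stdio.h>

    #define FRAME_SIZE 64    /* 128 under CONFIG_64BIT */
    #define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))

    int main(void)
    {
        /* 100 bytes + 64-byte frame = 164, rounded up to 192 (3 * 64) */
        assert(align_frame(100, 64) == 192);
        /* an already-aligned size still gains one full frame */
        assert(align_frame(128, 64) == 192);
        printf("align_frame(100, 64) = %d\n", align_frame(100, 64));
        return 0;
    }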
diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c
new file mode 100644
index 00000000000..eb64a6148c8
--- /dev/null
+++ b/arch/parisc/kernel/audit.c
@@ -0,0 +1,81 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+#ifdef CONFIG_COMPAT
+ if (arch == AUDIT_ARCH_PARISC)
+ return 1;
+#endif
+ return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+#ifdef CONFIG_COMPAT
+ extern int parisc32_classify_syscall(unsigned);
+ if (abi == AUDIT_ARCH_PARISC)
+ return parisc32_classify_syscall(syscall);
+#endif
+ switch (syscall) {
+ case __NR_open:
+ return 2;
+ case __NR_openat:
+ return 3;
+ case __NR_execve:
+ return 5;
+ default:
+ return 0;
+ }
+}
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_COMPAT
+ extern __u32 parisc32_dir_class[];
+ extern __u32 parisc32_write_class[];
+ extern __u32 parisc32_read_class[];
+ extern __u32 parisc32_chattr_class[];
+ extern __u32 parisc32_signal_class[];
+ audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
+ audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
+#endif
+ audit_register_class(AUDIT_CLASS_WRITE, write_class);
+ audit_register_class(AUDIT_CLASS_READ, read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+ return 0;
+}
+
+__initcall(audit_classes_init);
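The class tables registered above are built by including asm-generic headers that expand to comma-separated syscall numbers, with ~0U as the end-of-table sentinel the audit core scans for. A minimal userspace sketch of walking such a table (the syscall numbers here are invented for illustration):

    #include <stdio.h>

    static unsigned demo_class[] = { 5, 39, 84, ~0U };    /* hypothetical */

    static int class_contains(const unsigned *class, unsigned nr)
    {
        for (; *class != ~0U; class++)
            if (*class == nr)
                return 1;
        return 0;
    }

    int main(void)
    {
        printf("39 in class: %d\n", class_contains(demo_class, 39));
        printf("40 in class: %d\n", class_contains(demo_class, 40));
        return 0;
    }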
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index ecb10a4f63c..00dc66f9c2b 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -85,7 +85,8 @@ struct elf_prpsinfo32
* could set a processor dependent flag in the thread_struct.
*/
-#define SET_PERSONALITY(ex, ibcs2) \
+#undef SET_PERSONALITY
+#define SET_PERSONALITY(ex) \
set_thread_flag(TIF_32BIT); \
current->thread.map_base = DEFAULT_MAP_BASE32; \
current->thread.task_size = DEFAULT_TASK_SIZE32 \
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index e10d25d2d9c..f6448c7c62b 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -1,5 +1,4 @@
-/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -23,17 +22,21 @@
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
+#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
+void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+EXPORT_SYMBOL(flush_dcache_page_asm);
+void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
+
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
@@ -51,12 +54,12 @@ static struct pdc_btlb_info btlb_info __read_mostly;
void
flush_data_cache(void)
{
- on_each_cpu(flush_data_cache_local, NULL, 1, 1);
+ on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
- on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
+ on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif
@@ -68,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(pte);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn and we don't need to flush the kernel dcache page.
+ This occurs with FireGL card in C8000. */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
@@ -172,14 +184,14 @@ parisc_cache_init(void)
cache_info.ic_conf.cc_cst,
cache_info.ic_conf.cc_hv);
- printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
cache_info.dt_conf.tc_sh,
cache_info.dt_conf.tc_page,
cache_info.dt_conf.tc_cst,
cache_info.dt_conf.tc_aid,
cache_info.dt_conf.tc_pad1);
- printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
cache_info.it_conf.tc_sh,
cache_info.it_conf.tc_page,
cache_info.it_conf.tc_cst,
@@ -260,93 +272,24 @@ void disable_sr_hashing(void)
panic("SpaceID hashing is still on!\n");
}
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn)
-{
- pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
- pmd_t *pmd;
- pte_t pte;
-
- if(pgd_none(*pgd))
- return 0;
-
- pmd = pmd_offset(pgd, addr);
- if(pmd_none(*pmd) || pmd_bad(*pmd))
- return 0;
-
- /* We cannot take the pte lock here: flush_cache_page is usually
- * called with pte lock already held. Whereas flush_dcache_page
- * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
- * the vma itself is secure, but the pte might come or go racily.
- */
- pte = *pte_offset_map(pmd, addr);
- /* But pte_unmap() does nothing on this architecture */
-
- /* Filter out coincidental file entries and swap entries */
- if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
- return 0;
-
- return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process. cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current. We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
static inline void
-flush_user_cache_page_non_current(struct vm_area_struct *vma,
- unsigned long vmaddr)
+__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+ unsigned long physaddr)
{
- /* save the current process space and pgd */
- unsigned long space = mfsp(3), pgd = mfctl(25);
-
- /* we don't mind taking interrupts since they may not
- * do anything with user space, but we can't
- * be preempted here */
preempt_disable();
-
- /* make us current */
- mtctl(__pa(vma->vm_mm->pgd), 25);
- mtsp(vma->vm_mm->context, 3);
-
- flush_user_dcache_page(vmaddr);
- if(vma->vm_flags & VM_EXEC)
- flush_user_icache_page(vmaddr);
-
- /* put the old current process back */
- mtsp(space, 3);
- mtctl(pgd, 25);
+ flush_dcache_page_asm(physaddr, vmaddr);
+ if (vma->vm_flags & VM_EXEC)
+ flush_icache_page_asm(physaddr, vmaddr);
preempt_enable();
}
-
-static inline void
-__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
- if (likely(vma->vm_mm->context == mfsp(3))) {
- flush_user_dcache_page(vmaddr);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_page(vmaddr);
- } else {
- flush_user_cache_page_non_current(vma, vmaddr);
- }
-}
-
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct vm_area_struct *mpnt;
- struct prio_tree_iter iter;
unsigned long offset;
- unsigned long addr;
+ unsigned long addr, old_addr = 0;
pgoff_t pgoff;
- unsigned long pfn = page_to_pfn(page);
-
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
@@ -366,24 +309,26 @@ void flush_dcache_page(struct page *page)
* to flush one address here for them all to become coherent */
flush_dcache_mmap_lock(mapping);
- vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
- /* Flush instructions produce non access tlb misses.
- * On PA, we nullify these instructions rather than
- * taking a page fault if the pte doesn't exist.
- * This is just for speed. If the page translation
- * isn't there, there's no point exciting the
- * nadtlb handler into a nullification frenzy.
- *
- * Make sure we really have this page: the private
- * mappings may cover this area but have COW'd this
- * particular page.
- */
- if (translation_exists(mpnt, addr, pfn)) {
- __flush_cache_page(mpnt, addr);
- break;
+ /* The TLB is the engine of coherence on parisc: The
+ * CPU is entitled to speculate any page with a TLB
+ * mapping, so here we kill the mapping then flush the
+ * page along a special flush only alias mapping.
+ * This guarantees that the page is no-longer in the
+ * cache for any process and nor may it be
+ * speculatively read in (until the user or kernel
+ * specifically accesses it, of course) */
+
+ flush_tlb_page(mpnt, addr);
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ __flush_cache_page(mpnt, addr, page_to_phys(page));
+ if (old_addr)
+ printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
+ old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
@@ -396,16 +341,6 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
-void clear_user_page_asm(void *page, unsigned long vaddr)
-{
- /* This function is implemented in assembly in pacache.S */
- extern void __clear_user_page_asm(void *page, unsigned long vaddr);
-
- purge_tlb_start();
- __clear_user_page_asm(page, vaddr);
- purge_tlb_end();
-}
-
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
@@ -439,47 +374,49 @@ void __init parisc_setup_cache_timing(void)
printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
-extern void purge_kernel_dcache_page(unsigned long);
-extern void clear_user_page_asm(void *page, unsigned long vaddr);
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
- purge_kernel_dcache_page((unsigned long)page);
- purge_tlb_start();
- pdtlb_kernel(page);
- purge_tlb_end();
- clear_user_page_asm(page, vaddr);
-}
-EXPORT_SYMBOL(clear_user_page);
+extern void purge_kernel_dcache_page_asm(unsigned long);
+extern void clear_user_page_asm(void *, unsigned long);
+extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(void *addr)
{
+ unsigned long flags;
+
flush_kernel_dcache_page_asm(addr);
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(addr);
- purge_tlb_end();
+ purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
+ struct page *pg)
{
- /* no coherency needed (all in kmap/kunmap) */
- copy_user_page_asm(vto, vfrom);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
+ /* Copy using kernel mapping. No coherency is needed (all in
+ kunmap) for the `to' page. However, the `from' page needs to
+ be flushed through a mapping equivalent to the user mapping
+ before it can be accessed through the kernel mapping. */
+ preempt_disable();
+ flush_dcache_page_asm(__pa(vfrom), vaddr);
+ preempt_enable();
+ copy_page_asm(vto, vfrom);
}
EXPORT_SYMBOL(copy_user_page);
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
+void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
- if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(addr);
+ unsigned long flags;
+
+ /* Note: purge_tlb_entries can be called at startup with
+ no context. */
+
+ purge_tlb_start(flags);
+ mtsp(mm->context, 1);
+ pdtlb(addr);
+ pitlb(addr);
+ purge_tlb_end(flags);
}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
+EXPORT_SYMBOL(purge_tlb_entries);
void __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
@@ -490,8 +427,10 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
+ unsigned long flags;
+
+ purge_tlb_start(flags);
mtsp(sid, 1);
- purge_tlb_start();
if (split_tlb) {
while (npages--) {
pdtlb(start);
@@ -504,7 +443,7 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
start += PAGE_SIZE;
}
}
- purge_tlb_end();
+ purge_tlb_end(flags);
}
}
@@ -515,16 +454,72 @@ static void cacheflush_h_tmp_function(void *dummy)
void flush_cache_all(void)
{
- on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
+ on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
+}
+
+static inline unsigned long mm_total_size(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ unsigned long usize = 0;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ usize += vma->vm_end - vma->vm_start;
+ return usize;
+}
+
+static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
+{
+ pte_t *ptep = NULL;
+
+ if (!pgd_none(*pgd)) {
+ pud_t *pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd_t *pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd))
+ ptep = pte_offset_map(pmd, addr);
+ }
+ }
+ return ptep;
}
void flush_cache_mm(struct mm_struct *mm)
{
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
+ /* Flushing the whole cache on each cpu takes forever on
+ rp3440, etc. So, avoid it if the mm isn't too big. */
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+ }
+ return;
+ }
+
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
@@ -545,23 +540,35 @@ flush_user_icache_range(unsigned long start, unsigned long end)
flush_instruction_cache();
}
-
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- int sr3;
+ unsigned long addr;
+ pgd_t *pgd;
- if (!vma->vm_mm->context) {
- BUG();
+ BUG_ON(!vma->vm_mm->context);
+
+ if ((end - start) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
return;
}
- sr3 = mfsp(3);
- if (vma->vm_mm->context == sr3) {
- flush_user_dcache_range(start,end);
- flush_user_icache_range(start,end);
- } else {
- flush_cache_all();
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -570,7 +577,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- if (likely(translation_exists(vma, vmaddr, pfn)))
- __flush_cache_page(vma, vmaddr);
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
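The rewritten flush_dcache_page() loop only issues another flush when a mapping is inequivalent to the last one flushed, i.e. when the two virtual addresses differ modulo SHM_COLOUR; equivalent aliases share cache congruence classes, so one flush covers them all. A userspace sketch of that test (the SHM_COLOUR value is assumed here; the real one comes from asm/shmparam.h):

    #include <stdio.h>

    #define SHM_COLOUR 0x00400000UL    /* assumed 4 MB for illustration */

    /* equivalent aliases are congruent modulo SHM_COLOUR */
    static int equivalent_alias(unsigned long a, unsigned long b)
    {
        return (a & (SHM_COLOUR - 1)) == (b & (SHM_COLOUR - 1));
    }

    int main(void)
    {
        printf("%d\n", equivalent_alias(0x10400000UL, 0x20000000UL)); /* 1 */
        printf("%d\n", equivalent_alias(0x10400000UL, 0x20001000UL)); /* 0 */
        return 0;
    }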
diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c
new file mode 100644
index 00000000000..c74478f6bc7
--- /dev/null
+++ b/arch/parisc/kernel/compat_audit.c
@@ -0,0 +1,40 @@
+#include <asm/unistd.h>
+
+unsigned int parisc32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned int parisc32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned int parisc32_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned int parisc32_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned int parisc32_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int parisc32_classify_syscall(unsigned syscall)
+{
+ switch (syscall) {
+ case __NR_open:
+ return 2;
+ case __NR_openat:
+ return 3;
+ case __NR_execve:
+ return 5;
+ default:
+ return 1;
+ }
+}
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 2ca654bd632..dba508fe168 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/export.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
@@ -43,7 +44,7 @@ struct hppa_dma_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
- .bus_id = "parisc",
+ .init_name = "parisc",
};
static inline int check_dev(struct device *dev)
@@ -281,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type)
return NULL;
}
-#ifdef CONFIG_PCI
-static inline int is_pci_dev(struct device *dev)
-{
- return dev->bus == &pci_bus_type;
-}
-#else
-static inline int is_pci_dev(struct device *dev)
-{
- return 0;
-}
-#endif
-
/*
* get_node_path fills in @path with the firmware path to the device.
* Note that if @node is a parisc device, we don't fill in the 'mod' field.
@@ -305,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
int i = 5;
memset(&path->bc, -1, 6);
- if (is_pci_dev(dev)) {
+ if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->mod = PCI_FUNC(devfn);
path->bc[i--] = PCI_SLOT(devfn);
@@ -313,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)
}
while (dev != &root) {
- if (is_pci_dev(dev)) {
+ if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
} else if (dev->bus == &parisc_bus_type) {
@@ -393,7 +382,8 @@ EXPORT_SYMBOL(print_pci_hwpath);
static void setup_bus_id(struct parisc_device *padev)
{
struct hardware_path path;
- char *output = padev->dev.bus_id;
+ char name[28];
+ char *output = name;
int i;
get_node_path(padev->dev.parent, &path);
@@ -404,6 +394,7 @@ static void setup_bus_id(struct parisc_device *padev)
output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
}
sprintf(output, "%u", (unsigned char) padev->hw_path);
+ dev_set_name(&padev->dev, name);
}
struct parisc_device * create_tree_node(char id, struct device *parent)
@@ -547,6 +538,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+ const struct parisc_device *padev = to_parisc_device(dev);
+ const struct parisc_device_id *id = &padev->id;
+
+ return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+ (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+ (u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct parisc_device *padev;
+ char modalias[40];
+
+ if (!dev)
+ return -ENODEV;
+
+ padev = to_parisc_device(dev);
+ if (!padev)
+ return -ENODEV;
+
+ if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+ return -ENOMEM;
+
+ make_modalias(dev, modalias);
+ if (add_uevent_var(env, "MODALIAS=%s", modalias))
+ return -ENOMEM;
+
+ return 0;
+}
+
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
@@ -564,12 +587,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct parisc_device *padev = to_parisc_device(dev);
- struct parisc_device_id *id = &padev->id;
-
- return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
- (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
- (u32)id->sversion);
+ return make_modalias(dev, buf);
}
static struct device_attribute parisc_device_attrs[] = {
@@ -585,6 +603,7 @@ static struct device_attribute parisc_device_attrs[] = {
struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
+ .uevent = parisc_uevent,
.dev_attrs = parisc_device_attrs,
.probe = parisc_driver_probe,
.remove = parisc_driver_remove,
@@ -664,7 +683,7 @@ static int check_parent(struct device * dev, void * data)
if (dev->bus == &parisc_bus_type) {
if (match_parisc_device(dev, d->index, d->modpath))
d->dev = dev;
- } else if (is_pci_dev(dev)) {
+ } else if (dev_is_pci(dev)) {
if (match_pci_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev->bus == NULL) {
@@ -722,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath)
if (!parent)
return NULL;
}
- if (is_pci_dev(parent)) /* pci devices already parse MOD */
+ if (dev_is_pci(parent)) /* pci devices already parse MOD */
return parent;
else
return parse_tree_node(parent, 6, modpath);
@@ -741,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path)
padev = to_parisc_device(dev);
get_node_path(dev->parent, path);
path->mod = padev->hw_path;
- } else if (is_pci_dev(dev)) {
+ } else if (dev_is_pci(dev)) {
get_node_path(dev, path);
}
}
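The new uevent hook and the existing modalias attribute now share make_modalias(), which renders the four parisc_device_id fields with fixed widths. A standalone sketch of the formatting (struct layout simplified and field values invented for illustration):

    #include <stdio.h>

    struct parisc_device_id {    /* simplified stand-in */
        unsigned char  hw_type;
        unsigned char  hversion_rev;
        unsigned short hversion;
        unsigned int   sversion;
    };

    int main(void)
    {
        struct parisc_device_id id = { 0x04, 0x00, 0x0481, 0x00081000 };

        printf("parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
               id.hw_type, id.hversion, id.hversion_rev, id.sversion);
        /* prints: parisc:t04hv0481rev00sv00081000 */
        return 0;
    }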
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 111d47284ea..e8f07dd2840 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -40,16 +40,8 @@
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
-#define CMPIB cmpib,*
-#define CMPB cmpb,*
-#define COND(x) *x
-
.level 2.0w
#else
-#define CMPIB cmpib,
-#define CMPB cmpb,
-#define COND(x) x
-
.level 2.0
#endif
@@ -73,15 +65,11 @@
rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
mtsp %r0, %sr4
mtsp %r0, %sr5
- mfsp %sr7, %r1
- or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
- mtsp %r1, %sr3
+ mtsp %r0, %sr6
tovirt_r1 %r29
load32 KERNEL_PSW, %r1
rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
- mtsp %r0, %sr6
- mtsp %r0, %sr7
mtctl %r0, %cr17 /* Clear IIASQ tail */
mtctl %r0, %cr17 /* Clear IIASQ head */
mtctl %r1, %ipsw
@@ -127,17 +115,20 @@
/* we save the registers in the task struct */
+ copy %r30, %r17
mfctl %cr30, %r1
+ ldo THREAD_SZ_ALGN(%r1), %r30
+ mtsp %r0,%sr7
+ mtsp %r16,%sr3
tophys %r1,%r9
LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
tophys %r1,%r9
ldo TASK_REGS(%r9),%r9
- STREG %r30, PT_GR30(%r9)
+ STREG %r17,PT_GR30(%r9)
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
+ STREG %r16,PT_SR7(%r9)
copy %r9,%r29
- mfctl %cr30, %r1
- ldo THREAD_SZ_ALGN(%r1), %r30
.endm
.macro get_stack_use_r30
@@ -145,10 +136,12 @@
/* we put a struct pt_regs on the stack and save the registers there */
tophys %r30,%r9
- STREG %r30,PT_GR30(%r9)
+ copy %r30,%r1
ldo PT_SZ_ALGN(%r30),%r30
+ STREG %r1,PT_GR30(%r9)
STREG %r29,PT_GR29(%r9)
STREG %r26,PT_GR26(%r9)
+ STREG %r16,PT_SR7(%r9)
copy %r9,%r29
.endm
@@ -195,8 +188,8 @@
/* Register definitions for tlb miss handler macros */
- va = r8 /* virtual address for which the trap occured */
- spc = r24 /* space for which the trap occured */
+ va = r8 /* virtual address for which the trap occurred */
+ spc = r24 /* space for which the trap occurred */
#ifndef CONFIG_64BIT
@@ -233,22 +226,13 @@
#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
*/
.macro naitlb_11 code
mfctl %isr,spc
- b itlb_miss_11
+ b naitlb_miss_11
mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
.align 32
.endm
@@ -256,26 +240,17 @@
/*
* naitlb miss interruption handler (parisc 2.0)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
*/
.macro naitlb_20 code
mfctl %isr,spc
#ifdef CONFIG_64BIT
- b itlb_miss_20w
+ b naitlb_miss_20w
#else
- b itlb_miss_20
+ b naitlb_miss_20
#endif
mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
.align 32
.endm
@@ -372,32 +347,6 @@
.align 32
.endm
- /* The following are simple 32 vs 64 bit instruction
- * abstractions for the macros */
- .macro EXTR reg1,start,length,reg2
-#ifdef CONFIG_64BIT
- extrd,u \reg1,32+\start,\length,\reg2
-#else
- extrw,u \reg1,\start,\length,\reg2
-#endif
- .endm
-
- .macro DEP reg1,start,length,reg2
-#ifdef CONFIG_64BIT
- depd \reg1,32+\start,\length,\reg2
-#else
- depw \reg1,\start,\length,\reg2
-#endif
- .endm
-
- .macro DEPI val,start,length,reg
-#ifdef CONFIG_64BIT
- depdi \val,32+\start,\length,\reg
-#else
- depwi \val,\start,\length,\reg
-#endif
- .endm
-
/* In LP64, the space contains part of the upper 32 bits of the
* fault. We have to extract this and place it in the va,
* zeroing the corresponding bits in the space register */
@@ -450,19 +399,27 @@
*/
.macro L2_ptep pmd,pte,index,va,fault
#if PT_NLEVELS == 3
- EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+ extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
- EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+# if defined(CONFIG_64BIT)
+ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ #else
+ # if PAGE_SIZE > 4096
+ extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
+ # else
+ extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ # endif
+# endif
#endif
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
copy %r0,\pte
ldw,s \index(\pmd),\pmd
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
- DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+ dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
copy \pmd,%r9
SHLREG %r9,PxD_VALUE_SHIFT,\pmd
- EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
- DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
LDREG %r0(\pmd),\pte /* pmd is now pte */
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
@@ -496,9 +453,41 @@
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
+ /* Acquire pa_dbit_lock lock. */
+ .macro dbit_lock spc,tmp,tmp1
+#ifdef CONFIG_SMP
+ cmpib,COND(=),n 0,\spc,2f
+ load32 PA(pa_dbit_lock),\tmp
+1: LDCW 0(\tmp),\tmp1
+ cmpib,COND(=) 0,\tmp1,1b
+ nop
+2:
+#endif
+ .endm
+
+ /* Release pa_dbit_lock lock without reloading lock address. */
+ .macro dbit_unlock0 spc,tmp
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
+#endif
+ .endm
+
+ /* Release pa_dbit_lock lock. */
+ .macro dbit_unlock1 spc,tmp
+#ifdef CONFIG_SMP
+ load32 PA(pa_dbit_lock),\tmp
+ dbit_unlock0 \spc,\tmp
+#endif
+ .endm
+
/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
* don't needlessly dirty the cache line if it was already set */
- .macro update_ptep ptep,pte,tmp,tmp1
+ .macro update_ptep spc,ptep,pte,tmp,tmp1
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ LDREG 0(\ptep),\pte
+#endif
ldi _PAGE_ACCESSED,\tmp1
or \tmp1,\pte,\tmp
and,COND(<>) \tmp1,\pte,%r0
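The dbit_lock/dbit_unlock macros above centralize the spinning that each dbit trap handler previously open-coded. PA-RISC's ldcw atomically loads a word and writes zero back, so a lock word of zero means held and nonzero means free; the macros skip the lock entirely for kernel-space faults (spc == 0), and the unlock path reuses \spc as the nonzero release value. A userspace C sketch of the same protocol, with a GCC atomic exchange standing in for ldcw (illustrative only, not the kernel's lock):

    static unsigned int pa_dbit_lock = 1;    /* nonzero: free, zero: held */

    static void dbit_lock(void)
    {
        /* spin until the exchange returns nonzero, i.e. we took the lock */
        while (__atomic_exchange_n(&pa_dbit_lock, 0, __ATOMIC_ACQUIRE) == 0)
            ;
    }

    static void dbit_unlock(void)
    {
        /* any nonzero store releases the lock */
        __atomic_store_n(&pa_dbit_lock, 1, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        dbit_lock();
        dbit_unlock();
        return 0;
    }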
@@ -507,12 +496,28 @@
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty ptep,pte,tmp
+ .macro update_dirty spc,ptep,pte,tmp
+#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ LDREG 0(\ptep),\pte
+#endif
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
STREG \pte,0(\ptep)
.endm
+ /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
+ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ .macro convert_for_tlb_insert20 pte
+ extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+ 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ .endm
+
/* Convert the pte and prot to tlb insertion values. How
* this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot
@@ -523,7 +528,7 @@
* B <-> _PAGE_DMB (memory break)
*
* Then incredible subtlety: The access rights are
- * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
+ * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
* See 3-14 of the parisc 2.0 manual
*
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
@@ -533,7 +538,7 @@
/* PAGE_USER indicates the page can be read with user privileges,
* so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
- * contains _PAGE_READ */
+ * contains _PAGE_READ) */
extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
depdi 7,11,3,\prot
/* If we're a gateway page, drop PL2 back to zero for promotion
@@ -549,11 +554,10 @@
* on most of those machines only handles cache transactions.
*/
extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
- depi 1,12,1,\prot
+ depdi 1,12,1,\prot
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
- depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
+ convert_for_tlb_insert20 \pte
.endm
/* Identical macro to make_insert_tlb above, except it
@@ -571,8 +575,8 @@
/* Get rid of prot bits and convert to page addr for iitlba */
- depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
- extru \pte,24,25,\pte
+ depi 0,31,ASM_PFN_PTE_SHIFT,\pte
+ SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
.endm
/* This is for ILP32 PA2.0 only. The TLB insertion needs
@@ -593,7 +597,7 @@
* entry (identifying the physical page) and %r23 up with
* the from tlb entry (or nothing if only a to entry---for
* clear_user_page_asm) */
- .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
+ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
cmpib,COND(<>),n 0,\spc,\fault
ldil L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
@@ -602,10 +606,35 @@
depdi 0,31,32,\tmp
#endif
copy \va,\tmp1
- DEPI 0,31,23,\tmp1
+ depi 0,31,23,\tmp1
cmpb,COND(<>),n \tmp,\tmp1,\fault
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
+ mfctl %cr19,\tmp /* iir */
+ /* get the opcode (first six bits) into \tmp */
+ extrw,u \tmp,5,6,\tmp
+ /*
+ * Only setting the T bit prevents data cache movein
+ * Setting access rights to zero prevents instruction cache movein
+ *
+ * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
+ * to type field and _PAGE_READ goes to top bit of PL1
+ */
+ ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
+ /*
+ * so if the opcode is one (i.e. this is a memory management
+ * instruction) nullify the next load so \prot is only T.
+ * Otherwise this is a normal data operation
+ */
+ cmpiclr,= 0x01,\tmp,%r0
+ ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+.ifc \patype,20
depd,z \prot,8,7,\prot
+.else
+.ifc \patype,11
+ depw,z \prot,8,7,\prot
+.else
+ .error "undefined PA type to do_alias"
+.endif
+.endif
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
@@ -631,7 +660,7 @@
.text
- .align PAGE_SIZE
+ .align 4096
ENTRY(fault_vector_20)
/* First vector is invalid (0) */
@@ -654,11 +683,7 @@ ENTRY(fault_vector_20)
def 13
def 14
dtlb_20 15
-#if 0
naitlb_20 16
-#else
- def 16
-#endif
nadtlb_20 17
def 18
def 19
@@ -701,11 +726,7 @@ ENTRY(fault_vector_11)
def 13
def 14
dtlb_11 15
-#if 0
naitlb_11 16
-#else
- def 16
-#endif
nadtlb_11 17
def 18
def 19
@@ -724,64 +745,17 @@ ENTRY(fault_vector_11)
END(fault_vector_11)
#endif
+ /* Fault vector is separately protected and *must* be on its own page */
+ .align PAGE_SIZE
+ENTRY(end_fault_vector)
.import handle_interruption,code
.import do_cpu_irq_mask,code
/*
- * r26 = function to be called
- * r25 = argument to pass in
- * r24 = flags for do_fork()
- *
- * Kernel threads don't ever return, so they don't need
- * a true register context. We just save away the arguments
- * for copy_thread/ret_ to properly set up the child.
- */
-
-#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
-#define CLONE_UNTRACED 0x00800000
-
- .import do_fork
-ENTRY(__kernel_thread)
- STREG %r2, -RP_OFFSET(%r30)
-
- copy %r30, %r1
- ldo PT_SZ_ALGN(%r30),%r30
-#ifdef CONFIG_64BIT
- /* Yo, function pointers in wide mode are little structs... -PB */
- ldd 24(%r26), %r2
- STREG %r2, PT_GR27(%r1) /* Store childs %dp */
- ldd 16(%r26), %r26
-
- STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
- copy %r0, %r22 /* user_tid */
-#endif
- STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
- STREG %r25, PT_GR25(%r1)
- ldil L%CLONE_UNTRACED, %r26
- ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
- or %r26, %r24, %r26 /* will have kernel mappings. */
- ldi 1, %r25 /* stack_start, signals kernel thread */
- stw %r0, -52(%r30) /* user_tid */
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
- BL do_fork, %r2
- copy %r1, %r24 /* pt_regs */
-
- /* Parent Returns here */
-
- LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
- ldo -PT_SZ_ALGN(%r30), %r30
- bv %r0(%r2)
- nop
-ENDPROC(__kernel_thread)
-
- /*
* Child Returns here
*
- * copy_thread moved args from temp save area set up above
- * into task save area.
+ * copy_thread moved args into task save area.
*/
ENTRY(ret_from_kernel_thread)
@@ -790,51 +764,17 @@ ENTRY(ret_from_kernel_thread)
BL schedule_tail, %r2
nop
- LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
LDREG TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
LDREG TASK_PT_GR27(%r1), %r27
- LDREG TASK_PT_GR22(%r1), %r22
#endif
LDREG TASK_PT_GR26(%r1), %r1
ble 0(%sr7, %r1)
copy %r31, %r2
-
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
- loadgp /* Thread could have been in a module */
-#endif
-#ifndef CONFIG_64BIT
- b sys_exit
-#else
- load32 sys_exit, %r1
- bv %r0(%r1)
-#endif
- ldi 0, %r26
-ENDPROC(ret_from_kernel_thread)
-
- .import sys_execve, code
-ENTRY(__execve)
- copy %r2, %r15
- copy %r30, %r16
- ldo PT_SZ_ALGN(%r30), %r30
- STREG %r26, PT_GR26(%r16)
- STREG %r25, PT_GR25(%r16)
- STREG %r24, PT_GR24(%r16)
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
- BL sys_execve, %r2
- copy %r16, %r26
-
- cmpib,=,n 0,%r28,intr_return /* forward */
-
- /* yes, this will trap and die. */
- copy %r15, %r2
- copy %r16, %r30
- bv %r0(%r2)
+ b finish_child_return
nop
-ENDPROC(__execve)
+ENDPROC(ret_from_kernel_thread)
/*
@@ -914,7 +854,7 @@ ENTRY(syscall_exit_rfi)
* (we don't store them in the sigcontext), so set them
* to "proper" values now (otherwise we'll wind up restoring
* whatever was last stored in the task structure, which might
- * be inconsistent if an interrupt occured while on the gateway
+ * be inconsistent if an interrupt occurred while on the gateway
* page). Note that we may be "trashing" values the user put in
* them, but we don't support the user changing them.
*/
@@ -930,11 +870,6 @@ ENTRY(syscall_exit_rfi)
STREG %r19,PT_SR7(%r16)
intr_return:
- /* NOTE: Need to enable interrupts incase we schedule. */
- ssm PSW_SM_I, %r0
-
-intr_check_resched:
-
/* check for reschedule */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
@@ -945,7 +880,7 @@ intr_check_sig:
/* As above */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19
- ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
+ ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
and,COND(<>) %r19, %r20, %r0
b,n intr_restore /* skip past if we've nothing to do */
@@ -957,9 +892,14 @@ intr_check_sig:
* Only do signals if we are returning to user space
*/
LDREG PT_IASQ0(%r16), %r20
- CMPIB=,n 0,%r20,intr_restore /* backward */
+ cmpib,COND(=),n 0,%r20,intr_restore /* backward */
LDREG PT_IASQ1(%r16), %r20
- CMPIB=,n 0,%r20,intr_restore /* backward */
+ cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+
+ /* NOTE: We need to enable interrupts if we have to deliver
+ * signals. We used to do this earlier but it caused kernel
+ * stack overflows. */
+ ssm PSW_SM_I, %r0
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
@@ -994,13 +934,6 @@ intr_restore:
rfi
nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
#ifndef CONFIG_PREEMPT
# define intr_do_preempt intr_restore
@@ -1013,12 +946,16 @@ intr_do_resched:
* we jump back to intr_restore.
*/
LDREG PT_IASQ0(%r16), %r20
- CMPIB= 0, %r20, intr_do_preempt
+ cmpib,COND(=) 0, %r20, intr_do_preempt
nop
LDREG PT_IASQ1(%r16), %r20
- CMPIB= 0, %r20, intr_do_preempt
+ cmpib,COND(=) 0, %r20, intr_do_preempt
nop
+ /* NOTE: We need to enable interrupts if we schedule. We used
+ * to do this earlier but it caused kernel stack overflows. */
+ ssm PSW_SM_I, %r0
+
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
@@ -1045,7 +982,7 @@ intr_do_preempt:
/* current_thread_info()->preempt_count */
mfctl %cr30, %r1
LDREG TI_PRE_COUNT(%r1), %r19
- CMPIB<> 0, %r19, intr_restore /* if preempt_count > 0 */
+ cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
nop /* prev insn branched backwards */
/* check if we interrupted a critical path */
@@ -1064,7 +1001,7 @@ intr_do_preempt:
*/
intr_extint:
- CMPIB=,n 0,%r16,1f
+ cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b,n 2f
@@ -1099,7 +1036,7 @@ ENDPROC(syscall_exit_rfi)
ENTRY(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
- CMPIB=,n 0,%r16,1f
+ cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b 2f
copy %r8,%r26
@@ -1121,7 +1058,7 @@ ENTRY(intr_save) /* for os_hpmc */
* adjust isr/ior below.
*/
- CMPIB=,n 6,%r26,skip_save_ior
+ cmpib,COND(=),n 6,%r26,skip_save_ior
mfctl %cr20, %r16 /* isr */
@@ -1195,11 +1132,11 @@ ENDPROC(intr_save)
*/
t0 = r1 /* temporary register 0 */
- va = r8 /* virtual address for which the trap occured */
+ va = r8 /* virtual address for which the trap occurred */
t1 = r9 /* temporary register 1 */
pte = r16 /* pte/phys page # */
prot = r17 /* prot bits */
- spc = r24 /* space for which the trap occured */
+ spc = r24 /* space for which the trap occurred */
ptp = r25 /* page directory/page table pointer */
#ifdef CONFIG_64BIT
@@ -1211,17 +1148,19 @@ dtlb_miss_20w:
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
dtlb_check_alias_20w:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
idtlbt pte,prot
@@ -1233,29 +1172,22 @@ nadtlb_miss_20w:
get_pgd spc,ptp
space_check spc,t0,nadtlb_fault
- L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
+ L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
-nadtlb_check_flush_20w:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
+nadtlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
idtlbt pte,prot
rfir
@@ -1270,7 +1202,8 @@ dtlb_miss_11:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1281,30 +1214,13 @@ dtlb_miss_11:
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
dtlb_check_alias_11:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depwi 0,31,23,t1
- cmpb,<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depw,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrw,u,= va,9,1,r0
- or,tr %r23,%r0,pte /* If "from" use "from" page */
- or %r26,%r0,pte /* else "to", use "to" page */
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
idtlba pte,(va)
idtlbp prot,(va)
@@ -1317,9 +1233,10 @@ nadtlb_miss_11:
space_check spc,t0,nadtlb_fault
- L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1331,30 +1248,16 @@ nadtlb_miss_11:
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
-nadtlb_check_flush_11:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- zdepi 7,7,3,prot
- depi 1,10,1,prot
-
- /* Get rid of prot bits and convert to page addr for idtlba */
+nadtlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
- depi 0,31,12,pte
- extru pte,24,25,pte
-
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
-
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
-
- mtsp t0, %sr1 /* Restore sr1 */
+ idtlba pte,(va)
+ idtlbp prot,(va)
rfir
nop
@@ -1366,19 +1269,21 @@ dtlb_miss_20:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
dtlb_check_alias_20:
- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
idtlbt pte,prot
@@ -1390,35 +1295,29 @@ nadtlb_miss_20:
space_check spc,t0,nadtlb_fault
- L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
idtlbt pte,prot
+ dbit_unlock1 spc,t0
rfir
nop
-nadtlb_check_flush_20:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
+nadtlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,32,pte
idtlbt pte,prot
rfir
nop
+
#endif
nadtlb_emulate:
@@ -1450,11 +1349,11 @@ nadtlb_emulate:
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
BL get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
- CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
BL get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
- CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
BL set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
@@ -1486,7 +1385,7 @@ nadtlb_probe_check:
cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
BL get_register,%r25 /* Find the target register */
extrw,u %r9,31,5,%r8 /* Get target register */
- CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
BL set_register,%r25
copy %r0,%r1 /* Write zero to target register */
b nadtlb_nullify /* Nullify return insn */
@@ -1507,11 +1406,45 @@ itlb_miss_20w:
L3_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
+ dbit_unlock1 spc,t0
+
+ rfir
+ nop
+
+naitlb_miss_20w:
+
+ /*
+ * I miss is a little different, since we allow users to fault
+ * on the gateway page which is in the kernel address space.
+ */
+
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,naitlb_fault
+
+ L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
+
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+ dbit_unlock1 spc,t0
+
+ rfir
+ nop
+
+naitlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+ iitlbt pte,prot
rfir
nop
@@ -1525,7 +1458,8 @@ itlb_miss_11:
L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
@@ -1536,10 +1470,45 @@ itlb_miss_11:
iitlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
rfir
nop
+naitlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,naitlb_fault
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
+
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+ dbit_unlock1 spc,t0
+
+ rfir
+ nop
+
+naitlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
+
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
+ rfir
+ nop
+
+
itlb_miss_20:
get_pgd spc,ptp
@@ -1547,13 +1516,43 @@ itlb_miss_20:
L2_ptep ptp,pte,t0,va,itlb_fault
- update_ptep ptp,pte,t0,t1
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
iitlbt pte,prot
+ dbit_unlock1 spc,t0
+
+ rfir
+ nop
+
+naitlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,naitlb_fault
+
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
+
+ dbit_lock spc,t0,t1
+ update_ptep spc,ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ iitlbt pte,prot
+ dbit_unlock1 spc,t0
+
+ rfir
+ nop
+
+naitlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+ iitlbt pte,prot
rfir
nop
@@ -1569,29 +1568,13 @@ dbit_trap_20w:
L3_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nolock_20w
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20w:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_20w
- nop
-
-dbit_nolock_20w:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
-#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nounlock_20w
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_20w:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
@@ -1605,18 +1588,8 @@ dbit_trap_11:
L2_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nolock_11
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_11:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_11
- nop
-
-dbit_nolock_11:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
@@ -1627,13 +1600,7 @@ dbit_nolock_11:
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
-#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nounlock_11
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_11:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
@@ -1645,32 +1612,15 @@ dbit_trap_20:
L2_ptep ptp,pte,t0,va,dbit_fault
-#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nolock_20
- load32 PA(pa_dbit_lock),t0
-
-dbit_spin_20:
- LDCW 0(t0),t1
- cmpib,= 0,t1,dbit_spin_20
- nop
-
-dbit_nolock_20:
-#endif
- update_dirty ptp,pte,t1
+ dbit_lock spc,t0,t1
+ update_dirty spc,ptp,pte,t1
make_insert_tlb spc,pte,prot
f_extend pte,t1
idtlbt pte,prot
-
-#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nounlock_20
- ldi 1,t1
- stw t1,0(t0)
-
-dbit_nounlock_20:
-#endif
+ dbit_unlock0 spc,t0
rfir
nop
@@ -1694,6 +1644,10 @@ nadtlb_fault:
b intr_save
ldi 17,%r8
+naitlb_fault:
+ b intr_save
+ ldi 16,%r8
+
dtlb_fault:
b intr_save
ldi 15,%r8
@@ -1758,151 +1712,37 @@ dtlb_fault:
LDREG PT_GR18(\regs),%r18
.endm
-ENTRY(sys_fork_wrapper)
+ .macro fork_like name
+ENTRY(sys_\name\()_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_save %r1
- mfctl %cr27, %r3
- STREG %r3, PT_CR27(%r1)
-
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
-
- /* These are call-clobbered registers and therefore
- also syscall-clobbered (we hope). */
- STREG %r2,PT_GR19(%r1) /* save for child */
- STREG %r30,PT_GR21(%r1)
-
- LDREG PT_GR30(%r1),%r25
- copy %r1,%r24
- BL sys_clone,%r2
- ldi SIGCHLD,%r26
-
- LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
-wrapper_exit:
- ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
-
- LDREG PT_CR27(%r1), %r3
- mtctl %r3, %cr27
- reg_restore %r1
+ mfctl %cr27, %r28
+ ldil L%sys_\name, %r31
+ be R%sys_\name(%sr4,%r31)
+ STREG %r28, PT_CR27(%r1)
+ENDPROC(sys_\name\()_wrapper)
+ .endm
- /* strace expects syscall # to be preserved in r20 */
- ldi __NR_fork,%r20
- bv %r0(%r2)
- STREG %r20,PT_GR20(%r1)
-ENDPROC(sys_fork_wrapper)
+fork_like clone
+fork_like fork
+fork_like vfork
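
The fork_like macro collapses three nearly identical hand-written
wrappers into a few instructions each: save the register state,
preserve %cr27 (the userspace TLS pointer) for the child, and branch
straight into the C syscall. The conceptual C shape, with
hypothetical reg_save()/read_cr27() helpers standing in for the
assembler macros:

	long sys_fork_wrapper(void)
	{
		struct pt_regs *regs = task_pt_regs(current);

		reg_save(regs);			/* spill GRs into pt_regs */
		regs->cr27 = read_cr27();	/* TLS pointer for the child */
		return sys_fork();		/* tail-branch to the C syscall */
	}
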
/* Set the return value for the child */
ENTRY(child_return)
BL schedule_tail, %r2
nop
+finish_child_return:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
- LDREG TASK_PT_GR19(%r1),%r2
- b wrapper_exit
+ LDREG PT_CR27(%r1), %r3
+ mtctl %r3, %cr27
+ reg_restore %r1
+ b syscall_exit
copy %r0,%r28
ENDPROC(child_return)
-
-ENTRY(sys_clone_wrapper)
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- reg_save %r1
- mfctl %cr27, %r3
- STREG %r3, PT_CR27(%r1)
-
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
-
- /* WARNING - Clobbers r19 and r21, userspace must save these! */
- STREG %r2,PT_GR19(%r1) /* save for child */
- STREG %r30,PT_GR21(%r1)
- BL sys_clone,%r2
- copy %r1,%r24
-
- b wrapper_exit
- LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
-ENDPROC(sys_clone_wrapper)
-
-
-ENTRY(sys_vfork_wrapper)
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
- reg_save %r1
- mfctl %cr27, %r3
- STREG %r3, PT_CR27(%r1)
-
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
-
- STREG %r2,PT_GR19(%r1) /* save for child */
- STREG %r30,PT_GR21(%r1)
-
- BL sys_vfork,%r2
- copy %r1,%r26
-
- b wrapper_exit
- LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
-ENDPROC(sys_vfork_wrapper)
-
-
- .macro execve_wrapper execve
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r1 /* get pt regs */
-
- /*
- * Do we need to save/restore r3-r18 here?
- * I don't think so. why would new thread need old
- * threads registers?
- */
-
- /* %arg0 - %arg3 are already saved for us. */
-
- STREG %r2,-RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30),%r30
-#ifdef CONFIG_64BIT
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
- BL \execve,%r2
- copy %r1,%arg0
-
- ldo -FRAME_SIZE(%r30),%r30
- LDREG -RP_OFFSET(%r30),%r2
-
- /* If exec succeeded we need to load the args */
-
- ldo -1024(%r0),%r1
- cmpb,>>= %r28,%r1,error_\execve
- copy %r2,%r19
-
-error_\execve:
- bv %r0(%r19)
- nop
- .endm
-
- .import sys_execve
-ENTRY(sys_execve_wrapper)
- execve_wrapper sys_execve
-ENDPROC(sys_execve_wrapper)
-
-#ifdef CONFIG_64BIT
- .import sys32_execve
-ENTRY(sys32_execve_wrapper)
- execve_wrapper sys32_execve
-ENDPROC(sys32_execve_wrapper)
-#endif
-
ENTRY(sys_rt_sigreturn_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
ldo TASK_REGS(%r26),%r26 /* get pt regs */
@@ -1933,44 +1773,6 @@ ENTRY(sys_rt_sigreturn_wrapper)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
-ENTRY(sys_sigaltstack_wrapper)
- /* Get the user stack pointer */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldo TASK_REGS(%r1),%r24 /* get pt regs */
- LDREG TASK_PT_GR30(%r24),%r24
- STREG %r2, -RP_OFFSET(%r30)
-#ifdef CONFIG_64BIT
- ldo FRAME_SIZE(%r30), %r30
- BL do_sigaltstack,%r2
- ldo -16(%r30),%r29 /* Reference param save area */
-#else
- BL do_sigaltstack,%r2
- ldo FRAME_SIZE(%r30), %r30
-#endif
-
- ldo -FRAME_SIZE(%r30), %r30
- LDREG -RP_OFFSET(%r30), %r2
- bv %r0(%r2)
- nop
-ENDPROC(sys_sigaltstack_wrapper)
-
-#ifdef CONFIG_64BIT
-ENTRY(sys32_sigaltstack_wrapper)
- /* Get the user stack pointer */
- LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
- LDREG TASK_PT_GR30(%r24),%r24
- STREG %r2, -RP_OFFSET(%r30)
- ldo FRAME_SIZE(%r30), %r30
- BL do_sigaltstack32,%r2
- ldo -16(%r30),%r29 /* Reference param save area */
-
- ldo -FRAME_SIZE(%r30), %r30
- LDREG -RP_OFFSET(%r30), %r2
- bv %r0(%r2)
- nop
-ENDPROC(sys32_sigaltstack_wrapper)
-#endif
-
ENTRY(syscall_exit)
/* NOTE: HP-UX syscalls also come through here
* after hpux_syscall_exit fixes up return
@@ -1994,7 +1796,7 @@ ENTRY(syscall_exit)
/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
ldo -PER_HPUX(%r19), %r19
- CMPIB<>,n 0,%r19,1f
+ cmpib,COND(<>),n 0,%r19,1f
/* Save other hpux returns if personality is PER_HPUX */
STREG %r22,TASK_PT_GR22(%r1)
@@ -2018,7 +1820,7 @@ syscall_check_resched:
.import do_signal,code
syscall_check_sig:
LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
- ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
+ ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
and,COND(<>) %r19, %r26, %r0
b,n syscall_restore /* skip past if we've nothing to do */
@@ -2046,12 +1848,13 @@ syscall_do_signal:
b,n syscall_check_sig
syscall_restore:
- /* Are we being ptraced? */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
- ldw TASK_PTRACE(%r1), %r19
- bb,< %r19,31,syscall_restore_rfi
- nop
+ /* Are we being ptraced? */
+ ldw TASK_FLAGS(%r1),%r19
+ ldi _TIF_SYSCALL_TRACE_MASK,%r2
+ and,COND(=) %r19,%r2,%r0
+ b,n syscall_restore_rfi
ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
rest_fp %r19
@@ -2074,9 +1877,10 @@ syscall_restore:
LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
/* NOTE: We use rsm/ssm pair to make this operation atomic */
+ LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
rsm PSW_SM_I, %r0
- LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
- mfsp %sr3,%r1 /* Get users space id */
+ copy %r1,%r30 /* Restore user sp */
+ mfsp %sr3,%r1 /* Get user space id */
mtsp %r1,%sr7 /* Restore sr7 */
ssm PSW_SM_I, %r0
@@ -2112,16 +1916,16 @@ syscall_restore_rfi:
ldi 0x0b,%r20 /* Create new PSW */
depi -1,13,1,%r20 /* C, Q, D, and I bits */
- /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
- * set in include/linux/ptrace.h and converted to PA bitmap
+ /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+ * set in thread_info.h and converted to PA bitmap
* numbers in asm-offsets.c */
- /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
- extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
+ /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+ extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
depi -1,27,1,%r20 /* R bit */
- /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
- extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
+ /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+ extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
STREG %r20,TASK_PT_PSW(%r1)
@@ -2161,15 +1965,23 @@ syscall_restore_rfi:
/* sr2 should be set to zero for userspace syscalls */
STREG %r0,TASK_PT_SR2(%r1)
-pt_regs_ok:
LDREG TASK_PT_GR31(%r1),%r2
- depi 3,31,2,%r2 /* ensure return to user mode. */
- STREG %r2,TASK_PT_IAOQ0(%r1)
+ depi 3,31,2,%r2 /* ensure return to user mode. */
+ STREG %r2,TASK_PT_IAOQ0(%r1)
ldo 4(%r2),%r2
STREG %r2,TASK_PT_IAOQ1(%r1)
+ b intr_restore
copy %r25,%r16
+
+pt_regs_ok:
+ LDREG TASK_PT_IAOQ0(%r1),%r2
+ depi 3,31,2,%r2 /* ensure return to user mode. */
+ STREG %r2,TASK_PT_IAOQ0(%r1)
+ LDREG TASK_PT_IAOQ1(%r1),%r2
+ depi 3,31,2,%r2
+ STREG %r2,TASK_PT_IAOQ1(%r1)
b intr_restore
- nop
+ copy %r25,%r16
.import schedule,code
syscall_do_resched:
@@ -2184,6 +1996,74 @@ syscall_do_resched:
ENDPROC(syscall_exit)
+#ifdef CONFIG_FUNCTION_TRACER
+ .import ftrace_function_trampoline,code
+ENTRY(_mcount)
+ copy %r3, %arg2
+ b ftrace_function_trampoline
+ nop
+ENDPROC(_mcount)
+
+ENTRY(return_to_handler)
+ load32 return_trampoline, %rp
+ copy %ret0, %arg0
+ copy %ret1, %arg1
+ b ftrace_return_to_handler
+ nop
+return_trampoline:
+ copy %ret0, %rp
+ copy %r23, %ret0
+ copy %r24, %ret1
+
+.globl ftrace_stub
+ftrace_stub:
+ bv %r0(%rp)
+ nop
+ENDPROC(return_to_handler)
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_IRQSTACKS
+/* void call_on_stack(unsigned long param1, void *func,
+ unsigned long new_stack) */
+ENTRY(call_on_stack)
+ copy %sp, %r1
+
+ /* Regarding the HPPA calling conventions for function pointers,
+	   we assume the PIC register is not changed across the call. For
+ CONFIG_64BIT, the argument pointer is left to point at the
+ argument region allocated for the call to call_on_stack. */
+# ifdef CONFIG_64BIT
+ /* Switch to new stack. We allocate two 128 byte frames. */
+ ldo 256(%arg2), %sp
+ /* Save previous stack pointer and return pointer in frame marker */
+ STREG %rp, -144(%sp)
+ /* Calls always use function descriptor */
+ LDREG 16(%arg1), %arg1
+ bve,l (%arg1), %rp
+ STREG %r1, -136(%sp)
+ LDREG -144(%sp), %rp
+ bve (%rp)
+ LDREG -136(%sp), %sp
+# else
+ /* Switch to new stack. We allocate two 64 byte frames. */
+ ldo 128(%arg2), %sp
+ /* Save previous stack pointer and return pointer in frame marker */
+ STREG %r1, -68(%sp)
+ STREG %rp, -84(%sp)
+ /* Calls use function descriptor if PLABEL bit is set */
+ bb,>=,n %arg1, 30, 1f
+ depwi 0,31,2, %arg1
+ LDREG 0(%arg1), %arg1
+1:
+ be,l 0(%sr4,%arg1), %sr0, %r31
+ copy %r31, %rp
+ LDREG -84(%sp), %rp
+ bv (%rp)
+ LDREG -68(%sp), %sp
+# endif /* CONFIG_64BIT */
+ENDPROC(call_on_stack)
+#endif /* CONFIG_IRQSTACKS */
+
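
call_on_stack() is the C-callable entry point to the stack-switching
helper above. A minimal, purely hypothetical caller would look like
this; the real one is execute_on_irq_stack() in
arch/parisc/kernel/irq.c further down in this diff:

	/* Hypothetical illustration only. func runs with %sp pointing
	 * into new_stack; the previous %sp and %rp live in the frame
	 * marker and are restored on the way back. */
	static void on_irq_stack(unsigned long arg)
	{
		/* ... work that needs the fresh stack ... */
	}

	static void demo(unsigned long new_stack)
	{
		call_on_stack(0UL, on_irq_stack, new_stack);
	}
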
get_register:
/*
* get_register is used by the non access tlb miss handlers to
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 7177a6cd1b7..22395901d47 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -67,12 +67,11 @@
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
-#include <asm/system.h>
#include <asm/processor.h> /* for boot_cpu_data */
static DEFINE_SPINLOCK(pdc_lock);
-static unsigned long pdc_result[32] __attribute__ ((aligned (8)));
-static unsigned long pdc_result2[32] __attribute__ ((aligned (8)));
+extern unsigned long pdc_result[NUM_PDC_RESULT];
+extern unsigned long pdc_result2[NUM_PDC_RESULT];
#ifdef CONFIG_64BIT
#define WIDE_FIRMWARE 0x1
@@ -150,26 +149,42 @@ static void convert_to_wide(unsigned long *addr)
#endif
}
+#ifdef CONFIG_64BIT
+void set_firmware_width_unlocked(void)
+{
+ int ret;
+
+ ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
+ __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ if (pdc_result[0] != NARROW_FIRMWARE)
+ parisc_narrow_firmware = 0;
+}
+
/**
* set_firmware_width - Determine if the firmware is wide or narrow.
*
- * This function must be called before any pdc_* function that uses the convert_to_wide
- * function.
+ * This function must be called before any pdc_* function that uses the
+ * convert_to_wide function.
*/
-void __init set_firmware_width(void)
+void set_firmware_width(void)
{
-#ifdef CONFIG_64BIT
- int retval;
unsigned long flags;
+ spin_lock_irqsave(&pdc_lock, flags);
+ set_firmware_width_unlocked();
+ spin_unlock_irqrestore(&pdc_lock, flags);
+}
+#else
+void set_firmware_width_unlocked(void)
+{
+ return;
+}
- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
- convert_to_wide(pdc_result);
- if(pdc_result[0] != NARROW_FIRMWARE)
- parisc_narrow_firmware = 0;
- spin_unlock_irqrestore(&pdc_lock, flags);
-#endif
+void set_firmware_width(void)
+{
+ return;
}
+#endif /* CONFIG_64BIT */
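
This hunk introduces a split that recurs through the rest of
firmware.c: a *_unlocked() variant does the actual PDC work and
expects pdc_lock to already be held by the caller, while the public
function keeps the old self-locking behaviour. The shape of the
pattern, as a C sketch (foo is a placeholder name):

	int foo_unlocked(void);		/* caller holds pdc_lock */

	int foo(void)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&pdc_lock, flags);
		ret = foo_unlocked();
		spin_unlock_irqrestore(&pdc_lock, flags);
		return ret;
	}
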
/**
* pdc_emergency_unlock - Unlock the linux pdc lock
@@ -288,6 +303,20 @@ int pdc_chassis_warn(unsigned long *warn)
return retval;
}
+int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+ int ret;
+
+ ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
+ convert_to_wide(pdc_result);
+ pdc_coproc_info->ccr_functional = pdc_result[0];
+ pdc_coproc_info->ccr_present = pdc_result[1];
+ pdc_coproc_info->revision = pdc_result[17];
+ pdc_coproc_info->model = pdc_result[18];
+
+ return ret;
+}
+
/**
* pdc_coproc_cfg - To identify coprocessors attached to the processor.
* @pdc_coproc_info: Return buffer address.
@@ -295,21 +324,16 @@ int pdc_chassis_warn(unsigned long *warn)
* This PDC call returns the presence and status of all the coprocessors
* attached to the processor.
*/
-int __init pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
+int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
{
- int retval;
+ int ret;
unsigned long flags;
- spin_lock_irqsave(&pdc_lock, flags);
- retval = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
- convert_to_wide(pdc_result);
- pdc_coproc_info->ccr_functional = pdc_result[0];
- pdc_coproc_info->ccr_present = pdc_result[1];
- pdc_coproc_info->revision = pdc_result[17];
- pdc_coproc_info->model = pdc_result[18];
- spin_unlock_irqrestore(&pdc_lock, flags);
+ spin_lock_irqsave(&pdc_lock, flags);
+ ret = pdc_coproc_cfg_unlocked(pdc_coproc_info);
+ spin_unlock_irqrestore(&pdc_lock, flags);
- return retval;
+ return ret;
}
/**
@@ -504,7 +528,11 @@ int pdc_model_capabilities(unsigned long *capabilities)
pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
- *capabilities = pdc_result[0];
+ if (retval == PDC_OK) {
+ *capabilities = pdc_result[0];
+ } else {
+ *capabilities = PDC_MODEL_OS32;
+ }
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
@@ -1096,42 +1124,23 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096];
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
- static int posx; /* for simple TAB-Simulation... */
unsigned int i;
unsigned long flags;
- for (i = 0; i < count && i < 79;) {
+ for (i = 0; i < count;) {
switch(str[i]) {
case '\n':
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
- posx = 0;
goto print;
- case '\t':
- while (posx & 7) {
- iodc_dbuf[i] = ' ';
- i++, posx++;
- }
- break;
- case '\b': /* BS */
- posx -= 2;
default:
iodc_dbuf[i] = str[i];
- i++, posx++;
+ i++;
break;
}
}
- /* if we're at the end of line, and not already inserting a newline,
- * insert one anyway. iodc console doesn't claim to support >79 char
- * lines. don't account for this in the return value.
- */
- if (i == 79 && iodc_dbuf[i-1] != '\n') {
- iodc_dbuf[i+0] = '\r';
- iodc_dbuf[i+1] = '\n';
- }
-
print:
spin_lock_irqsave(&pdc_lock, flags);
real32_call(PAGE0->mem_cons.iodc_io,
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
new file mode 100644
index 00000000000..5beb97bafbb
--- /dev/null
+++ b/arch/parisc/kernel/ftrace.c
@@ -0,0 +1,185 @@
+/*
+ * Code for tracing calls in Linux kernel.
+ * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ *
+ * based on code for x86 which is:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * future possible enhancements:
+ * - add CONFIG_DYNAMIC_FTRACE
+ * - add CONFIG_STACK_TRACER
+ */
+
+#include <linux/init.h>
+#include <linux/ftrace.h>
+
+#include <asm/sections.h>
+#include <asm/ftrace.h>
+
+
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Add a function return address to the trace stack on thread info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+ unsigned long func, int *depth)
+{
+ int index;
+
+ if (!current->ret_stack)
+ return -EBUSY;
+
+ /* The return trace stack is full */
+ if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+ atomic_inc(&current->trace_overrun);
+ return -EBUSY;
+ }
+
+ index = ++current->curr_ret_stack;
+ barrier();
+ current->ret_stack[index].ret = ret;
+ current->ret_stack[index].func = func;
+ current->ret_stack[index].calltime = time;
+ *depth = index;
+
+ return 0;
+}
+
+/* Retrieve a function return address from the trace stack on thread info. */
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+{
+ int index;
+
+ index = current->curr_ret_stack;
+
+ if (unlikely(index < 0)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+		/* Might as well panic, otherwise we have nowhere to go */
+ *ret = (unsigned long)
+ dereference_function_descriptor(&panic);
+ return;
+ }
+
+ *ret = current->ret_stack[index].ret;
+ trace->func = current->ret_stack[index].func;
+ trace->calltime = current->ret_stack[index].calltime;
+ trace->overrun = atomic_read(&current->trace_overrun);
+ trace->depth = index;
+ barrier();
+ current->curr_ret_stack--;
+
+}
+
+/*
+ * Send the trace to the ring-buffer.
+ * @return the original return address.
+ */
+unsigned long ftrace_return_to_handler(unsigned long retval0,
+ unsigned long retval1)
+{
+ struct ftrace_graph_ret trace;
+ unsigned long ret;
+
+ pop_return_trace(&trace, &ret);
+ trace.rettime = local_clock();
+ ftrace_graph_return(&trace);
+
+ if (unlikely(!ret)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ /* Might as well panic. What else to do? */
+ ret = (unsigned long)
+ dereference_function_descriptor(&panic);
+ }
+
+	/* HACK: we hand over the old function's return values
+	   in %r23 and %r24. Assembly in entry.S will take care
+	   of moving them to their final registers %ret0 and %ret1 */
+ asm( "copy %0, %%r23 \n\t"
+ "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );
+
+ return ret;
+}
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ unsigned long long calltime;
+ struct ftrace_graph_ent trace;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
+ *parent = (unsigned long)
+ dereference_function_descriptor(&return_to_handler);
+
+ if (unlikely(!__kernel_text_address(old))) {
+ ftrace_graph_stop();
+ *parent = old;
+ WARN_ON(1);
+ return;
+ }
+
+ calltime = local_clock();
+
+ if (push_return_trace(old, calltime,
+ self_addr, &trace.depth) == -EBUSY) {
+ *parent = old;
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent = old;
+ }
+}
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+void ftrace_function_trampoline(unsigned long parent,
+ unsigned long self_addr,
+ unsigned long org_sp_gr3)
+{
+ extern ftrace_func_t ftrace_trace_function;
+
+ if (function_trace_stop)
+ return;
+
+ if (ftrace_trace_function != ftrace_stub) {
+ ftrace_trace_function(parent, self_addr);
+ return;
+ }
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if (ftrace_graph_entry && ftrace_graph_return) {
+ unsigned long sp;
+ unsigned long *parent_rp;
+
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+		/* sanity check: is the stack pointer we got from the
+		   assembler function in entry.S within a reasonable
+		   range of the current stack pointer? */
+ if ((sp - org_sp_gr3) > 0x400)
+ return;
+
+ /* calculate pointer to %rp in stack */
+ parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
+ /* sanity check: parent_rp should hold parent */
+ if (*parent_rp != parent)
+ return;
+
+ prepare_ftrace_return(parent_rp, self_addr);
+ return;
+ }
+#endif
+}
+
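
The graph tracer above keeps a per-task shadow stack of return
addresses: prepare_ftrace_return() pushes the real caller and patches
the return slot to point at return_to_handler, and
ftrace_return_to_handler() pops the entry on the way back out. A
conceptual model (a sketch; the real storage is current->ret_stack,
indexed by current->curr_ret_stack):

	struct shadow_entry {
		unsigned long ret;		/* real return address */
		unsigned long func;		/* traced function */
		unsigned long long calltime;	/* entry timestamp */
	};

	/* entry:  shadow[++top] = e; *parent = &return_to_handler;
	 * exit:   jump to shadow[top--].ret after logging the trace,
	 *         or bail out to panic() if the stack underflows. */
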
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index f48a640b55f..af3bc359dc7 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -36,9 +36,12 @@
* HP PARISC Hardware Database
* Access to this database is only possible during bootup
* so don't reference this table after starting the init process
+ *
+ * NOTE: Product names which are listed here and end with a '?'
+ * are guessed. If you know the correct name, please let us know.
*/
-static struct hp_hardware hp_hardware_list[] __devinitdata = {
+static struct hp_hardware hp_hardware_list[] = {
{HPHW_NPROC,0x01,0x4,0x0,"Indigo (840, 930)"},
{HPHW_NPROC,0x8,0x4,0x01,"Firefox(825,925)"},
{HPHW_NPROC,0xA,0x4,0x01,"Top Gun (835,834,935,635)"},
@@ -222,6 +225,7 @@ static struct hp_hardware hp_hardware_list[] __devinitdata = {
{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+ {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)?"},
{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
@@ -275,9 +279,11 @@ static struct hp_hardware hp_hardware_list[] __devinitdata = {
{HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
{HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
{HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
+ {HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast?"},
{HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
{HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
{HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
+ {HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+?"},
{HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
{HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
{HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
@@ -1204,6 +1210,8 @@ static struct hp_hardware hp_hardware_list[] __devinitdata = {
{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
+ {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+ {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
@@ -1230,7 +1238,7 @@ static struct hp_cpu_type_mask {
unsigned short model;
unsigned short mask;
enum cpu_type cpu;
-} hp_cpu_type_mask_list[] __devinitdata = {
+} hp_cpu_type_mask_list[] = {
{ 0x0000, 0x0ff0, pcx }, /* 0x0000 - 0x000f */
{ 0x0048, 0x0ff0, pcxl }, /* 0x0040 - 0x004f */
@@ -1327,8 +1335,7 @@ const char * const cpu_name_version[][2] = {
[mako2] = { "PA8900 (Shortfin)", "2.0" }
};
-const char * __devinit
-parisc_hardware_description(struct parisc_device_id *id)
+const char *parisc_hardware_description(struct parisc_device_id *id)
{
struct hp_hardware *listptr;
@@ -1366,7 +1373,7 @@ parisc_hardware_description(struct parisc_device_id *id)
/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
-enum cpu_type __cpuinit
+enum cpu_type
parisc_get_cpu_type(unsigned long hversion)
{
struct hp_cpu_type_mask *ptr;
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index ec2482dc1be..d4dc588c0dc 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -32,7 +32,8 @@ ENTRY(boot_args)
.word 0 /* arg3 */
END(boot_args)
- .section .text.head
+ __HEAD
+
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
@@ -40,9 +41,7 @@ END(boot_args)
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
- .export _stext,data /* Kernel want it this way! */
-_stext:
-ENTRY(stext)
+ENTRY(parisc_kernel_start)
.proc
.callinfo
@@ -105,8 +104,9 @@ $bss_loop:
#endif
- /* Now initialize the PTEs themselves */
- ldo 0+_PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+ /* Now initialize the PTEs themselves. We use RWX for
+ * everything ... it will get remapped correctly later */
+ ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1
@@ -120,7 +120,7 @@ $pgt_fill_loop:
copy %r0,%r2
/* And the RFI Target address too */
- load32 start_kernel,%r11
+ load32 start_parisc,%r11
/* And the initial task pointer */
load32 init_thread_union,%r6
@@ -130,7 +130,7 @@ $pgt_fill_loop:
ldo THREAD_SZ_ALGN(%r6),%sp
#ifdef CONFIG_SMP
- /* Set the smp rendevous address into page zero.
+ /* Set the smp rendezvous address into page zero.
** It would be safer to do this in init_smp_config() but
** it's just way easier to deal with here because
** of 64-bit function ptrs and the address is local to this file.
@@ -193,6 +193,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
+ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
+
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -201,6 +203,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
+ mtctl %r6,%cr30 /* restore task thread info */
+
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
@@ -341,10 +345,10 @@ smp_slave_stext:
.procend
#endif /* CONFIG_SMP */
-ENDPROC(stext)
+ENDPROC(parisc_kernel_start)
#ifndef CONFIG_64BIT
- .section .data.read_mostly
+ .section .data..read_mostly
.align 4
.export $global$,data
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 2cbf13b3ef1..e158b6fbf1b 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -55,13 +55,13 @@
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
- .align PAGE_SIZE
+ .align 4096
hpmc_stack:
.block 16384
#define HPMC_IODC_BUF_SIZE 0x8000
- .align PAGE_SIZE
+ .align 4096
hpmc_iodc_buf:
.block HPMC_IODC_BUF_SIZE
@@ -80,6 +80,7 @@ END(hpmc_pim_data)
.import intr_save, code
ENTRY(os_hpmc)
+.os_hpmc:
/*
* registers modified:
@@ -295,5 +296,10 @@ os_hpmc_6:
b .
nop
ENDPROC(os_hpmc)
-ENTRY(os_hpmc_end) /* this label used to compute os_hpmc checksum */
+.os_hpmc_end:
nop
+.data
+.align 4
+ .export os_hpmc_size
+os_hpmc_size:
+ .word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
deleted file mode 100644
index 26198a074d6..00000000000
--- a/arch/parisc/kernel/init_task.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Static declaration of "init" task data structure.
- *
- * Copyright (C) 2000 Paul Bame <bame at parisc-linux.org>
- * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
- * Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org>
- * Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial task structure.
- *
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union
- __attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
-
-#if PT_NLEVELS == 3
-/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
- * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
- * guarantee that global objects will be laid out in memory in the same order
- * as the order of declaration, so put these in different sections and use
- * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE)));
-#endif
-
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE)));
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-EXPORT_SYMBOL(init_task);
-
-__asm__(".data");
-struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 4845a644463..f0b6722fc70 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -93,7 +93,7 @@ void __init setup_pdc(void)
case 0x6: /* 705, 710 */
case 0x7: /* 715, 725 */
case 0x8: /* 745, 747, 742 */
- case 0xA: /* 712 and similiar */
+ case 0xA: /* 712 and similar */
case 0xC: /* 715/64, at least */
pdc_type = PDC_TYPE_SNAKE;
@@ -170,24 +170,30 @@ static void __init pagezero_memconfig(void)
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
- pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
unsigned long bytecnt;
unsigned long temp; /* 64-bit scratch value */
long status; /* PDC return value status */
struct parisc_device *dev;
+ pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ panic("couldn't allocate memory for PDC_PAT_CELL!");
+
/* return cell module (PA or Processor view) */
status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
- PA_VIEW, &pa_pdc_cell);
+ PA_VIEW, pa_pdc_cell);
if (status != PDC_OK) {
/* no more cell modules or error */
+ kfree(pa_pdc_cell);
return status;
}
- temp = pa_pdc_cell.cba;
- dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
+ temp = pa_pdc_cell->cba;
+ dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
if (!dev) {
+ kfree(pa_pdc_cell);
return PDC_OK;
}
@@ -203,8 +209,9 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* save generic info returned from the call */
/* REVISIT: who is the consumer of this? not sure yet... */
- dev->mod_info = pa_pdc_cell.mod_info; /* pass to PAT_GET_ENTITY() */
- dev->pmod_loc = pa_pdc_cell.mod_location;
+ dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
+ dev->pmod_loc = pa_pdc_cell->mod_location;
+ dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
@@ -216,14 +223,14 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
case PAT_ENTITY_PROC:
printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
- pa_pdc_cell.mod[0]);
+ pa_pdc_cell->mod[0]);
break;
case PAT_ENTITY_MEM:
printk(KERN_DEBUG
"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
- pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
- pa_pdc_cell.mod[2]);
+ pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
+ pa_pdc_cell->mod[2]);
break;
case PAT_ENTITY_CA:
printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
@@ -243,23 +250,26 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
print_ranges:
pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
IO_VIEW, &io_pdc_cell);
- printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
- for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
+ printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
+ for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
printk(KERN_DEBUG
" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
- i, pa_pdc_cell.mod[2 + i * 3], /* type */
- pa_pdc_cell.mod[3 + i * 3], /* start */
- pa_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ i, pa_pdc_cell->mod[2 + i * 3], /* type */
+ pa_pdc_cell->mod[3 + i * 3], /* start */
+ pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
printk(KERN_DEBUG
" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
- i, io_pdc_cell.mod[2 + i * 3], /* type */
- io_pdc_cell.mod[3 + i * 3], /* start */
- io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ i, io_pdc_cell->mod[2 + i * 3], /* type */
+ io_pdc_cell->mod[3 + i * 3], /* start */
+ io_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
}
printk(KERN_DEBUG "\n");
break;
}
#endif /* DEBUG_PAT */
+
+ kfree(pa_pdc_cell);
+
return PDC_OK;
}
@@ -499,7 +509,7 @@ add_system_map_addresses(struct parisc_device *dev, int num_addrs,
dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
- __FILE__, __FUNCTION__);
+ __FILE__, __func__);
return;
}
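
The pa_pdc_cell conversion above trades an on-stack
pdc_pat_cell_mod_maddr_block_t, a large firmware return buffer, for a
heap allocation that must now be freed on every exit path. The shape
of the change (a sketch):

	pdc_pat_cell_mod_maddr_block_t *cell;

	cell = kmalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");
	/* ... use cell->cba, cell->mod_path, cell->mod[] ... */
	kfree(cell);		/* on success and on every error path */
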
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 23ef950df00..cfe056fe7f5 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/smp.h>
+#include <asm/ldcw.h>
#undef PARISC_IRQ_CR16_COUNTS
@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0;
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
-static void cpu_disable_irq(unsigned int irq)
+static void cpu_mask_irq(struct irq_data *d)
{
- unsigned long eirr_bit = EIEM_MASK(irq);
+ unsigned long eirr_bit = EIEM_MASK(d->irq);
cpu_eiem &= ~eirr_bit;
/* Do nothing on the other CPUs. If they get this interrupt,
@@ -63,7 +63,7 @@ static void cpu_disable_irq(unsigned int irq)
* then gets disabled */
}
-static void cpu_enable_irq(unsigned int irq)
+static void __cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -75,18 +75,14 @@ static void cpu_enable_irq(unsigned int irq)
smp_send_all_nop();
}
-static unsigned int cpu_startup_irq(unsigned int irq)
+static void cpu_unmask_irq(struct irq_data *d)
{
- cpu_enable_irq(irq);
- return 0;
+ __cpu_unmask_irq(d->irq);
}
-void no_ack_irq(unsigned int irq) { }
-void no_end_irq(unsigned int irq) { }
-
-void cpu_ack_irq(unsigned int irq)
+void cpu_ack_irq(struct irq_data *d)
{
- unsigned long mask = EIEM_MASK(irq);
+ unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* Clear in EIEM so we can no longer process */
@@ -99,9 +95,9 @@ void cpu_ack_irq(unsigned int irq)
mtctl(mask, 23);
}
-void cpu_end_irq(unsigned int irq)
+void cpu_eoi_irq(struct irq_data *d)
{
- unsigned long mask = EIEM_MASK(irq);
+ unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* set it in the eiems---it's no longer in process */
@@ -112,51 +108,93 @@ void cpu_end_irq(unsigned int irq)
}
#ifdef CONFIG_SMP
-int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
+int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
- if (CHECK_IRQ_PER_CPU(irq)) {
- /* Bad linux design decision. The mask has already
- * been set; we must reset it */
- irq_desc[irq].affinity = CPU_MASK_ALL;
+ if (irqd_is_per_cpu(d))
return -EINVAL;
- }
/* whatever mask they set, we just allow one CPU */
- cpu_dest = first_cpu(*dest);
- *dest = cpumask_of_cpu(cpu_dest);
+ cpu_dest = cpumask_first_and(dest, cpu_online_mask);
- return 0;
+ return cpu_dest;
}
-static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
+static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
{
- if (cpu_check_affinity(irq, &dest))
- return;
+ int cpu_dest;
+
+ cpu_dest = cpu_check_affinity(d, dest);
+ if (cpu_dest < 0)
+ return -1;
- irq_desc[irq].affinity = dest;
+ cpumask_copy(d->affinity, dest);
+
+ return 0;
}
#endif
-static struct hw_interrupt_type cpu_interrupt_type = {
- .typename = "CPU",
- .startup = cpu_startup_irq,
- .shutdown = cpu_disable_irq,
- .enable = cpu_enable_irq,
- .disable = cpu_disable_irq,
- .ack = cpu_ack_irq,
- .end = cpu_end_irq,
+static struct irq_chip cpu_interrupt_type = {
+ .name = "CPU",
+ .irq_mask = cpu_mask_irq,
+ .irq_unmask = cpu_unmask_irq,
+ .irq_ack = cpu_ack_irq,
+ .irq_eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
- .set_affinity = cpu_set_affinity_irq,
+ .irq_set_affinity = cpu_set_affinity_irq,
#endif
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
- .retrigger = NULL,
+ .irq_retrigger = NULL,
};
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x) (&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ seq_printf(p, "%*s: ", prec, "STK");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+ seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+ seq_printf(p, "%*s: ", prec, "IST");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+ seq_puts(p, " Interrupt stack usage\n");
+# endif
+#endif
+#ifdef CONFIG_SMP
+ seq_printf(p, "%*s: ", prec, "RES");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+ seq_puts(p, " Rescheduling interrupts\n");
+#endif
+ seq_printf(p, "%*s: ", prec, "UAH");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+ seq_puts(p, " Unaligned access handler traps\n");
+ seq_printf(p, "%*s: ", prec, "FPA");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+ seq_puts(p, " Floating point assist traps\n");
+ seq_printf(p, "%*s: ", prec, "TLB");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+ seq_puts(p, " TLB shootdowns\n");
+ return 0;
+}
+
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
@@ -174,21 +212,22 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
+ struct irq_desc *desc = irq_to_desc(i);
struct irqaction *action;
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
@@ -220,9 +259,12 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+ if (i == NR_IRQS)
+ arch_show_interrupts(p, 3);
+
return 0;
}
@@ -238,15 +280,16 @@ int show_interrupts(struct seq_file *p, void *v)
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
- if (irq_desc[irq].action)
+ if (irq_has_action(irq))
return -EBUSY;
- if (irq_desc[irq].chip != &cpu_interrupt_type)
+ if (irq_get_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
+ /* for iosapic interrupts */
if (type) {
- irq_desc[irq].chip = type;
- irq_desc[irq].chip_data = data;
- cpu_interrupt_type.enable(irq);
+ irq_set_chip_and_handler(irq, type, handle_percpu_irq);
+ irq_set_chip_data(irq, data);
+ __cpu_unmask_irq(irq);
}
return 0;
}
@@ -295,10 +338,11 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ struct irq_data *d = irq_get_irq_data(irq);
+ cpumask_copy(d->affinity, cpumask_of(cpu));
#endif
- return cpu_data[cpu].txn_addr;
+ return per_cpu(cpu_data, cpu).txn_addr;
}
@@ -309,11 +353,12 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
- while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
- !cpu_online(next_cpu)))
+ while ((next_cpu < nr_cpu_ids) &&
+ (!per_cpu(cpu_data, next_cpu).txn_addr ||
+ !cpu_online(next_cpu)))
next_cpu++;
- if (next_cpu >= NR_CPUS)
+ if (next_cpu >= nr_cpu_ids)
next_cpu = 0; /* nothing else, assign monarch */
return txn_affinity_addr(virt_irq, next_cpu);
@@ -331,6 +376,131 @@ static inline int eirr_to_irq(unsigned long eirr)
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+ unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+ volatile unsigned int slock[4];
+ volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+ .slock = { 1,1,1,1 },
+ };
+#endif
+
+
+int sysctl_panic_on_stackoverflow = 1;
+
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ #define STACK_MARGIN (256*6)
+
+ /* Our stack starts directly behind the thread_info struct. */
+ unsigned long stack_start = (unsigned long) current_thread_info();
+ unsigned long sp = regs->gr[30];
+ unsigned long stack_usage;
+ unsigned int *last_usage;
+ int cpu = smp_processor_id();
+
+ /* if sr7 != 0, we interrupted a userspace process which we do not want
+ * to check for stack overflow. We will only check the kernel stack. */
+ if (regs->sr[7])
+ return;
+
+ /* calculate kernel stack usage */
+ stack_usage = sp - stack_start;
+#ifdef CONFIG_IRQSTACKS
+ if (likely(stack_usage <= THREAD_SIZE))
+ goto check_kernel_stack; /* found kernel stack */
+
+ /* check irq stack usage */
+ stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+ stack_usage = sp - stack_start;
+
+ last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+ if (unlikely(stack_usage > *last_usage))
+ *last_usage = stack_usage;
+
+ if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+ return;
+
+ pr_emerg("stackcheck: %s will most likely overflow irq stack "
+ "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+ current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+ goto panic_check;
+
+check_kernel_stack:
+#endif
+
+ /* check kernel stack usage */
+ last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
+
+ if (unlikely(stack_usage > *last_usage))
+ *last_usage = stack_usage;
+
+ if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
+ return;
+
+ pr_emerg("stackcheck: %s will most likely overflow kernel stack "
+ "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+ current->comm, sp, stack_start, stack_start + THREAD_SIZE);
+
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
+ if (sysctl_panic_on_stackoverflow)
+ panic("low stack detected by irq handler - check messages\n");
+#endif
+}
+
+#ifdef CONFIG_IRQSTACKS
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
+
+static void execute_on_irq_stack(void *func, unsigned long param1)
+{
+ union irq_stack_union *union_ptr;
+ unsigned long irq_stack;
+ volatile unsigned int *irq_stack_in_use;
+
+ union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+ irq_stack = (unsigned long) &union_ptr->stack;
+ irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+ 64); /* align for stack frame usage */
+
+	/* We may be called recursively. If we are already using the irq stack,
+ * just continue to use it. Use spinlocks to serialize
+ * the irq stack usage.
+ */
+ irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+ if (!__ldcw(irq_stack_in_use)) {
+ void (*direct_call)(unsigned long p1) = func;
+
+ /* We are using the IRQ stack already.
+ * Do direct call on current stack. */
+ direct_call(param1);
+ return;
+ }
+
+ /* This is where we switch to the IRQ stack. */
+ call_on_stack(param1, func, irq_stack);
+
+ /* free up irq stack usage. */
+ *irq_stack_in_use = 1;
+}
+
+void do_softirq_own_stack(void)
+{
+ execute_on_irq_stack(__do_softirq, 0);
+}
+#endif /* CONFIG_IRQSTACKS */
+
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
@@ -338,6 +508,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
+ struct irq_desc *desc;
cpumask_t dest;
#endif
@@ -351,19 +522,26 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
- dest = irq_desc[irq].affinity;
- if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
+ desc = irq_to_desc(irq);
+ cpumask_copy(&dest, desc->irq_data.affinity);
+ if (irqd_is_per_cpu(&desc->irq_data) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
- cpu_data[cpu].hpa);
+ per_cpu(cpu_data, cpu).hpa);
goto set_out;
}
#endif
- __do_IRQ(irq);
+ stack_overflow_check(regs);
+
+#ifdef CONFIG_IRQSTACKS
+ execute_on_irq_stack(&generic_handle_irq, irq);
+#else
+ generic_handle_irq(irq);
+#endif /* CONFIG_IRQSTACKS */
out:
irq_exit();
@@ -378,14 +556,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
+ .flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};
#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
- .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .flags = IRQF_PERCPU,
};
#endif
@@ -393,14 +571,15 @@ static void claim_cpu_irqs(void)
{
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
- irq_desc[i].chip = &cpu_interrupt_type;
+ irq_set_chip_and_handler(i, &cpu_interrupt_type,
+ handle_percpu_irq);
}
- irq_desc[TIMER_IRQ].action = &timer_action;
- irq_desc[TIMER_IRQ].status = IRQ_PER_CPU;
+ irq_set_handler(TIMER_IRQ, handle_percpu_irq);
+ setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
- irq_desc[IPI_IRQ].action = &ipi_action;
- irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
+ irq_set_handler(IPI_IRQ, handle_percpu_irq);
+ setup_irq(IPI_IRQ, &ipi_action);
#endif
}
@@ -408,18 +587,14 @@ void __init init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
- claim_cpu_irqs();
#ifdef CONFIG_SMP
- if (!cpu_eiem)
+ if (!cpu_eiem) {
+ claim_cpu_irqs();
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+ }
#else
+ claim_cpu_irqs();
cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
-
-}
-
-void ack_bad_irq(unsigned int irq)
-{
- printk("unexpected IRQ %d\n", irq);
}
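
Worth spelling out in execute_on_irq_stack() above: the ldcw word
doubles as a recursion guard. __ldcw() atomically loads the lock word
and leaves zero behind, so a zero return means this CPU is already
running on its IRQ stack and the handler simply nests on the current
stack; the later plain store of 1 is the release. Reduced to a C
sketch:

	if (__ldcw(irq_stack_in_use) == 0) {
		direct_call(param1);	/* already on the IRQ stack */
		return;
	}
	call_on_stack(param1, func, irq_stack);	/* switch stacks */
	*irq_stack_in_use = 1;			/* release the guard */
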
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index fdacdd4341c..50dfafc3f2c 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -6,6 +6,7 @@
*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
* Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +25,19 @@
*
*
* Notes:
+ * - PLT stub handling
+ * On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ * ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ * fail to reach their PLT stub if we only create one big stub array for
+ * all sections at the beginning of the core or init section.
+ * Instead we now insert individual PLT stub entries directly in front of
+ *   the code sections where the stubs are actually called.
+ * This reduces the distance between the PCREL location and the stub entry
+ * so that the relocations can be fulfilled.
+ * While calculating the final layout of the kernel module in memory, the
+ * kernel module loader calls arch_mod_section_prepend() to request the
+ * to be reserved amount of memory in front of each individual section.
+ *
* - SEGREL32 handling
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
@@ -47,7 +61,10 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/pgtable.h>
#include <asm/unwind.h>
#if 0
@@ -56,9 +73,13 @@
#define DEBUGP(fmt...)
#endif
+#define RELOC_REACHABLE(val, bits) \
+ (( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
+ ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
+ 0 : 1)
+
#define CHECK_RELOC(val, bits) \
- if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
- ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
+ if (!RELOC_REACHABLE(val, bits)) { \
printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
me->name, strtab + sym->st_name, (unsigned long)val, bits); \
return -ENOEXEC; \
@@ -68,8 +89,12 @@
* the bottom of the table, which has a maximum signed displacement of
* 0x3fff; however, since we're only going forward, this becomes
* 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
- * at most 1023 entries */
-#define MAX_GOTS 1023
+ * at most 1023 entries.
+ * To overcome this 14bit displacement with some kernel modules, we'll
+ * instead use the unusual 16bit displacement method (see reassemble_16a)
+ * which gives us a maximum positive displacement of 0x7fff, and as such
+ * allows us to allocate up to 4095 GOT entries. */
+#define MAX_GOTS 4095
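
The arithmetic behind the old and new limits, written out as a
compile-time check (a sketch; each 64-bit GOT entry is 8 bytes):

	_Static_assert(0x1fff / 8 == 1023, "14-bit forward displacement");
	_Static_assert(0x7fff / 8 == 4095, "16a forward displacement");
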
/* three functions to determine where in the module core
* or init pieces the location is */
@@ -90,20 +115,11 @@ static inline int in_local(struct module *me, void *loc)
return in_init(me, loc) || in_core(me, loc);
}
-static inline int in_local_section(struct module *me, void *loc, void *dot)
-{
- return (in_init(me, loc) && in_init(me, dot)) ||
- (in_core(me, loc) && in_core(me, dot));
-}
-
-
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
};
-#define Elf_Fdesc Elf32_Fdesc
-
struct stub_entry {
Elf32_Word insns[2]; /* each stub entry has two insns */
};
@@ -112,8 +128,6 @@ struct got_entry {
Elf64_Addr addr;
};
-#define Elf_Fdesc Elf64_Fdesc
-
struct stub_entry {
Elf64_Word insns[4]; /* each stub entry has four insns */
};
@@ -138,12 +152,40 @@ struct stub_entry {
/* The reassemble_* functions prepare an immediate value for
insertion into an opcode. pa-risc uses all sorts of weird bitfields
in the instruction to hold the value. */
+static inline int sign_unext(int x, int len)
+{
+ int len_ones;
+
+ len_ones = (1 << len) - 1;
+ return x & len_ones;
+}
+
+static inline int low_sign_unext(int x, int len)
+{
+ int sign, temp;
+
+ sign = (x >> (len-1)) & 1;
+ temp = sign_unext(x, len-1);
+ return (temp << 1) | sign;
+}
+
static inline int reassemble_14(int as14)
{
return (((as14 & 0x1fff) << 1) |
((as14 & 0x2000) >> 13));
}
+static inline int reassemble_16a(int as16)
+{
+ int s, t;
+
+ /* Unusual 16-bit encoding, for wide mode only. */
+ t = (as16 << 1) & 0xffff;
+ s = (as16 & 0x8000);
+ return (t ^ s ^ (s >> 1)) | (s >> 15);
+}
+
+
static inline int reassemble_17(int as17)
{
return (((as17 & 0x10000) >> 16) |
@@ -172,9 +214,13 @@ static inline int reassemble_22(int as22)
void *module_alloc(unsigned long size)
{
- if (size == 0)
- return NULL;
- return vmalloc(size);
+ /* using RWX means less protection for modules, but it's
+ * easier than trying to map the text, data, init_text and
+ * init_data correctly */
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_HIGHMEM,
+ PAGE_KERNEL_RWX, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
#ifndef CONFIG_64BIT
@@ -256,9 +302,20 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
+ kfree(mod->arch.section);
+ mod->arch.section = NULL;
+
vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
+}
+
+/* Additional bytes needed in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod,
+ unsigned int section)
+{
+ /* size needed for all stubs of this section (including
+ * one additional for correct alignment of the stubs) */
+ return (mod->arch.section[section].stub_entries + 1)
+ * sizeof(struct stub_entry);
}
#define CONST
@@ -267,12 +324,18 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
CONST char *secstrings,
struct module *me)
{
- unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
+ unsigned long gots = 0, fdescs = 0, len;
unsigned int i;
+ len = hdr->e_shnum * sizeof(me->arch.section[0]);
+ me->arch.section = kzalloc(len, GFP_KERNEL);
+ if (!me->arch.section)
+ return -ENOMEM;
+
for (i = 1; i < hdr->e_shnum; i++) {
- const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
+ const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+ unsigned int count, s;
if (strncmp(secstrings + sechdrs[i].sh_name,
".PARISC.unwind", 14) == 0)
@@ -288,11 +351,23 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
*/
gots += count_gots(rels, nrels);
fdescs += count_fdescs(rels, nrels);
- if(strncmp(secstrings + sechdrs[i].sh_name,
- ".rela.init", 10) == 0)
- init_stubs += count_stubs(rels, nrels);
- else
- stubs += count_stubs(rels, nrels);
+
+ /* XXX: By sorting the relocs and finding duplicate entries
+ * we could reduce the number of necessary stubs and save
+ * some memory. */
+ count = count_stubs(rels, nrels);
+ if (!count)
+ continue;
+
+ /* so we need relocation stubs. reserve necessary memory. */
+ /* sh_info gives the section for which we need to add stubs. */
+ s = sechdrs[i].sh_info;
+
+ /* each code section should only have one relocation section */
+ WARN_ON(me->arch.section[s].stub_entries);
+
+ /* store number of stubs we need for this section */
+ me->arch.section[s].stub_entries += count;
}
/* align things a bit */
@@ -304,18 +379,8 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
me->arch.fdesc_offset = me->core_size;
me->core_size += fdescs * sizeof(Elf_Fdesc);
- me->core_size = ALIGN(me->core_size, 16);
- me->arch.stub_offset = me->core_size;
- me->core_size += stubs * sizeof(struct stub_entry);
-
- me->init_size = ALIGN(me->init_size, 16);
- me->arch.init_stub_offset = me->init_size;
- me->init_size += init_stubs * sizeof(struct stub_entry);
-
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
- me->arch.stub_max = stubs;
- me->arch.init_stub_max = init_stubs;
return 0;
}
@@ -378,23 +443,28 @@ enum elf_stub_type {
};
static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
- enum elf_stub_type stub_type, int init_section)
+ enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
{
- unsigned long i;
struct stub_entry *stub;
-
- if(init_section) {
- i = me->arch.init_stub_count++;
- BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
- stub = me->module_init + me->arch.init_stub_offset +
- i * sizeof(struct stub_entry);
- } else {
- i = me->arch.stub_count++;
- BUG_ON(me->arch.stub_count > me->arch.stub_max);
- stub = me->module_core + me->arch.stub_offset +
- i * sizeof(struct stub_entry);
+ int __maybe_unused d;
+
+ /* initialize stub_offset to point in front of the section */
+ if (!me->arch.section[targetsec].stub_offset) {
+ loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+ sizeof(struct stub_entry);
+ /* get correct alignment for the stubs */
+ loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+ me->arch.section[targetsec].stub_offset = loc0;
}
+ /* get address of stub entry */
+ stub = (void *) me->arch.section[targetsec].stub_offset;
+ me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+ /* do not write outside available stub area */
+ BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
+
#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
* ldil L'XXX,%r1
@@ -430,12 +500,19 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
*/
switch (stub_type) {
case ELF_STUB_GOT:
- stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
+ d = get_got(me, value, addend);
+ if (d <= 15) {
+ /* Format 5 */
+ stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
+ stub->insns[0] |= low_sign_unext(d, 5) << 16;
+ } else {
+ /* Format 3 */
+ stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
+ stub->insns[0] |= reassemble_16a(d);
+ }
stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
stub->insns[2] = 0xe820d000; /* bve (%r1) */
stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
-
- stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff);
break;
case ELF_STUB_MILLI:
stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
@@ -461,18 +538,6 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
return (Elf_Addr)stub;
}
-int apply_relocate(Elf_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- /* parisc should not need this ... */
- printk(KERN_ERR "module %s: RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
@@ -487,15 +552,19 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
Elf32_Addr val;
Elf32_Sword addend;
Elf32_Addr dot;
+ Elf_Addr loc0;
+ unsigned int targetsec = sechdrs[relsec].sh_info;
//unsigned long dp = (unsigned long)$global$;
register unsigned long dp asm ("r27");
DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
- loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
+ /* This is the start of the target section */
+ loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
@@ -567,19 +636,32 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
break;
case R_PARISC_PCREL17F:
/* 17-bit PC relative address */
- val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
+ /* calculate direct call offset */
+ val += addend;
val = (val - dot - 8)/4;
- CHECK_RELOC(val, 17)
+ if (!RELOC_REACHABLE(val, 17)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value, addend,
+ ELF_STUB_DIRECT, loc0, targetsec);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 17);
+ }
*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
break;
case R_PARISC_PCREL22F:
/* 22-bit PC relative address; only defined for pa20 */
- val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
- DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
- strtab + sym->st_name, (unsigned long)loc, addend,
- val)
+ /* calculate direct call offset */
+ val += addend;
val = (val - dot - 8)/4;
- CHECK_RELOC(val, 22);
+ if (!RELOC_REACHABLE(val, 22)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value, addend,
+ ELF_STUB_DIRECT, loc0, targetsec);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
+ }
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
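
Both PC-relative cases now follow the same shape: compute the word displacement from the branch (the dot + 8 term accounts for the PA-RISC instruction-fetch offset), test it against the branch field width, and only consume a stub when the direct branch cannot reach. A sketch of the range test as a plain signed-range check, equivalent in spirit to the kernel's RELOC_REACHABLE bit test:

#include <stdbool.h>

/* val is the word displacement (target - dot - 8) / 4 and must fit a
 * signed field "bits" wide. */
static bool reloc_reachable(long val, int bits)
{
	long lim = 1L << (bits - 1);

	return val >= -lim && val < lim;
}

/* Usage mirroring the 17-bit case above (names from the diff):
 *   val = sym->st_value + addend;
 *   val = (val - dot - 8) / 4;
 *   if (!reloc_reachable(val, 17)) {
 *           val = get_stub(me, sym->st_value, addend, ELF_STUB_DIRECT,
 *                          loc0, targetsec);
 *           val = (val - dot - 8) / 4;   // the stub must be reachable
 *   }
 */
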
@@ -608,13 +690,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
Elf64_Addr val;
Elf64_Sxword addend;
Elf64_Addr dot;
+ Elf_Addr loc0;
+ unsigned int targetsec = sechdrs[relsec].sh_info;
DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
- loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
+ /* This is the start of the target section */
+ loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
@@ -670,42 +756,40 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
strtab + sym->st_name,
loc, val);
+ val += addend;
/* can we reach it locally? */
- if(!in_local_section(me, (void *)val, (void *)dot)) {
-
- if (in_local(me, (void *)val))
- /* this is the case where the
- * symbol is local to the
- * module, but in a different
- * section, so stub the jump
- * in case it's more than 22
- * bits away */
- val = get_stub(me, val, addend, ELF_STUB_DIRECT,
- in_init(me, loc));
- else if (strncmp(strtab + sym->st_name, "$$", 2)
+ if (in_local(me, (void *)val)) {
+ /* this is the case where the symbol is local
+ * to the module, but in a different section,
+ * so stub the jump in case it's more than 22
+ * bits away */
+ val = (val - dot - 8)/4;
+ if (!RELOC_REACHABLE(val, 22)) {
+ /* direct distance too far, create
+ * stub entry instead */
+ val = get_stub(me, sym->st_value,
+ addend, ELF_STUB_DIRECT,
+ loc0, targetsec);
+ } else {
+ /* Ok, we can reach it directly. */
+ val = sym->st_value;
+ val += addend;
+ }
+ } else {
+ val = sym->st_value;
+ if (strncmp(strtab + sym->st_name, "$$", 2)
== 0)
val = get_stub(me, val, addend, ELF_STUB_MILLI,
- in_init(me, loc));
+ loc0, targetsec);
else
val = get_stub(me, val, addend, ELF_STUB_GOT,
- in_init(me, loc));
+ loc0, targetsec);
}
DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
strtab + sym->st_name, loc, sym->st_value,
addend, val);
- /* FIXME: local symbols work as long as the
- * core and init pieces aren't separated too
- * far. If this is ever broken, you will trip
- * the check below. The way to fix it would
- * be to generate local stubs to go between init
- * and core */
- if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
- (Elf64_Sxword)(val - dot - 8) < -0x800000) {
- printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
- me->name, strtab + sym->st_name);
- return -ENOEXEC;
- }
val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
case R_PARISC_DIR64:
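
Condensed, the rewritten 64-bit PCREL22F resolution above picks one of three targets. A hypothetical C rendering; the helpers and stub types are the diff's, Elf_Addr is simplified to unsigned long, and reachable22() stands in for RELOC_REACHABLE(..., 22):

#include <string.h>

enum elf_stub_type { ELF_STUB_GOT, ELF_STUB_MILLI, ELF_STUB_DIRECT };
struct module;
extern int in_local(struct module *me, void *loc);
extern int reachable22(long words);
extern unsigned long get_stub(struct module *me, unsigned long value,
			      long addend, enum elf_stub_type type,
			      unsigned long loc0, unsigned int targetsec);

static unsigned long pcrel22f_target(struct module *me, const char *name,
				     unsigned long sym_value, long addend,
				     unsigned long dot, unsigned long loc0,
				     unsigned int targetsec)
{
	unsigned long val = sym_value + addend;

	if (in_local(me, (void *)val)) {
		/* same module: branch directly if the offset fits ... */
		if (reachable22((long)(val - dot - 8) / 4))
			return val;
		/* ... otherwise via a direct trampoline near the section */
		return get_stub(me, sym_value, addend, ELF_STUB_DIRECT,
				loc0, targetsec);
	}
	/* external: millicode ($$-prefixed) gets its own stub type,
	 * everything else is called through the GOT */
	if (strncmp(name, "$$", 2) == 0)
		return get_stub(me, sym_value, addend, ELF_STUB_MILLI,
				loc0, targetsec);
	return get_stub(me, sym_value, addend, ELF_STUB_GOT,
			loc0, targetsec);
}

The caller then recomputes (val - dot - 8) / 4 and applies CHECK_RELOC(val, 22), so an out-of-range stub still fails loudly instead of silently corrupting the branch.
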
@@ -792,12 +876,8 @@ int module_finalize(const Elf_Ehdr *hdr,
addr = (u32 *)entry->addr;
printk("INSNS: %x %x %x %x\n",
addr[0], addr[1], addr[2], addr[3]);
- printk("stubs used %ld, stubs max %ld\n"
- "init_stubs used %ld, init stubs max %ld\n"
- "got entries used %ld, gots max %ld\n"
+ printk("got entries used %ld, gots max %ld\n"
"fdescs used %ld, fdescs max %ld\n",
- me->arch.stub_count, me->arch.stub_max,
- me->arch.init_stub_count, me->arch.init_stub_max,
me->arch.got_count, me->arch.got_max,
me->arch.fdesc_count, me->arch.fdesc_max);
#endif
@@ -808,7 +888,7 @@ int module_finalize(const Elf_Ehdr *hdr,
* ourselves */
for (i = 1; i < hdr->e_shnum; i++) {
if(sechdrs[i].sh_type == SHT_SYMTAB
- && (sechdrs[i].sh_type & SHF_ALLOC)) {
+ && (sechdrs[i].sh_flags & SHF_ALLOC)) {
int strindex = sechdrs[i].sh_link;
/* FIXME: AWFUL HACK
* The cast is to drop the const from
@@ -827,7 +907,10 @@ int module_finalize(const Elf_Ehdr *hdr,
me->name, me->arch.got_count, MAX_GOTS);
return -EINVAL;
}
-
+
+ kfree(me->arch.section);
+ me->arch.section = NULL;
+
/* no symbol table */
if(symhdr == NULL)
return 0;
@@ -852,11 +935,10 @@ int module_finalize(const Elf_Ehdr *hdr,
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
- return module_bug_finalize(hdr, sechdrs, me);
+ return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
- module_bug_cleanup(mod);
}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 5901092e019..b743a80eaba 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -85,7 +85,7 @@ ENTRY(flush_tlb_all_local)
LDREG ITLB_OFF_COUNT(%r1), %arg2
LDREG ITLB_LOOP(%r1), %arg3
- ADDIB= -1, %arg3, fitoneloop /* Preadjust and test */
+ addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
copy %arg0, %r28 /* Init base addr */
@@ -95,14 +95,14 @@ fitmanyloop: /* Loop if LOOP >= 2 */
copy %arg2, %r29 /* Init middle loop count */
fitmanymiddle: /* Loop if LOOP >= 2 */
- ADDIB> -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
+ addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
pitlbe 0(%sr1, %r28)
pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
- ADDIB> -1, %r29, fitmanymiddle /* Middle loop decr */
+ addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
- ADDIB<=,n -1, %r22, fitdone /* Outer loop count decr */
+ addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
fitoneloop: /* Loop if LOOP = 1 */
mtsp %r20, %sr1
@@ -110,10 +110,10 @@ fitoneloop: /* Loop if LOOP = 1 */
copy %arg2, %r29 /* init middle loop count */
fitonemiddle: /* Loop if LOOP = 1 */
- ADDIB> -1, %r29, fitonemiddle /* Middle loop count decr */
+ addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
- ADDIB> -1, %r22, fitoneloop /* Outer loop count decr */
+ addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
add %r21, %r20, %r20 /* increment space */
fitdone:
@@ -128,7 +128,7 @@ fitdone:
LDREG DTLB_OFF_COUNT(%r1), %arg2
LDREG DTLB_LOOP(%r1), %arg3
- ADDIB= -1, %arg3, fdtoneloop /* Preadjust and test */
+ addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
copy %arg0, %r28 /* Init base addr */
@@ -138,14 +138,14 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
copy %arg2, %r29 /* Init middle loop count */
fdtmanymiddle: /* Loop if LOOP >= 2 */
- ADDIB> -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
+ addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
pdtlbe 0(%sr1, %r28)
pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
- ADDIB> -1, %r29, fdtmanymiddle /* Middle loop decr */
+ addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
- ADDIB<=,n -1, %r22,fdtdone /* Outer loop count decr */
+ addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
fdtoneloop: /* Loop if LOOP = 1 */
mtsp %r20, %sr1
@@ -153,10 +153,10 @@ fdtoneloop: /* Loop if LOOP = 1 */
copy %arg2, %r29 /* init middle loop count */
fdtonemiddle: /* Loop if LOOP = 1 */
- ADDIB> -1, %r29, fdtonemiddle /* Middle loop count decr */
+ addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
- ADDIB> -1, %r22, fdtoneloop /* Outer loop count decr */
+ addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
add %r21, %r20, %r20 /* increment space */
@@ -199,7 +199,6 @@ ENTRY(flush_instruction_cache_local)
.callinfo NO_CALLS
.entry
- mtsp %r0, %sr1
load32 cache_info, %r1
/* Flush Instruction Cache */
@@ -208,19 +207,46 @@ ENTRY(flush_instruction_cache_local)
LDREG ICACHE_STRIDE(%r1), %arg1
LDREG ICACHE_COUNT(%r1), %arg2
LDREG ICACHE_LOOP(%r1), %arg3
- rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
- ADDIB= -1, %arg3, fioneloop /* Preadjust and test */
+ rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
+ mtsp %r0, %sr1
+ addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
fimanyloop: /* Loop if LOOP >= 2 */
- ADDIB> -1, %r31, fimanyloop /* Adjusted inner loop decr */
+ addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
fice %r0(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
- ADDIB<=,n -1, %arg2, fisync /* Outer loop decr */
+ addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
fioneloop: /* Loop if LOOP = 1 */
- ADDIB> -1, %arg2, fioneloop /* Outer loop count decr */
+ /* Some implementations may flush with a single fice instruction */
+ cmpib,COND(>>=),n 15, %arg2, fioneloop2
+
+fioneloop1:
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0)
+ addib,COND(>) -16, %arg2, fioneloop1
+ fice,m %arg1(%sr1, %arg0)
+
+ /* Check if done */
+ cmpb,COND(=),n %arg2, %r0, fisync /* Predict branch taken */
+
+fioneloop2:
+ addib,COND(>) -1, %arg2, fioneloop2 /* Outer loop count decr */
fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
fisync:
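
The old fioneloop issued one fice per backward branch; the new fast path retires sixteen flushes per branch (fifteen in the body plus one in the addib delay slot) and leaves the remainder to fioneloop2. The same structure in C, with the delay-slot boundary arithmetic simplified; flush_line() stands in for one fice,m %arg1(%sr1, %arg0):

static void flush_icache_lines(void (*flush_line)(void), unsigned long count)
{
	/* cmpib,COND(>>=) 15,count skips the unrolled path entirely
	 * when 15 or fewer lines remain */
	while (count > 15) {
		for (int i = 0; i < 16; i++)
			flush_line();		/* fioneloop1 body */
		count -= 16;
	}
	while (count--)				/* fioneloop2 remainder */
		flush_line();
}

The identical transformation is applied to the fdce loop in flush_data_cache_local below.
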
@@ -240,8 +266,7 @@ ENTRY(flush_data_cache_local)
.callinfo NO_CALLS
.entry
- mtsp %r0, %sr1
- load32 cache_info, %r1
+ load32 cache_info, %r1
/* Flush Data Cache */
@@ -249,19 +274,46 @@ ENTRY(flush_data_cache_local)
LDREG DCACHE_STRIDE(%r1), %arg1
LDREG DCACHE_COUNT(%r1), %arg2
LDREG DCACHE_LOOP(%r1), %arg3
- rsm PSW_SM_I, %r22
- ADDIB= -1, %arg3, fdoneloop /* Preadjust and test */
+ rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
+ mtsp %r0, %sr1
+ addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
fdmanyloop: /* Loop if LOOP >= 2 */
- ADDIB> -1, %r31, fdmanyloop /* Adjusted inner loop decr */
+ addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
fdce %r0(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
- ADDIB<=,n -1, %arg2, fdsync /* Outer loop decr */
+ addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
fdoneloop: /* Loop if LOOP = 1 */
- ADDIB> -1, %arg2, fdoneloop /* Outer loop count decr */
+ /* Some implementations may flush with a single fdce instruction */
+ cmpib,COND(>>=),n 15, %arg2, fdoneloop2
+
+fdoneloop1:
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0)
+ addib,COND(>) -16, %arg2, fdoneloop1
+ fdce,m %arg1(%sr1, %arg0)
+
+ /* Check if done */
+ cmpb,COND(=),n %arg2, %r0, fdsync /* Predict branch taken */
+
+fdoneloop2:
+ addib,COND(>) -1, %arg2, fdoneloop2 /* Outer loop count decr */
fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
fdsync:
@@ -277,7 +329,104 @@ ENDPROC(flush_data_cache_local)
.align 16
-ENTRY(copy_user_page_asm)
+/* Macros to serialize TLB purge operations on SMP. */
+
+ .macro tlb_lock la,flags,tmp
+#ifdef CONFIG_SMP
+ ldil L%pa_tlb_lock,%r1
+ ldo R%pa_tlb_lock(%r1),\la
+ rsm PSW_SM_I,\flags
+1: LDCW 0(\la),\tmp
+ cmpib,<>,n 0,\tmp,3f
+2: ldw 0(\la),\tmp
+ cmpb,<> %r0,\tmp,1b
+ nop
+ b,n 2b
+3:
+#endif
+ .endm
+
+ .macro tlb_unlock la,flags,tmp
+#ifdef CONFIG_SMP
+ ldi 1,\tmp
+ stw \tmp,0(\la)
+ mtsm \flags
+#endif
+ .endm
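+
+/* The lock word behind these macros, pa_tlb_lock, follows the classic
+ * PA-RISC ldcw convention: 1 means free, and the only atomic primitive
+ * is load-and-clear-word. A C model of the protocol, with an atomic
+ * exchange standing in for ldcw (the rsm/mtsm interrupt masking in the
+ * macros is omitted here):
+ *
+ *	#include <stdatomic.h>
+ *
+ *	static atomic_uint pa_tlb_lock = 1;	// 1 = unlocked
+ *
+ *	static void tlb_lock(void)
+ *	{
+ *		for (;;) {
+ *			// ldcw: atomically fetch the word, clear it to 0
+ *			if (atomic_exchange(&pa_tlb_lock, 0) != 0)
+ *				return;		// saw 1: lock acquired
+ *			// spin with plain loads (the "2: ldw" loop above)
+ *			// until the holder stores 1 back, then retry ldcw
+ *			while (atomic_load(&pa_tlb_lock) == 0)
+ *				;
+ *		}
+ *	}
+ *
+ *	static void tlb_unlock(void)
+ *	{
+ *		atomic_store(&pa_tlb_lock, 1);	// stw 1: release
+ *	}
+ */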
+
+/* Clear page using kernel mapping. */
+
+ENTRY(clear_page_asm)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+#ifdef CONFIG_64BIT
+
+ /* Unroll the loop. */
+ ldi (PAGE_SIZE / 128), %r1
+
+1:
+ std %r0, 0(%r26)
+ std %r0, 8(%r26)
+ std %r0, 16(%r26)
+ std %r0, 24(%r26)
+ std %r0, 32(%r26)
+ std %r0, 40(%r26)
+ std %r0, 48(%r26)
+ std %r0, 56(%r26)
+ std %r0, 64(%r26)
+ std %r0, 72(%r26)
+ std %r0, 80(%r26)
+ std %r0, 88(%r26)
+ std %r0, 96(%r26)
+ std %r0, 104(%r26)
+ std %r0, 112(%r26)
+ std %r0, 120(%r26)
+
+ /* Note reverse branch hint for addib is taken. */
+ addib,COND(>),n -1, %r1, 1b
+ ldo 128(%r26), %r26
+
+#else
+
+ /*
+ * Note that until (if) we start saving the full 64-bit register
+ * values on interrupt, we can't use std on a 32 bit kernel.
+ */
+ ldi (PAGE_SIZE / 64), %r1
+
+1:
+ stw %r0, 0(%r26)
+ stw %r0, 4(%r26)
+ stw %r0, 8(%r26)
+ stw %r0, 12(%r26)
+ stw %r0, 16(%r26)
+ stw %r0, 20(%r26)
+ stw %r0, 24(%r26)
+ stw %r0, 28(%r26)
+ stw %r0, 32(%r26)
+ stw %r0, 36(%r26)
+ stw %r0, 40(%r26)
+ stw %r0, 44(%r26)
+ stw %r0, 48(%r26)
+ stw %r0, 52(%r26)
+ stw %r0, 56(%r26)
+ stw %r0, 60(%r26)
+
+ addib,COND(>),n -1, %r1, 1b
+ ldo 64(%r26), %r26
+#endif
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+ENDPROC(clear_page_asm)
+
+/* Copy page using kernel mapping. */
+
+ENTRY(copy_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -285,18 +434,14 @@ ENTRY(copy_user_page_asm)
#ifdef CONFIG_64BIT
/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
* Unroll the loop by hand and arrange insn appropriately.
- * GCC probably can do this just as well.
+ * Prefetch doesn't improve performance on rp3440.
+ * GCC probably can do this just as well...
*/
- ldd 0(%r25), %r19
ldi (PAGE_SIZE / 128), %r1
- ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
- ldw 128(%r25), %r0 /* prefetch 2 */
-
-1: ldd 8(%r25), %r20
- ldw 192(%r25), %r0 /* prefetch 3 */
- ldw 256(%r25), %r0 /* prefetch 4 */
+1: ldd 0(%r25), %r19
+ ldd 8(%r25), %r20
ldd 16(%r25), %r21
ldd 24(%r25), %r22
@@ -330,20 +475,16 @@ ENTRY(copy_user_page_asm)
ldd 112(%r25), %r21
ldd 120(%r25), %r22
+ ldo 128(%r25), %r25
std %r19, 96(%r26)
std %r20, 104(%r26)
- ldo 128(%r25), %r25
std %r21, 112(%r26)
std %r22, 120(%r26)
- ldo 128(%r26), %r26
- /* conditional branches nullify on forward taken branch, and on
- * non-taken backward branch. Note that .+4 is a backwards branch.
- * The ldd should only get executed if the branch is taken.
- */
- ADDIB>,n -1, %r1, 1b /* bundle 10 */
- ldd 0(%r25), %r19 /* start next loads */
+ /* Note reverse branch hint for addib is taken. */
+ addib,COND(>),n -1, %r1, 1b
+ ldo 128(%r26), %r26
#else
@@ -391,7 +532,7 @@ ENTRY(copy_user_page_asm)
stw %r21, 56(%r26)
stw %r22, 60(%r26)
ldo 64(%r26), %r26
- ADDIB>,n -1, %r1, 1b
+ addib,COND(>),n -1, %r1, 1b
ldw 0(%r25), %r19
#endif
bv %r0(%r2)
@@ -399,7 +540,7 @@ ENTRY(copy_user_page_asm)
.exit
.procend
-ENDPROC(copy_user_page_asm)
+ENDPROC(copy_page_asm)
/*
* NOTE: Code in clear_user_page has a hard coded dependency on the
@@ -422,7 +563,14 @@ ENDPROC(copy_user_page_asm)
* %r23 physical page (shifted for tlb insert) of "from" translation
*/
-#if 0
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+ .macro convert_phys_for_tlb_insert20 phys
+ extrd,u \phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
+#if _PAGE_SIZE_ENCODING_DEFAULT
+ depdi _PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
+#endif
+ .endm
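+
+/* For the common 4 kB configuration (PAGE_SHIFT == 12, PAGE_ADD_SHIFT
+ * == 0) the macro reduces to a shift plus an optional merge of the
+ * page-size encoding. A bit-level C model, offered as a reading aid
+ * only since the operand layout is dictated by the PA 2.0
+ * idtlbt/iitlbt instructions:
+ *
+ *	// stand-in for _PAGE_SIZE_ENCODING_DEFAULT (0 for plain 4 kB)
+ *	#define PAGE_SIZE_ENC_DEFAULT	0UL
+ *
+ *	static unsigned long phys_to_tlb_insert20(unsigned long phys)
+ *	{
+ *		// extrd,u phys,56,32 right-justifies the 32-bit field
+ *		// whose low bit sits at PA bit 56, i.e. a shift right
+ *		// by 7; for a page-aligned address that is the page
+ *		// frame number << 5
+ *		unsigned long v = (phys >> 7) & 0xffffffffUL;
+ *
+ *		// depdi ...,63,5 merges a non-zero page-size encoding
+ *		// into the low five bits of the operand
+ *		v |= PAGE_SIZE_ENC_DEFAULT;
+ *		return v;
+ *	}
+ */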
/*
* We can't do this since copy_user_page is used to bring in
@@ -435,6 +583,7 @@ ENDPROC(copy_user_page_asm)
* use it if more information is passed into copy_user_page().
* Have to do some measurements to see if it is worthwhile to
* lobby for such a change.
+ *
*/
ENTRY(copy_user_page_asm)
@@ -442,34 +591,104 @@ ENTRY(copy_user_page_asm)
.callinfo NO_CALLS
.entry
+ /* Convert virtual `to' and `from' addresses to physical addresses.
+ Move `from' physical address to non shadowed register. */
ldil L%(__PAGE_OFFSET), %r1
sub %r26, %r1, %r26
- sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
+ sub %r25, %r1, %r23
ldil L%(TMPALIAS_MAP_START), %r28
- /* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
- extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
- extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
- depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,12, %r28 /* Clear any offset bits */
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+#endif
+ convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
+ convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */
+ depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
copy %r28, %r29
depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
#endif
/* Purge any old translations */
+#ifdef CONFIG_PA20
+ pdtlb,l 0(%r28)
+ pdtlb,l 0(%r29)
+#else
+ tlb_lock %r20,%r21,%r22
pdtlb 0(%r28)
pdtlb 0(%r29)
+ tlb_unlock %r20,%r21,%r22
+#endif
+
+#ifdef CONFIG_64BIT
+ /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
+ * Unroll the loop by hand and arrange insn appropriately.
+ * GCC probably can do this just as well.
+ */
+
+ ldd 0(%r29), %r19
+ ldi (PAGE_SIZE / 128), %r1
+
+1: ldd 8(%r29), %r20
+
+ ldd 16(%r29), %r21
+ ldd 24(%r29), %r22
+ std %r19, 0(%r28)
+ std %r20, 8(%r28)
+
+ ldd 32(%r29), %r19
+ ldd 40(%r29), %r20
+ std %r21, 16(%r28)
+ std %r22, 24(%r28)
+
+ ldd 48(%r29), %r21
+ ldd 56(%r29), %r22
+ std %r19, 32(%r28)
+ std %r20, 40(%r28)
+
+ ldd 64(%r29), %r19
+ ldd 72(%r29), %r20
+ std %r21, 48(%r28)
+ std %r22, 56(%r28)
+
+ ldd 80(%r29), %r21
+ ldd 88(%r29), %r22
+ std %r19, 64(%r28)
+ std %r20, 72(%r28)
+
+ ldd 96(%r29), %r19
+ ldd 104(%r29), %r20
+ std %r21, 80(%r28)
+ std %r22, 88(%r28)
+
+ ldd 112(%r29), %r21
+ ldd 120(%r29), %r22
+ std %r19, 96(%r28)
+ std %r20, 104(%r28)
+
+ ldo 128(%r29), %r29
+ std %r21, 112(%r28)
+ std %r22, 120(%r28)
+ ldo 128(%r28), %r28
- ldi 64, %r1
+ /* conditional branches nullify on forward taken branch, and on
+ * non-taken backward branch. Note that .+4 is a backwards branch.
+ * The ldd should only get executed if the branch is taken.
+ */
+ addib,COND(>),n -1, %r1, 1b /* bundle 10 */
+ ldd 0(%r29), %r19 /* start next loads */
+
+#else
+ ldi (PAGE_SIZE / 64), %r1
/*
* This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
@@ -480,9 +699,7 @@ ENTRY(copy_user_page_asm)
* use ldd/std on a 32 bit kernel.
*/
-
-1:
- ldw 0(%r29), %r19
+1: ldw 0(%r29), %r19
ldw 4(%r29), %r20
ldw 8(%r29), %r21
ldw 12(%r29), %r22
@@ -515,8 +732,10 @@ ENTRY(copy_user_page_asm)
stw %r21, 56(%r28)
stw %r22, 60(%r28)
ldo 64(%r28), %r28
- ADDIB> -1, %r1,1b
+
+ addib,COND(>) -1, %r1,1b
ldo 64(%r29), %r29
+#endif
bv %r0(%r2)
nop
@@ -524,9 +743,8 @@ ENTRY(copy_user_page_asm)
.procend
ENDPROC(copy_user_page_asm)
-#endif
-ENTRY(__clear_user_page_asm)
+ENTRY(clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -537,20 +755,25 @@ ENTRY(__clear_user_page_asm)
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
- /* FIXME: page size dependend */
#endif
- extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,12, %r28 /* Clear any offset bits */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
+#ifdef CONFIG_PA20
+ pdtlb,l 0(%r28)
+#else
+ tlb_lock %r20,%r21,%r22
pdtlb 0(%r28)
+ tlb_unlock %r20,%r21,%r22
+#endif
#ifdef CONFIG_64BIT
ldi (PAGE_SIZE / 128), %r1
@@ -574,14 +797,13 @@ ENTRY(__clear_user_page_asm)
std %r0, 104(%r28)
std %r0, 112(%r28)
std %r0, 120(%r28)
- ADDIB> -1, %r1, 1b
+ addib,COND(>) -1, %r1, 1b
ldo 128(%r28), %r28
#else /* ! CONFIG_64BIT */
ldi (PAGE_SIZE / 64), %r1
-1:
- stw %r0, 0(%r28)
+1: stw %r0, 0(%r28)
stw %r0, 4(%r28)
stw %r0, 8(%r28)
stw %r0, 12(%r28)
@@ -597,7 +819,7 @@ ENTRY(__clear_user_page_asm)
stw %r0, 52(%r28)
stw %r0, 56(%r28)
stw %r0, 60(%r28)
- ADDIB> -1, %r1, 1b
+ addib,COND(>) -1, %r1, 1b
ldo 64(%r28), %r28
#endif /* CONFIG_64BIT */
@@ -606,95 +828,163 @@ ENTRY(__clear_user_page_asm)
.exit
.procend
-ENDPROC(__clear_user_page_asm)
+ENDPROC(clear_user_page_asm)
-ENTRY(flush_kernel_dcache_page_asm)
+ENTRY(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef CONFIG_64BIT
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+#endif
+ convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+#ifdef CONFIG_PA20
+ pdtlb,l 0(%r28)
+#else
+ tlb_lock %r20,%r21,%r22
+ pdtlb 0(%r28)
+ tlb_unlock %r20,%r21,%r22
+#endif
+
ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), %r23
+ ldw R%dcache_stride(%r1), r31
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
- add %r26, %r25, %r25
- sub %r25, %r23, %r25
+ add %r28, %r25, %r25
+ sub %r25, r31, %r25
+
+
+1: fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ fdc,m r31(%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+ fdc,m r31(%r28)
+ sync
-1: fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- fdc,m %r23(%r26)
- CMPB<< %r26, %r25,1b
- fdc,m %r23(%r26)
+#ifdef CONFIG_PA20
+ pdtlb,l 0(%r25)
+#else
+ tlb_lock %r20,%r21,%r22
+ pdtlb 0(%r25)
+ tlb_unlock %r20,%r21,%r22
+#endif
- sync
bv %r0(%r2)
nop
.exit
.procend
-ENDPROC(flush_kernel_dcache_page_asm)
-
-ENTRY(flush_user_dcache_page)
+ENDPROC(flush_dcache_page_asm)
+
+ENTRY(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
- ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), %r23
-
+ ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
- depdi,z 1,63-PAGE_SHIFT,1, %r25
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+#endif
+ convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
#else
- depwi,z 1,31-PAGE_SHIFT,1, %r25
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
#endif
- add %r26, %r25, %r25
- sub %r25, %r23, %r25
+ /* Purge any old translation */
-1: fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- fdc,m %r23(%sr3, %r26)
- CMPB<< %r26, %r25,1b
- fdc,m %r23(%sr3, %r26)
+#ifdef CONFIG_PA20
+ pitlb,l %r0(%sr4,%r28)
+#else
+ tlb_lock %r20,%r21,%r22
+ pitlb (%sr4,%r28)
+ tlb_unlock %r20,%r21,%r22
+#endif
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r31
+
+#ifdef CONFIG_64BIT
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r28, %r25, %r25
+ sub %r25, %r31, %r25
+
+
+ /* fic only has the type 26 form on PA1.1, requiring an
+ * explicit space specification, so use %sr4 */
+1: fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ fic,m %r31(%sr4,%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+ fic,m %r31(%sr4,%r28)
sync
+
+#ifdef CONFIG_PA20
+ pitlb,l %r0(%sr4,%r25)
+#else
+ tlb_lock %r20,%r21,%r22
+ pitlb (%sr4,%r25)
+ tlb_unlock %r20,%r21,%r22
+#endif
+
bv %r0(%r2)
nop
.exit
.procend
-ENDPROC(flush_user_dcache_page)
+ENDPROC(flush_icache_page_asm)
-ENTRY(flush_user_icache_page)
+ENTRY(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -711,23 +1001,23 @@ ENTRY(flush_user_icache_page)
sub %r25, %r23, %r25
-1: fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- fic,m %r23(%sr3, %r26)
- CMPB<< %r26, %r25,1b
- fic,m %r23(%sr3, %r26)
+1: fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ cmpb,COND(<<) %r26, %r25,1b
+ fdc,m %r23(%r26)
sync
bv %r0(%r2)
@@ -735,10 +1025,9 @@ ENTRY(flush_user_icache_page)
.exit
.procend
-ENDPROC(flush_user_icache_page)
-
+ENDPROC(flush_kernel_dcache_page_asm)
-ENTRY(purge_kernel_dcache_page)
+ENTRY(purge_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -769,7 +1058,7 @@ ENTRY(purge_kernel_dcache_page)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
- CMPB<< %r26, %r25, 1b
+ cmpb,COND(<<) %r26, %r25, 1b
pdc,m %r23(%r26)
sync
@@ -778,75 +1067,9 @@ ENTRY(purge_kernel_dcache_page)
.exit
.procend
-ENDPROC(purge_kernel_dcache_page)
-
-#if 0
- /* Currently not used, but it still is a possible alternate
- * solution.
- */
-
-ENTRY(flush_alias_page)
- .proc
- .callinfo NO_CALLS
- .entry
-
- tophys_r1 %r26
-
- ldil L%(TMPALIAS_MAP_START), %r28
-#ifdef CONFIG_64BIT
- extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
- depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
- depdi 0, 63,12, %r28 /* Clear any offset bits */
-#else
- extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
- depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
- depwi 0, 31,12, %r28 /* Clear any offset bits */
-#endif
-
- /* Purge any old translation */
-
- pdtlb 0(%r28)
-
- ldil L%dcache_stride, %r1
- ldw R%dcache_stride(%r1), %r23
-
-#ifdef CONFIG_64BIT
- depdi,z 1, 63-PAGE_SHIFT,1, %r29
-#else
- depwi,z 1, 31-PAGE_SHIFT,1, %r29
-#endif
- add %r28, %r29, %r29
- sub %r29, %r23, %r29
-
-1: fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- fdc,m %r23(%r28)
- CMPB<< %r28, %r29, 1b
- fdc,m %r23(%r28)
-
- sync
- bv %r0(%r2)
- nop
- .exit
-
- .procend
-#endif
-
- .export flush_user_dcache_range_asm
+ENDPROC(purge_kernel_dcache_page_asm)
-flush_user_dcache_range_asm:
+ENTRY(flush_user_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -856,7 +1079,7 @@ flush_user_dcache_range_asm:
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: CMPB<<,n %r26, %r25, 1b
+1: cmpb,COND(<<),n %r26, %r25, 1b
fdc,m %r23(%sr3, %r26)
sync
@@ -865,7 +1088,7 @@ flush_user_dcache_range_asm:
.exit
.procend
-ENDPROC(flush_alias_page)
+ENDPROC(flush_user_dcache_range_asm)
ENTRY(flush_kernel_dcache_range_asm)
.proc
@@ -877,7 +1100,7 @@ ENTRY(flush_kernel_dcache_range_asm)
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: CMPB<<,n %r26, %r25,1b
+1: cmpb,COND(<<),n %r26, %r25,1b
fdc,m %r23(%r26)
sync
@@ -899,7 +1122,7 @@ ENTRY(flush_user_icache_range_asm)
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: CMPB<<,n %r26, %r25,1b
+1: cmpb,COND(<<),n %r26, %r25,1b
fic,m %r23(%sr3, %r26)
sync
@@ -942,7 +1165,7 @@ ENTRY(flush_kernel_icache_page)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
- CMPB<< %r26, %r25, 1b
+ cmpb,COND(<<) %r26, %r25, 1b
fic,m %r23(%sr4, %r26)
sync
@@ -963,7 +1186,7 @@ ENTRY(flush_kernel_icache_range_asm)
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
-1: CMPB<<,n %r26, %r25, 1b
+1: cmpb,COND(<<),n %r26, %r25, 1b
fic,m %r23(%sr4, %r26)
sync
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7aca704e96f..568b2c61ea0 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -31,7 +31,7 @@
#include <linux/string.h>
EXPORT_SYMBOL(memset);
-#include <asm/atomic.h>
+#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u32);
@@ -44,7 +44,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
#endif
#include <asm/uaccess.h>
-EXPORT_SYMBOL(lstrncpy_from_user);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
@@ -69,11 +68,6 @@ EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down);
-
extern void $$divI(void);
extern void $$divU(void);
extern void $$remI(void);
@@ -126,11 +120,13 @@ extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
+extern void __ucmpdi2(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
@@ -157,3 +153,12 @@ EXPORT_SYMBOL($$dyncall);
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfnnid_map);
#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/* from pacache.S -- needed for clear/copy_page */
+EXPORT_SYMBOL(clear_page_asm);
+EXPORT_SYMBOL(copy_page_asm);
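
The two new exports keep the page clear/copy primitives usable from modules; presumably they are reached through thin wrappers in asm/page.h, along these lines (the header is not part of this excerpt, so treat the exact shape as an assumption):

void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);

#define clear_page(page)	clear_page_asm((void *)(page))
#define copy_page(to, from)	copy_page_asm((void *)(to), (void *)(from))
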
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 9448d4e9114..d87d1c476d8 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -2,7 +2,7 @@
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
-** See Documentation/DMA-mapping.txt for interface definitions.
+** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
@@ -18,14 +18,15 @@
*/
#include <linux/init.h>
+#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
+#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
@@ -90,12 +91,14 @@ static inline int map_pte_uncached(pte_t * pte,
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
+ unsigned long flags;
+
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr);
- purge_tlb_end();
+ purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
(*paddr_ptr) += PAGE_SIZE;
@@ -168,11 +171,13 @@ static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
+ unsigned long flags;
pte_t page = *pte;
+
pte_clear(&init_mm, vaddr, pte);
- purge_tlb_start();
+ purge_tlb_start(flags);
pdtlb_kernel(orig_vaddr);
- purge_tlb_end();
+ purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
pte++;
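
purge_tlb_start()/purge_tlb_end() now take a caller-provided flags word, the usual signature change when a helper moves from spin_lock to spin_lock_irqsave. The macro bodies live in a header outside this diff; a plausible sketch of what they expand to, inferred only from the new calling convention:

#include <linux/spinlock.h>

extern spinlock_t pa_tlb_lock;

#define purge_tlb_start(flags)	spin_lock_irqsave(&pa_tlb_lock, flags)
#define purge_tlb_end(flags)	spin_unlock_irqrestore(&pa_tlb_lock, flags)
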
@@ -397,10 +402,9 @@ pcxl_dma_init(void)
"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
else {
struct proc_dir_entry* ent;
- ent = create_proc_entry("pcxl_dma", 0, proc_gsc_root);
- if (ent)
- ent->proc_fops = &proc_pcxl_dma_ops;
- else
+ ent = proc_create("pcxl_dma", 0, proc_gsc_root,
+ &proc_pcxl_dma_ops);
+ if (!ent)
printk(KERN_WARNING
"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
}
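
The create_proc_entry() plus manual proc_fops assignment pair is replaced by proc_create(), which publishes the entry only after its operations are in place and so closes the window where a reader could open a half-initialized file; pdc_chassis.c below gets the same treatment. The general shape of the conversion, with placeholder names:

#include <linux/fs.h>
#include <linux/proc_fs.h>

static const struct file_operations example_fops;	/* .open/.read/... */

static int __init example_proc_init(void)
{
	/* Old, racy shape:
	 *	ent = create_proc_entry("example", 0, parent);
	 *	if (ent)
	 *		ent->proc_fops = &example_fops;
	 * New: atomic creation with the fops attached up front. */
	if (!proc_create("example", 0, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}
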
@@ -448,10 +452,7 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
- if (direction == DMA_NONE) {
- printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
- BUG();
- }
+ BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
@@ -459,10 +460,7 @@ static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t siz
static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
- if (direction == DMA_NONE) {
- printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
- BUG();
- }
+ BUG_ON(direction == DMA_NONE);
if (direction == DMA_TO_DEVICE)
return;
@@ -481,8 +479,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
{
int i;
- if (direction == DMA_NONE)
- BUG();
+ BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sglist++ ) {
unsigned long vaddr = sg_virt_addr(sglist);
@@ -497,8 +494,7 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
{
int i;
- if (direction == DMA_NONE)
- BUG();
+ BUG_ON(direction == DMA_NONE);
if (direction == DMA_TO_DEVICE)
return;
@@ -512,16 +508,14 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
- if (direction == DMA_NONE)
- BUG();
+ BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
- if (direction == DMA_NONE)
- BUG();
+ BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 507d0ac99f6..64f2764a8ce 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -1,5 +1,4 @@
-/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -14,12 +13,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/slab.h>
#include <linux/types.h>
#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
@@ -124,6 +120,10 @@ static int __init pcibios_init(void)
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
+
+ /* Set the CLS for PCI as early as possible. */
+ pci_cache_line_size = pci_dfl_cache_line_size;
+
return 0;
}
@@ -139,11 +139,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
-char *pcibios_setup(char *str)
-{
- return str;
-}
-
/*
* Called by pci_set_master() - a driver interface.
*
@@ -172,7 +167,7 @@ void pcibios_set_master(struct pci_dev *dev)
** upper byte is PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
- (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+ (0x80 << 8) | pci_cache_line_size);
}
@@ -194,58 +189,6 @@ void __init pcibios_init_bus(struct pci_bus *bus)
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}
-/* called by drivers/pci/setup-bus.c:pci_setup_bridge(). */
-void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
- struct pci_bus_region *region, struct resource *res)
-{
-#ifdef CONFIG_64BIT
- struct pci_hba_data *hba = HBA_DATA(dev->bus->bridge->platform_data);
-#endif
-
- if (res->flags & IORESOURCE_IO) {
- /*
- ** I/O space may see busnumbers here. Something
- ** in the form of 0xbbxxxx where bb is the bus num
- ** and xxxx is the I/O port space address.
- ** Remaining address translation are done in the
- ** PCI Host adapter specific code - ie dino_out8.
- */
- region->start = PCI_PORT_ADDR(res->start);
- region->end = PCI_PORT_ADDR(res->end);
- } else if (res->flags & IORESOURCE_MEM) {
- /* Convert MMIO addr to PCI addr (undo global virtualization) */
- region->start = PCI_BUS_ADDR(hba, res->start);
- region->end = PCI_BUS_ADDR(hba, res->end);
- }
-
- DBG_RES("pcibios_resource_to_bus(%02x %s [%lx,%lx])\n",
- dev->bus->number, res->flags & IORESOURCE_IO ? "IO" : "MEM",
- region->start, region->end);
-}
-
-void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
-{
-#ifdef CONFIG_64BIT
- struct pci_hba_data *hba = HBA_DATA(dev->bus->bridge->platform_data);
-#endif
-
- if (res->flags & IORESOURCE_MEM) {
- res->start = PCI_HOST_ADDR(hba, region->start);
- res->end = PCI_HOST_ADDR(hba, region->end);
- }
-
- if (res->flags & IORESOURCE_IO) {
- res->start = region->start;
- res->end = region->end;
- }
-}
-
-#ifdef CONFIG_HOTPLUG
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-#endif
-
/*
* pcibios align resources() is called every time generic PCI code
* wants to generate a new address. The process of looking for
@@ -255,10 +198,10 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
* Since we are just checking candidates, don't use any fields other
* than res->start.
*/
-void pcibios_align_resource(void *data, struct resource *res,
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t alignment)
{
- resource_size_t mask, align;
+ resource_size_t mask, align, start = res->start;
DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
pci_name(((struct pci_dev *) data)),
@@ -270,13 +213,40 @@ void pcibios_align_resource(void *data, struct resource *res,
/* Align to largest of MIN or input size */
mask = max(alignment, align) - 1;
- res->start += mask;
- res->start &= ~mask;
+ start += mask;
+ start &= ~mask;
- /* The caller updates the end field, we don't. */
+ return start;
}
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ unsigned long prot;
+
+ /*
+ * I/O space can be accessed via normal processor loads and stores on
+ * this platform but for now we elect not to do this and portable
+ * drivers should not do this anyway.
+ */
+ if (mmap_state == pci_mmap_io)
+ return -EINVAL;
+
+ if (write_combine)
+ return -EINVAL;
+
+ /*
+ * Ignore write-combine; for now only return uncached mappings.
+ */
+ prot = pgprot_val(vma->vm_page_prot);
+ prot |= _PAGE_NO_CACHE;
+ vma->vm_page_prot = __pgprot(prot);
+
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
@@ -287,23 +257,15 @@ void pcibios_align_resource(void *data, struct resource *res,
*/
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- u16 cmd;
- int idx;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
-
- for (idx = 0; idx < DEVICE_COUNT_RESOURCE; idx++) {
- struct resource *r = &dev->resource[idx];
+ int err;
+ u16 cmd, old_cmd;
- /* only setup requested resources */
- if (!(mask & (1<<idx)))
- continue;
+ err = pci_enable_resources(dev, mask);
+ if (err < 0)
+ return err;
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
@@ -312,8 +274,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
cmd |= PCI_COMMAND_FAST_BACK;
#endif
- DBGC("PCIBIOS: Enabling device %s cmd 0x%04x\n", pci_name(dev), cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ if (cmd != old_cmd) {
+ dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n",
+ old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
return 0;
}
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index d47ba1aa825..3e04242de5a 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -30,11 +30,13 @@
#endif
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cache.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
@@ -244,38 +246,38 @@ int pdc_chassis_send_status(int message)
#ifdef CONFIG_PDC_CHASSIS_WARN
#ifdef CONFIG_PROC_FS
-static int pdc_chassis_warn_pread(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int pdc_chassis_warn_show(struct seq_file *m, void *v)
{
- char *out = page;
- int len, ret;
unsigned long warn;
u32 warnreg;
- ret = pdc_chassis_warn(&warn);
- if (ret != PDC_OK)
+ if (pdc_chassis_warn(&warn) != PDC_OK)
return -EIO;
warnreg = (warn & 0xFFFFFFFF);
if ((warnreg >> 24) & 0xFF)
- out += sprintf(out, "Chassis component failure! (eg fan or PSU): 0x%.2x\n", ((warnreg >> 24) & 0xFF));
-
- out += sprintf(out, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK");
- out += sprintf(out, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK");
- out += sprintf(out, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" : "OK");
-
- len = out - page - off;
- if (len < count) {
- *eof = 1;
- if (len <= 0) return 0;
- } else {
- len = count;
- }
- *start = page + off;
- return len;
+ seq_printf(m, "Chassis component failure! (eg fan or PSU): 0x%.2x\n",
+ (warnreg >> 24) & 0xFF);
+
+ seq_printf(m, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK");
+ seq_printf(m, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK");
+ seq_printf(m, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" : "OK");
+ return 0;
+}
+
+static int pdc_chassis_warn_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pdc_chassis_warn_show, NULL);
}
+static const struct file_operations pdc_chassis_warn_fops = {
+ .open = pdc_chassis_warn_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int __init pdc_chassis_create_procfs(void)
{
unsigned long test;
@@ -290,8 +292,7 @@ static int __init pdc_chassis_create_procfs(void)
printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n",
PDC_CHASSIS_VER);
- create_proc_read_entry("chassis", 0400, NULL, pdc_chassis_warn_pread,
- NULL);
+ proc_create("chassis", 0400, NULL, &pdc_chassis_warn_fops);
return 0;
}
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index ccb68090781..d5cae55195e 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -12,6 +12,7 @@
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2010 Guy Martin <gmsoft at tuxicoman.be>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -31,12 +32,11 @@
/*
* The PDC console is a simple console, which can be used for debugging
- * boot related problems on HP PA-RISC machines.
+ * boot related problems on HP PA-RISC machines. It is also useful when no
+ * other console works.
*
* This code uses the ROM (=PDC) based functions to read and write characters
* from and to PDC's boot path.
- * Since all character read from that path must be polled, this code never
- * can or will be a fully functional linux console.
*/
/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
@@ -50,9 +50,11 @@
#include <linux/init.h>
#include <linux/major.h>
#include <linux/tty.h>
+#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for iodc_call() proto and friends */
-static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdc_console_lock);
+static struct console pdc_cons;
static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
@@ -85,12 +87,133 @@ static int pdc_console_setup(struct console *co, char *options)
#if defined(CONFIG_PDC_CONSOLE)
#include <linux/vt_kern.h>
+#include <linux/tty_flip.h>
+
+#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
+
+static void pdc_console_poll(unsigned long unused);
+static DEFINE_TIMER(pdc_console_timer, pdc_console_poll, 0, 0);
+static struct tty_port tty_port;
+
+static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ tty_port_tty_set(&tty_port, tty);
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+
+ return 0;
+}
+
+static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ if (tty->count == 1) {
+ del_timer_sync(&pdc_console_timer);
+ tty_port_tty_set(&tty_port, NULL);
+ }
+}
+
+static int pdc_console_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ pdc_console_write(NULL, buf, count);
+ return count;
+}
+
+static int pdc_console_tty_write_room(struct tty_struct *tty)
+{
+ return 32768; /* no limit, no buffer used */
+}
+
+static int pdc_console_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ return 0; /* no buffer */
+}
+
+static const struct tty_operations pdc_console_tty_ops = {
+ .open = pdc_console_tty_open,
+ .close = pdc_console_tty_close,
+ .write = pdc_console_tty_write,
+ .write_room = pdc_console_tty_write_room,
+ .chars_in_buffer = pdc_console_tty_chars_in_buffer,
+};
+
+static void pdc_console_poll(unsigned long unused)
+{
+ int data, count = 0;
+
+ while (1) {
+ data = pdc_console_poll_key(NULL);
+ if (data == -1)
+ break;
+ tty_insert_flip_char(&tty_port, data & 0xFF, TTY_NORMAL);
+ count ++;
+ }
+
+ if (count)
+ tty_flip_buffer_push(&tty_port);
+
+ if (pdc_cons.flags & CON_ENABLED)
+ mod_timer(&pdc_console_timer, jiffies + PDC_CONS_POLL_DELAY);
+}
+
+static struct tty_driver *pdc_console_tty_driver;
+
+static int __init pdc_console_tty_driver_init(void)
+{
+ int err;
+
+ /* Check if the console driver is still registered.
+ * It is unregistered if the pdc console was not selected as the
+ * primary console. */
+
+ struct console *tmp;
+
+ console_lock();
+ for_each_console(tmp)
+ if (tmp == &pdc_cons)
+ break;
+ console_unlock();
+
+ if (!tmp) {
+ printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
+ return -ENODEV;
+ }
+
+ printk(KERN_INFO "The PDC console driver is still registered, removing CON_BOOT flag\n");
+ pdc_cons.flags &= ~CON_BOOT;
+
+ pdc_console_tty_driver = alloc_tty_driver(1);
+
+ if (!pdc_console_tty_driver)
+ return -ENOMEM;
+
+ tty_port_init(&tty_port);
+
+ pdc_console_tty_driver->driver_name = "pdc_cons";
+ pdc_console_tty_driver->name = "ttyB";
+ pdc_console_tty_driver->major = MUX_MAJOR;
+ pdc_console_tty_driver->minor_start = 0;
+ pdc_console_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ pdc_console_tty_driver->init_termios = tty_std_termios;
+ pdc_console_tty_driver->flags = TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_RESET_TERMIOS;
+ tty_set_operations(pdc_console_tty_driver, &pdc_console_tty_ops);
+ tty_port_link_device(&tty_port, pdc_console_tty_driver, 0);
+
+ err = tty_register_driver(pdc_console_tty_driver);
+ if (err) {
+ printk(KERN_ERR "Unable to register the PDC console TTY driver\n");
+ tty_port_destroy(&tty_port);
+ return err;
+ }
+
+ return 0;
+}
+
+module_init(pdc_console_tty_driver_init);
static struct tty_driver * pdc_console_device (struct console *c, int *index)
{
- extern struct tty_driver console_driver;
- *index = c->index ? c->index-1 : fg_console;
- return &console_driver;
+ *index = c->index;
+ return pdc_console_tty_driver;
}
#else
#define pdc_console_device NULL
@@ -101,7 +224,7 @@ static struct console pdc_cons = {
.write = pdc_console_write,
.device = pdc_console_device,
.setup = pdc_console_setup,
- .flags = CON_BOOT | CON_PRINTBUFFER | CON_ENABLED,
+ .flags = CON_BOOT | CON_PRINTBUFFER,
.index = -1,
};
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 89d6d5ad44b..ba0c053e25a 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -24,7 +24,7 @@
*
* This driver programs the PCX-U/PCX-W performance counters
* on the PA-RISC 2.0 chips. The driver keeps all images now
- * internally to the kernel to hopefully eliminate the possiblity
+ * internally to the kernel to hopefully eliminate the possibility
* of a bad image halting the CPU. Also, there are different
* images for the PCX-W and later chips vs the PCX-U chips.
*
@@ -537,9 +537,9 @@ static int __init perf_init(void)
spin_lock_init(&perf_lock);
/* TODO: this only lets us access the first cpu.. what to do for SMP? */
- cpu_device = cpu_data[0].dev;
+ cpu_device = per_cpu(cpu_data, 0).dev;
printk("Performance monitoring counters enabled for %s\n",
- cpu_data[0].dev->name);
+ per_cpu(cpu_data, 0).dev->name);
return 0;
}
diff --git a/arch/parisc/kernel/perf_asm.S b/arch/parisc/kernel/perf_asm.S
index 43874ca3ed6..fa6ea99bb32 100644
--- a/arch/parisc/kernel/perf_asm.S
+++ b/arch/parisc/kernel/perf_asm.S
@@ -20,6 +20,8 @@
*/
#include <asm/assembly.h>
+
+#include <linux/init.h>
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b80e02a4d81..0bbbf0d3f60 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -13,7 +13,7 @@
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
- * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
@@ -43,39 +43,22 @@
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <linux/rcupdate.h>
+#include <linux/random.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
+#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
-#include <asm/uaccess.h>
#include <asm/unwind.h>
-
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- while (!need_resched())
- barrier();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- check_pgt_cache();
- }
-}
-
+#include <asm/sections.h>
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
@@ -155,30 +138,13 @@ void machine_power_off(void)
* software. The user has to press the button himself. */
printk(KERN_EMERG "System shut down completed.\n"
- KERN_EMERG "Please power this system off now.");
+ "Please power this system off now.");
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
/*
- * Create a kernel thread
- */
-
-extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
-
- /*
- * FIXME: Once we are sure we don't need any debug here,
- * kernel_thread can become a #define.
- */
-
- return __kernel_thread(fn, arg, flags);
-}
-EXPORT_SYMBOL(kernel_thread);
-
-/*
* Free current thread data structures etc..
*/
void exit_thread(void)
@@ -190,7 +156,6 @@ void flush_thread(void)
/* Only needs to handle fpu stuff or perf monitors.
** REVISIT: several arches implement a "lazy fpu state".
*/
- set_fs(USER_DS);
}
void release_thread(struct task_struct *dead_task)
@@ -216,58 +181,11 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
return 1;
}
-/* Note that "fork()" is implemented in terms of clone, with
- parameters (SIGCHLD, regs->gr[30], regs). */
int
-sys_clone(unsigned long clone_flags, unsigned long usp,
- struct pt_regs *regs)
+copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p)
{
- /* Arugments from userspace are:
- r26 = Clone flags.
- r25 = Child stack.
- r24 = parent_tidptr.
- r23 = Is the TLS storage descriptor
- r22 = child_tidptr
-
- However, these last 3 args are only examined
- if the proper flags are set. */
- int __user *child_tidptr;
- int __user *parent_tidptr;
-
- /* usp must be word aligned. This also prevents users from
- * passing in the value 1 (which is the signal for a special
- * return for a kernel thread) */
- usp = ALIGN(usp, 4);
-
- /* A zero value for usp means use the current stack */
- if (usp == 0)
- usp = regs->gr[30];
-
- if (clone_flags & CLONE_PARENT_SETTID)
- parent_tidptr = (int __user *)regs->gr[24];
- else
- parent_tidptr = NULL;
-
- if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID))
- child_tidptr = (int __user *)regs->gr[22];
- else
- child_tidptr = NULL;
-
- return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr);
-}
-
-int
-sys_vfork(struct pt_regs *regs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL);
-}
-
-int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
- unsigned long unused, /* in ia64 this is "user_stack_size" */
- struct task_struct * p, struct pt_regs * pregs)
-{
- struct pt_regs * cregs = &(p->thread.regs);
+ struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
/* We have to use void * instead of a function pointer, because
@@ -278,49 +196,40 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_HPUX
extern void * const hpux_child_return;
#endif
-	*cregs = *pregs;
-
-	/* Set the return value for the child.  Note that this is not
-	   actually restored by the syscall exit path, but we put it
-	   here for consistency in case of signals. */
-	cregs->gr[28] = 0; /* child */
-
-	/*
-	 * We need to differentiate between a user fork and a
-	 * kernel fork. We can't use user_mode, because the
-	 * syscall path doesn't save iaoq.  Right now
-	 * we rely on the fact that kernel_thread passes
-	 * in zero for usp.
-	 */
-	if (usp == 1) {
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		memset(cregs, 0, sizeof(struct pt_regs));
+		if (!usp) /* idle thread */
+			return 0;
/* kernel thread */
- cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN;
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
+ cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
cregs->kpc = (unsigned long) &ret_from_kernel_thread;
/*
* Copy function and argument to be called from
* ret_from_kernel_thread.
*/
#ifdef CONFIG_64BIT
- cregs->gr[27] = pregs->gr[27];
+ cregs->gr[27] = ((unsigned long *)usp)[3];
+ cregs->gr[26] = ((unsigned long *)usp)[2];
+#else
+ cregs->gr[26] = usp;
#endif
- cregs->gr[26] = pregs->gr[26];
- cregs->gr[25] = pregs->gr[25];
+ cregs->gr[25] = arg;
} else {
/* user thread */
- /*
- * Note that the fork wrappers are responsible
- * for setting gr[21].
- */
-
- /* Use same stack depth as parent */
- cregs->ksp = (unsigned long)stack
- + (pregs->gr[21] & (THREAD_SIZE - 1));
- cregs->gr[30] = usp;
- if (p->personality == PER_HPUX) {
+ /* usp must be word aligned. This also prevents users from
+ * passing in the value 1 (which is the signal for a special
+ * return for a kernel thread) */
+ if (usp) {
+ usp = ALIGN(usp, 4);
+ if (likely(usp))
+ cregs->gr[30] = usp;
+ }
+ cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
+ if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
@@ -331,8 +240,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
}
/* Setup thread TLS area from the 4th parameter in clone */
if (clone_flags & CLONE_SETTLS)
- cregs->cr27 = pregs->gr[23];
-
+ cregs->cr27 = cregs->gr[23];
}
return 0;
@@ -343,39 +251,6 @@ unsigned long thread_saved_pc(struct task_struct *t)
return t->thread.regs.kpc;
}
-/*
- * sys_execve() executes a new program.
- */
-
-asmlinkage int sys_execve(struct pt_regs *regs)
-{
- int error;
- char *filename;
-
- filename = getname((const char __user *) regs->gr[26]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, (char __user * __user *) regs->gr[25],
- (char __user * __user *) regs->gr[24], regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
- putname(filename);
-out:
-
- return error;
-}
-
-extern int __execve(const char *filename, char *const argv[],
- char *const envp[], struct task_struct *task);
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- return __execve(filename, argv, envp, current);
-}
-
unsigned long
get_wchan(struct task_struct *p)
{
@@ -400,3 +275,33 @@ get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
+
+#ifdef CONFIG_64BIT
+void *dereference_function_descriptor(void *ptr)
+{
+ Elf64_Fdesc *desc = ptr;
+ void *p;
+
+ if (!probe_kernel_address(&desc->addr, p))
+ ptr = p;
+ return ptr;
+}
+#endif
+
+static inline unsigned long brk_rnd(void)
+{
+ /* 8MB for 32bit, 1GB for 64bit */
+ if (is_32bit_task())
+ return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+ else
+ return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+
+ if (ret < mm->brk)
+ return mm->brk;
+ return ret;
+}
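
The brk_rnd()/arch_randomize_brk() pair added above shifts the heap base by a random, page-aligned offset: the 0x7ff and 0x3ffff page masks give roughly 8MB of jitter for 32-bit tasks and 1GB for 64-bit ones. A minimal userspace sketch of the same arithmetic, assuming 4KB pages and using random() purely as a stand-in for the kernel's get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12			/* assumption: 4KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long brk_rnd(int is_32bit)
{
	/* 8MB for 32bit, 1GB for 64bit, as in the code above */
	unsigned long mask = is_32bit ? 0x7ffUL : 0x3ffffUL;
	return ((unsigned long)random() & mask) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long brk = 0x10000000UL;	/* hypothetical unrandomized brk */
	unsigned long ret = PAGE_ALIGN(brk + brk_rnd(1));

	if (ret < brk)	/* wrap-around guard, as in arch_randomize_brk() */
		ret = brk;
	printf("randomized brk: %#lx\n", ret);
	return 0;
}
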
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 370086fb833..b68d977ce30 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -1,9 +1,8 @@
-/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $
- *
+/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
- * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
* Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +45,7 @@
struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
-struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
extern int update_cr16_clocksource(void); /* from time.c */
@@ -69,6 +68,23 @@ extern int update_cr16_clocksource(void); /* from time.c */
*/
/**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void
+init_percpu_prof(unsigned long cpunum)
+{
+ struct cpuinfo_parisc *p;
+
+ p = &per_cpu(cpu_data, cpunum);
+ p->prof_counter = 1;
+ p->prof_multiplier = 1;
+}
+
+
+/**
* processor_probe - Determine if processor driver should claim this device.
* @dev: The device which has been found.
*
@@ -76,15 +92,15 @@ extern int update_cr16_clocksource(void); /* from time.c */
* (return 1). If so, initialize the chip and tell other partners in crime
* they have work to do.
*/
-static int __cpuinit processor_probe(struct parisc_device *dev)
+static int processor_probe(struct parisc_device *dev)
{
unsigned long txn_addr;
unsigned long cpuid;
struct cpuinfo_parisc *p;
#ifdef CONFIG_SMP
- if (num_online_cpus() >= NR_CPUS) {
- printk(KERN_INFO "num_online_cpus() >= NR_CPUS\n");
+ if (num_online_cpus() >= nr_cpu_ids) {
+ printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
return 1;
}
#else
@@ -104,22 +120,28 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
if (is_pdc_pat()) {
ulong status;
unsigned long bytecnt;
- pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+ pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
struct pdc_pat_cpu_num cpu_info;
#endif
+ pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
+ if (!pa_pdc_cell)
+ panic("couldn't allocate memory for PDC_PAT_CELL!");
+
status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
- dev->mod_index, PA_VIEW, &pa_pdc_cell);
+ dev->mod_index, PA_VIEW, pa_pdc_cell);
BUG_ON(PDC_OK != status);
/* verify it's the same as what do_pat_inventory() found */
- BUG_ON(dev->mod_info != pa_pdc_cell.mod_info);
- BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location);
+ BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
+ BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
-	txn_addr = pa_pdc_cell.mod[0];	/* id_eid for IO sapic */
+	txn_addr = pa_pdc_cell->mod[0];	/* id_eid for IO sapic */
+
+	kfree(pa_pdc_cell);
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
@@ -147,7 +169,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
}
#endif
- p = &cpu_data[cpuid];
+ p = &per_cpu(cpu_data, cpuid);
boot_cpu_data.cpu_count++;
/* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +184,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
- ** for boot_cpu by the above memset().
+ ** for boot_cpu by the above memset().
*/
-
- /* stolen from init_percpu_prof() */
- cpu_data[cpuid].prof_counter = 1;
- cpu_data[cpuid].prof_multiplier = 1;
+ init_percpu_prof(cpuid);
#endif
/*
@@ -200,7 +219,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
*/
#ifdef CONFIG_SMP
if (cpuid) {
- cpu_set(cpuid, cpu_present_map);
+ set_cpu_present(cpuid, true);
cpu_up(cpuid);
}
#endif
@@ -261,19 +280,6 @@ void __init collect_boot_cpu_data(void)
}
-/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpunum: The processor instance.
- *
- * FIXME: doesn't do much yet...
- */
-static inline void __init
-init_percpu_prof(int cpunum)
-{
- cpu_data[cpunum].prof_counter = 1;
- cpu_data[cpunum].prof_multiplier = 1;
-}
-
/**
* init_per_cpu - Handle individual processor initializations.
@@ -293,7 +299,7 @@ init_percpu_prof(int cpunum)
*
* o Enable CPU profiling hooks.
*/
-int __init init_per_cpu(int cpunum)
+int init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +313,8 @@ int __init init_per_cpu(int cpunum)
/* FWIW, FP rev/model is a more accurate way to determine
** CPU type. CPU rev/model has some ambiguous cases.
*/
- cpu_data[cpunum].fp_rev = coproc_cfg.revision;
- cpu_data[cpunum].fp_model = coproc_cfg.model;
+ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+ per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +350,17 @@ int __init init_per_cpu(int cpunum)
int
show_cpuinfo (struct seq_file *m, void *v)
{
- int n;
+ unsigned long cpu;
- for(n=0; n<boot_cpu_data.cpu_count; n++) {
+ for_each_online_cpu(cpu) {
+ const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
- if (0 == cpu_data[n].hpa)
+ if (0 == cpuinfo->hpa)
continue;
#endif
- seq_printf(m, "processor\t: %d\n"
+ seq_printf(m, "processor\t: %lu\n"
"cpu family\t: PA-RISC %s\n",
- n, boot_cpu_data.family_name);
+ cpu, boot_cpu_data.family_name);
seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
@@ -362,11 +369,31 @@ show_cpuinfo (struct seq_file *m, void *v)
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
+ seq_printf(m, "capabilities\t:");
+ if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
+ seq_puts(m, " os32");
+ if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
+ seq_puts(m, " os64");
+ if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
+ seq_puts(m, " iopdir_fdc");
+ switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
+ case PDC_MODEL_NVA_SUPPORTED:
+ seq_puts(m, " nva_supported");
+ break;
+ case PDC_MODEL_NVA_SLOW:
+ seq_puts(m, " nva_slow");
+ break;
+ case PDC_MODEL_NVA_UNSUPPORTED:
+ seq_puts(m, " needs_equivalent_aliasing");
+ break;
+ }
+ seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
+
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
boot_cpu_data.pdc.sys_model_name,
- cpu_data[n].dev ?
- cpu_data[n].dev->name : "Unknown" );
+ cpuinfo->dev ?
+ cpuinfo->dev->name : "Unknown");
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
@@ -377,8 +404,8 @@ show_cpuinfo (struct seq_file *m, void *v)
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
- cpu_data[n].loops_per_jiffy / (500000 / HZ),
- (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+ cpuinfo->loops_per_jiffy / (500000 / HZ),
+ (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
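
The switch from the static cpu_data[NR_CPUS] array to DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data) gives each CPU its own copy in the per-CPU area, and readers now go through per_cpu()/for_each_online_cpu() as show_cpuinfo() does above. A kernel-style sketch of that access pattern; demo_count is a hypothetical variable, not from this file:

#include <linux/percpu.h>
#include <linux/cpu.h>

/* hypothetical per-CPU statistic, for illustration only */
static DEFINE_PER_CPU(unsigned long, demo_count);

static void demo_touch(void)
{
	/* this CPU's instance; caller must keep preemption off */
	__this_cpu_inc(demo_count);
}

static unsigned long demo_total(void)
{
	unsigned long sum = 0;
	int cpu;

	/* walk every online CPU's instance, as show_cpuinfo()
	 * does with per_cpu(cpu_data, cpu) */
	for_each_online_cpu(cpu)
		sum += per_cpu(demo_count, cpu);
	return sum;
}
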
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 49c63797078..e842ee233db 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -4,6 +4,7 @@
* Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
* Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
* Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
*/
#include <linux/kernel.h>
@@ -12,30 +13,167 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/signal.h>
+#include <linux/audit.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
/* PSW bits we allow the debugger to modify */
-#define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB)
+#define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB)
-#undef DEBUG_PTRACE
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
-#ifdef DEBUG_PTRACE
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
+ /* make sure the trap bits are not set */
+ pa_psw(task)->r = 0;
+ pa_psw(task)->t = 0;
+ pa_psw(task)->h = 0;
+ pa_psw(task)->l = 0;
+}
+
+/*
+ * The following functions are called by ptrace_resume() when
+ * enabling or disabling single/block tracing.
+ */
+void user_disable_single_step(struct task_struct *task)
+{
+ ptrace_disable(task);
+}
+
+void user_enable_single_step(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+
+ if (pa_psw(task)->n) {
+ struct siginfo si;
+
+ /* Nullified, just crank over the queue. */
+ task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
+ task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
+ task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
+ pa_psw(task)->n = 0;
+ pa_psw(task)->x = 0;
+ pa_psw(task)->y = 0;
+ pa_psw(task)->z = 0;
+ pa_psw(task)->b = 0;
+ ptrace_disable(task);
+ /* Don't wake up the task, but let the
+ parent know something happened. */
+ si.si_code = TRAP_TRACE;
+ si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
+ si.si_signo = SIGTRAP;
+ si.si_errno = 0;
+ force_sig_info(SIGTRAP, &si, task);
+ /* notify_parent(task, SIGCHLD); */
+ return;
+ }
+
+ /* Enable recovery counter traps. The recovery counter
+ * itself will be set to zero on a task switch. If the
+ * task is suspended on a syscall then the syscall return
+ * path will overwrite the recovery counter with a suitable
+ * value such that it traps once back in user space. We
+	 * disable interrupts in the task's PSW here also, to avoid
+ * interrupts while the recovery counter is decrementing.
+ */
+ pa_psw(task)->r = 1;
+ pa_psw(task)->t = 0;
+ pa_psw(task)->h = 0;
+ pa_psw(task)->l = 0;
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
+
+ /* Enable taken branch trap. */
+ pa_psw(task)->r = 0;
+ pa_psw(task)->t = 1;
+ pa_psw(task)->h = 0;
+ pa_psw(task)->l = 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ unsigned long tmp;
+ long ret = -EIO;
+
+ switch (request) {
+
+ /* Read the word at location addr in the USER area. For ptraced
+ processes, the kernel saves all regs on a syscall. */
+ case PTRACE_PEEKUSR:
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
+ break;
+ tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
+ ret = put_user(tmp, (unsigned long __user *) data);
+ break;
+
+ /* Write the word at location addr in the USER area. This will need
+ to change when the kernel no longer saves all regs on a syscall.
+ FIXME. There is a problem at the moment in that r3-r18 are only
+ saved if the process is ptraced on syscall entry, and even then
+ those values are overwritten by actual register values on syscall
+ exit. */
+ case PTRACE_POKEUSR:
+ /* Some register values written here may be ignored in
+ * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+ * r31/r31+4, and not with the values in pt_regs.
+ */
+ if (addr == PT_PSW) {
+ /* Allow writing to Nullify, Divide-step-correction,
+ * and carry/borrow bits.
+ * BEWARE, if you set N, and then single step, it won't
+ * stop on the nullified instruction.
+ */
+ data &= USER_PSW_BITS;
+ task_regs(child)->gr[0] &= ~USER_PSW_BITS;
+ task_regs(child)->gr[0] |= data;
+ ret = 0;
+ break;
+ }
+
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
+ break;
+ if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+ addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+ (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
+ addr == PT_SAR) {
+ *(unsigned long *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
-#ifdef CONFIG_64BIT
+
+#ifdef CONFIG_COMPAT
/* This function is needed to translate 32 bit pt_regs offsets in to
* 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
@@ -48,10 +186,10 @@
* being 64 bit in both cases.
*/
-static long translate_usr_offset(long offset)
+static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
if (offset < 0)
- return -1;
+ return sizeof(struct pt_regs);
else if (offset <= 32*4) /* gr[0..31] */
return offset * 2 + 4;
else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
@@ -59,108 +197,27 @@ static long translate_usr_offset(long offset)
else if (offset < sizeof(struct pt_regs)/2 + 32*4)
return offset * 2 + 4 - 32*8;
else
- return -1;
+ return sizeof(struct pt_regs);
}
-#endif
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data)
{
- /* make sure the trap bits are not set */
- pa_psw(child)->r = 0;
- pa_psw(child)->t = 0;
- pa_psw(child)->h = 0;
- pa_psw(child)->l = 0;
-}
-
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
-{
- long ret;
-#ifdef DEBUG_PTRACE
- long oaddr=addr, odata=data;
-#endif
+ compat_uint_t tmp;
+ long ret = -EIO;
switch (request) {
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
-#ifdef CONFIG_64BIT
- if (__is_compat_task(child)) {
- int copied;
- unsigned int tmp;
-
- addr &= 0xffffffffL;
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- goto out_tsk;
- ret = put_user(tmp,(unsigned int *) data);
- DBG("sys_ptrace(PEEK%s, %d, %lx, %lx) returning %ld, data %x\n",
- request == PTRACE_PEEKTEXT ? "TEXT" : "DATA",
- pid, oaddr, odata, ret, tmp);
- }
- else
-#endif
- ret = generic_ptrace_peekdata(child, addr, data);
- goto out_tsk;
- }
-
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = 0;
-#ifdef CONFIG_64BIT
- if (__is_compat_task(child)) {
- unsigned int tmp = (unsigned int)data;
- DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n",
- request == PTRACE_POKETEXT ? "TEXT" : "DATA",
- pid, oaddr, odata);
- addr &= 0xffffffffL;
- if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) == sizeof(tmp))
- goto out_tsk;
- }
- else
-#endif
- {
- if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
- goto out_tsk;
- }
- ret = -EIO;
- goto out_tsk;
- /* Read the word at location addr in the USER area. For ptraced
- processes, the kernel saves all regs on a syscall. */
- case PTRACE_PEEKUSR: {
- ret = -EIO;
-#ifdef CONFIG_64BIT
- if (__is_compat_task(child)) {
- unsigned int tmp;
-
- if (addr & (sizeof(int)-1))
- goto out_tsk;
- if ((addr = translate_usr_offset(addr)) < 0)
- goto out_tsk;
-
- tmp = *(unsigned int *) ((char *) task_regs(child) + addr);
- ret = put_user(tmp, (unsigned int *) data);
- DBG("sys_ptrace(PEEKUSR, %d, %lx, %lx) returning %ld, addr %lx, data %x\n",
- pid, oaddr, odata, ret, addr, tmp);
- }
- else
-#endif
- {
- unsigned long tmp;
+ case PTRACE_PEEKUSR:
+ if (addr & (sizeof(compat_uint_t)-1))
+ break;
+ addr = translate_usr_offset(addr);
+ if (addr >= sizeof(struct pt_regs))
+ break;
- if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
- goto out_tsk;
- tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
- ret = put_user(tmp, (unsigned long *) data);
- }
- goto out_tsk;
- }
+ tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
+ ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
+ break;
/* Write the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall.
@@ -169,201 +226,79 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
those values are overwritten by actual register values on syscall
exit. */
case PTRACE_POKEUSR:
- ret = -EIO;
/* Some register values written here may be ignored in
* entry.S:syscall_restore_rfi; e.g. iaoq is written with
* r31/r31+4, and not with the values in pt_regs.
*/
- /* PT_PSW=0, so this is valid for 32 bit processes under 64
- * bit kernels.
- */
if (addr == PT_PSW) {
- /* PT_PSW=0, so this is valid for 32 bit processes
- * under 64 bit kernels.
- *
- * Allow writing to Nullify, Divide-step-correction,
- * and carry/borrow bits.
- * BEWARE, if you set N, and then single step, it won't
- * stop on the nullified instruction.
+ /* Since PT_PSW==0, it is valid for 32 bit processes
+ * under 64 bit kernels as well.
*/
- DBG("sys_ptrace(POKEUSR, %d, %lx, %lx)\n",
- pid, oaddr, odata);
- data &= USER_PSW_BITS;
- task_regs(child)->gr[0] &= ~USER_PSW_BITS;
- task_regs(child)->gr[0] |= data;
- ret = 0;
- goto out_tsk;
- }
-#ifdef CONFIG_64BIT
- if (__is_compat_task(child)) {
- if (addr & (sizeof(int)-1))
- goto out_tsk;
- if ((addr = translate_usr_offset(addr)) < 0)
- goto out_tsk;
- DBG("sys_ptrace(POKEUSR, %d, %lx, %lx) addr %lx\n",
- pid, oaddr, odata, addr);
+ ret = arch_ptrace(child, request, addr, data);
+ } else {
+ if (addr & (sizeof(compat_uint_t)-1))
+ break;
+ addr = translate_usr_offset(addr);
+ if (addr >= sizeof(struct pt_regs))
+ break;
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
- *(unsigned int *) ((char *) task_regs(child) + addr) = data;
+ *(__u64 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
addr == PT_SAR+4) {
/* Zero the top 32 bits */
- *(unsigned int *) ((char *) task_regs(child) + addr - 4) = 0;
- *(unsigned int *) ((char *) task_regs(child) + addr) = data;
- ret = 0;
- }
- goto out_tsk;
- }
- else
-#endif
- {
- if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
- goto out_tsk;
- if ((addr >= PT_GR1 && addr <= PT_GR31) ||
- addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
- (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
- addr == PT_SAR) {
- *(unsigned long *) ((char *) task_regs(child) + addr) = data;
+ *(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
+ *(__u32 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
- goto out_tsk;
}
-
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT:
- ret = -EIO;
- DBG("sys_ptrace(%s)\n",
- request == PTRACE_SYSCALL ? "SYSCALL" : "CONT");
- if (!valid_signal(data))
- goto out_tsk;
- child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- } else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- child->exit_code = data;
- goto out_wake_notrap;
-
- case PTRACE_KILL:
- /*
- * make the child exit. Best I can do is send it a
- * sigkill. perhaps it should be put in the status
- * that it wants to exit.
- */
- ret = 0;
- DBG("sys_ptrace(KILL)\n");
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- goto out_tsk;
- child->exit_code = SIGKILL;
- goto out_wake_notrap;
-
- case PTRACE_SINGLEBLOCK:
- DBG("sys_ptrace(SINGLEBLOCK)\n");
- ret = -EIO;
- if (!valid_signal(data))
- goto out_tsk;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->ptrace &= ~PT_SINGLESTEP;
- child->ptrace |= PT_BLOCKSTEP;
- child->exit_code = data;
-
- /* Enable taken branch trap. */
- pa_psw(child)->r = 0;
- pa_psw(child)->t = 1;
- pa_psw(child)->h = 0;
- pa_psw(child)->l = 0;
- goto out_wake;
-
- case PTRACE_SINGLESTEP:
- DBG("sys_ptrace(SINGLESTEP)\n");
- ret = -EIO;
- if (!valid_signal(data))
- goto out_tsk;
-
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->ptrace &= ~PT_BLOCKSTEP;
- child->ptrace |= PT_SINGLESTEP;
- child->exit_code = data;
-
- if (pa_psw(child)->n) {
- struct siginfo si;
-
- /* Nullified, just crank over the queue. */
- task_regs(child)->iaoq[0] = task_regs(child)->iaoq[1];
- task_regs(child)->iasq[0] = task_regs(child)->iasq[1];
- task_regs(child)->iaoq[1] = task_regs(child)->iaoq[0] + 4;
- pa_psw(child)->n = 0;
- pa_psw(child)->x = 0;
- pa_psw(child)->y = 0;
- pa_psw(child)->z = 0;
- pa_psw(child)->b = 0;
- ptrace_disable(child);
- /* Don't wake up the child, but let the
- parent know something happened. */
- si.si_code = TRAP_TRACE;
- si.si_addr = (void __user *) (task_regs(child)->iaoq[0] & ~3);
- si.si_signo = SIGTRAP;
- si.si_errno = 0;
- force_sig_info(SIGTRAP, &si, child);
- //notify_parent(child, SIGCHLD);
- //ret = 0;
- goto out_wake;
- }
-
- /* Enable recovery counter traps. The recovery counter
- * itself will be set to zero on a task switch. If the
- * task is suspended on a syscall then the syscall return
- * path will overwrite the recovery counter with a suitable
- * value such that it traps once back in user space. We
-	 * disable interrupts in the child's PSW here also, to avoid
- * interrupts while the recovery counter is decrementing.
- */
- pa_psw(child)->r = 1;
- pa_psw(child)->t = 0;
- pa_psw(child)->h = 0;
- pa_psw(child)->l = 0;
- /* give it a chance to run. */
- goto out_wake;
-
- case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message, (unsigned int __user *) data);
- goto out_tsk;
+ break;
default:
- ret = ptrace_request(child, request, addr, data);
- goto out_tsk;
+ ret = compat_ptrace_request(child, request, addr, data);
+ break;
}
-out_wake_notrap:
- ptrace_disable(child);
-out_wake:
- wake_up_process(child);
- ret = 0;
-out_tsk:
- DBG("arch_ptrace(%ld, %d, %lx, %lx) returning %ld\n",
- request, pid, oaddr, odata, ret);
return ret;
}
+#endif
-void syscall_trace(void)
+long do_syscall_trace_enter(struct pt_regs *regs)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
- if (!(current->ptrace & PT_PTRACED))
- return;
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ long ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ ret = -1L;
+
+#ifdef CONFIG_64BIT
+ if (!is_compat_task())
+ audit_syscall_entry(AUDIT_ARCH_PARISC64,
+ regs->gr[20],
+ regs->gr[26], regs->gr[25],
+ regs->gr[24], regs->gr[23]);
+ else
+#endif
+ audit_syscall_entry(AUDIT_ARCH_PARISC,
+ regs->gr[20] & 0xffffffff,
+ regs->gr[26] & 0xffffffff,
+ regs->gr[25] & 0xffffffff,
+ regs->gr[24] & 0xffffffff,
+ regs->gr[23] & 0xffffffff);
+
+ return ret ? : regs->gr[20];
+}
+
+void do_syscall_trace_exit(struct pt_regs *regs)
+{
+ int stepping = test_thread_flag(TIF_SINGLESTEP) ||
+ test_thread_flag(TIF_BLOCKSTEP);
+
+ audit_syscall_exit(regs);
+
+ if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, stepping);
}
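
arch_ptrace() above services PTRACE_PEEKUSR (glibc spells it PTRACE_PEEKUSER) by indexing straight into the saved pt_regs, which is why addr must be word-aligned and below sizeof(struct pt_regs). A hedged userspace sketch of reading one register from a stopped child; PT_GR28 here is a hypothetical offset, the real values come from the parisc asm-offsets:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#define PT_GR28 (28 * sizeof(long))	/* assumption: gr[] starts at offset 0 */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}
	waitpid(pid, NULL, 0);		/* child stops at exec */
	errno = 0;
	long val = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_GR28, NULL);
	if (errno)
		perror("PTRACE_PEEKUSER");
	else
		printf("gr28 = %#lx\n", val);
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}
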
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 7a92695d95a..5f3d3a1f903 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -8,12 +8,24 @@
*
*/
+#include <asm/pdc.h>
#include <asm/psw.h>
#include <asm/assembly.h>
+#include <asm/asm-offsets.h>
#include <linux/linkage.h>
+
.section .bss
+
+ .export pdc_result
+ .export pdc_result2
+ .align 8
+pdc_result:
+ .block ASM_PDC_RESULT_SIZE
+pdc_result2:
+ .block ASM_PDC_RESULT_SIZE
+
.export real_stack
.export real32_stack
.export real64_stack
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c
deleted file mode 100644
index ee806bcc372..00000000000
--- a/arch/parisc/kernel/semaphore.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
- */
-
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-/*
- * Semaphores are complex as we wish to avoid using two variables.
- * `count' has multiple roles, depending on its value. If it is positive
- * or zero, there are no waiters. The functions here will never be
- * called; see <asm/semaphore.h>
- *
- * When count is -1 it indicates there is at least one task waiting
- * for the semaphore.
- *
- * When count is less than that, there are '- count - 1' wakeups
- * pending. ie if it has value -3, there are 2 wakeups pending.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
- sem->count--;
- wake_up(&sem->wait);
-}
-
-#define wakers(count) (-1 - count)
-
-#define DOWN_HEAD \
- int ret = 0; \
- DECLARE_WAITQUEUE(wait, current); \
- \
- /* Note that someone is waiting */ \
- if (sem->count == 0) \
- sem->count = -1; \
- \
- /* protected by the sentry still -- use unlocked version */ \
- wait.flags = WQ_FLAG_EXCLUSIVE; \
- __add_wait_queue_tail(&sem->wait, &wait); \
- lost_race: \
- spin_unlock_irq(&sem->sentry); \
-
-#define DOWN_TAIL \
- spin_lock_irq(&sem->sentry); \
- if (wakers(sem->count) == 0 && ret == 0) \
- goto lost_race; /* Someone stole our wakeup */ \
- __remove_wait_queue(&sem->wait, &wait); \
- current->state = TASK_RUNNING; \
- if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \
- sem->count = wakers(sem->count);
-
-#define UPDATE_COUNT \
- sem->count += (sem->count < 0) ? 1 : - 1;
-
-
-void __sched __down(struct semaphore * sem)
-{
- DOWN_HEAD
-
- for(;;) {
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- /* we can _read_ this without the sentry */
- if (sem->count != -1)
- break;
- schedule();
- }
-
- DOWN_TAIL
- UPDATE_COUNT
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
- DOWN_HEAD
-
- for(;;) {
- set_task_state(current, TASK_INTERRUPTIBLE);
- /* we can _read_ this without the sentry */
- if (sem->count != -1)
- break;
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- schedule();
- }
-
- DOWN_TAIL
-
- if (!ret) {
- UPDATE_COUNT
- }
-
- return ret;
-}
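
The counting scheme the deleted file relied on: count >= 0 means uncontended, count == -1 means at least one waiter with no wakeup pending, and every step below that is one more pending wakeup, so wakers(count) = -1 - count. A standalone check of the arithmetic from the comment above ("if it has value -3, there are 2 wakeups pending"):

#include <assert.h>

#define wakers(count) (-1 - (count))

int main(void)
{
	assert(wakers(-1) == 0);	/* waiters, nothing pending yet */
	assert(wakers(-3) == 2);	/* two wakeups pending */
	return 0;
}

The whole file could go because the kernel replaced the per-architecture semaphore implementations with the generic kernel/semaphore.c in 2.6.26.
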
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 39e7c5a5946..72a3c658ad7 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -1,5 +1,4 @@
-/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $
- *
+/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
@@ -36,6 +35,7 @@
#include <linux/pci.h>
#undef PCI_DEBUG
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/pdc.h>
@@ -44,6 +44,7 @@
#include <asm/pdc_chassis.h>
#include <asm/io.h>
#include <asm/setup.h>
+#include <asm/unwind.h>
static char __initdata command_line[COMMAND_LINE_SIZE];
@@ -57,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
EXPORT_SYMBOL(parisc_bus_is_phys);
#endif
-/* This sets the vmerge boundary and size, it's here because it has to
- * be available on all platforms (zero means no-virtual merging) */
-unsigned long parisc_vmerge_boundary = 0;
-unsigned long parisc_vmerge_max_size = 0;
-
void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
@@ -73,7 +69,8 @@ void __init setup_cmdline(char **cmdline_p)
/* called from hpux boot loader */
boot_command_line[0] = '\0';
} else {
- strcpy(boot_command_line, (char *)__va(boot_args[1]));
+ strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+ COMMAND_LINE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
@@ -123,6 +120,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_64BIT
extern int parisc_narrow_firmware;
#endif
+ unwind_init();
init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
@@ -132,6 +130,8 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "The 32-bit Kernel has started...\n");
#endif
+ printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+
pdc_console_init();
#ifdef CONFIG_64BIT
@@ -156,7 +156,7 @@ void __init setup_arch(char **cmdline_p)
#endif
#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con; /* we use take_over_console() later ! */
+ conswitchp = &dummy_con; /* we use do_take_over_console() later ! */
#endif
}
@@ -318,8 +318,12 @@ static int __init parisc_init(void)
pdc_stable_write(0x40, &osid, sizeof(osid));
processor_init();
- printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
- boot_cpu_data.cpu_count,
+#ifdef CONFIG_SMP
+ pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
+ num_online_cpus(), num_present_cpus(),
+#else
+ pr_info("CPU(s): 1 x %s at %d.%06d MHz\n",
+#endif
boot_cpu_data.cpu_name,
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
@@ -368,6 +372,31 @@ static int __init parisc_init(void)
return 0;
}
-
arch_initcall(parisc_init);
+void start_parisc(void)
+{
+ extern void start_kernel(void);
+
+ int ret, cpunum;
+ struct pdc_coproc_cfg coproc_cfg;
+
+ cpunum = smp_processor_id();
+
+ set_firmware_width_unlocked();
+
+ ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
+ if (ret >= 0 && coproc_cfg.ccr_functional) {
+ mtctl(coproc_cfg.ccr_functional, 10);
+
+ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+ per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
+
+ asm volatile ("fstd %fr0,8(%sp)");
+ } else {
+ panic("must have an fpu to boot linux");
+ }
+
+ start_kernel();
+ // not reached
+}
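
setup_cmdline() now bounds the copy with strlcpy(), so an oversized argument string handed over by the boot loader is truncated at COMMAND_LINE_SIZE instead of overrunning boot_command_line. A small userspace sketch of the semantics, with a local stand-in since strlcpy() is a kernel/BSD routine rather than standard C:

#include <stdio.h>
#include <string.h>

/* stand-in for the kernel's strlcpy(): copy at most size-1 bytes,
 * always NUL-terminate, return the length of the source */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char cmdline[8];

	if (my_strlcpy(cmdline, "console=ttyS0", sizeof(cmdline)) >= sizeof(cmdline))
		printf("truncated to \"%s\"\n", cmdline);	/* prints "console" */
	return 0;
}
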
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 58fccc96d00..1cba8f29bb4 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -21,6 +21,7 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
+#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/compat.h>
@@ -33,7 +34,6 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
#include "signal32.h"
#endif
@@ -48,9 +48,6 @@
#define DBG(LEVEL, ...)
#endif
-
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
/* gcc will complain if a pointer is cast to an integer of different
* size. If you really need to do this (and we do for an ELF32 user
* application in an ELF64 kernel) then you have to do a cast to an
@@ -59,13 +56,6 @@
#define A(__x) ((unsigned long)(__x))
/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-#ifdef CONFIG_64BIT
-#include "sys32.h"
-#endif
-
-/*
* Do a signal return - restore sigcontext.
*/
@@ -88,7 +78,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
err |= __get_user(regs->sar, &sc->sc_sar);
- DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n",
+ DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n",
regs->iaoq[0],regs->iaoq[1]);
DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
return err;
@@ -98,7 +88,6 @@ void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
- struct siginfo si;
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
@@ -110,12 +99,15 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* Unwind the user stack to get the rt_sigframe structure. */
frame = (struct rt_sigframe __user *)
(usp - sigframe_size);
DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
+ regs->orig_r28 = 1; /* no restarts for sigreturn */
+
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
@@ -131,11 +123,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
goto give_sigsegv;
}
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
/* Good thing we saved the old gr[30], eh? */
#ifdef CONFIG_64BIT
@@ -148,7 +136,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &compat_frame->uc.uc_stack);
- if (do_sigaltstack32(&compat_frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ if (compat_restore_altstack(&compat_frame->uc.uc_stack))
goto give_sigsegv;
} else
#endif
@@ -159,7 +147,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
goto give_sigsegv;
DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
usp, &frame->uc.uc_stack);
- if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ if (restore_altstack(&frame->uc.uc_stack))
goto give_sigsegv;
}
@@ -178,13 +166,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
give_sigsegv:
DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
- si.si_signo = SIGSEGV;
- si.si_errno = 0;
- si.si_code = SI_KERNEL;
- si.si_pid = task_pid_vnr(current);
- si.si_uid = current->uid;
- si.si_addr = &frame->uc;
- force_sig_info(SIGSEGV, &si, current);
+ force_sig(SIGSEGV, current);
return;
}
@@ -201,8 +183,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
(unsigned long)ka, sp, frame_size);
+ /* Align alternate stack and reserve 64 bytes for the signal
+ handler's frame marker. */
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
- sp = current->sas_ss_sp; /* Stacks grow up! */
+ sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
return (void __user *) sp; /* Stacks grow up. Fun. */
@@ -251,7 +235,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
unsigned long haddr, sigframe_size;
int err = 0;
#ifdef CONFIG_64BIT
- compat_int_t compat_val;
struct compat_rt_sigframe __user * compat_frame;
compat_sigset_t compat_set;
#endif
@@ -271,15 +254,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (is_compat_task()) {
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
err |= copy_siginfo_to_user32(&compat_frame->info, info);
- DBG(1,"SETUP_RT_FRAME: 1\n");
- compat_val = (compat_int_t)current->sas_ss_sp;
- err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp);
- DBG(1,"SETUP_RT_FRAME: 2\n");
- compat_val = (compat_int_t)current->sas_ss_size;
- err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_size);
- DBG(1,"SETUP_RT_FRAME: 3\n");
- compat_val = sas_ss_flags(regs->gr[30]);
- err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_flags);
+ err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
@@ -291,14 +266,11 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
{
DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
err |= copy_siginfo_to_user(&frame->info, info);
- err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= __put_user(sas_ss_flags(regs->gr[30]),
- &frame->uc.uc_stack.ss_flags);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]);
DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
- /* FIXME: Should probably be converted aswell for the compat case */
+ /* FIXME: Should probably be converted as well for the compat case */
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
}
@@ -321,7 +293,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
#if DEBUG_SIG
/* Assert that we're flushing in the correct space... */
{
- int sid;
+ unsigned long sid;
asm ("mfsp %%sr3,%0" : "=r" (sid));
DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
sid, frame->tramp);
@@ -450,34 +422,35 @@ give_sigsegv:
* OK, we're invoking a handler.
*/
-static long
+static void
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+ struct pt_regs *regs, int in_syscall)
{
+ sigset_t *oldset = sigmask_to_save();
DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
sig, ka, info, oldset, regs);
/* Set up the stack frame */
if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
- return 0;
-
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- return 1;
+ return;
+
+ signal_delivered(sig, info, ka, regs,
+ test_thread_flag(TIF_SINGLESTEP) ||
+ test_thread_flag(TIF_BLOCKSTEP));
+
+ DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+ regs->gr[28]);
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
+ if (regs->orig_r28)
+ return;
+ regs->orig_r28 = 1; /* no more restarts */
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
- current_thread_info()->restart_block.fn =
- do_no_restart_syscall;
case -ERESTARTNOHAND:
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
@@ -495,8 +468,6 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
* we have to do is fiddle the return pointer.
*/
regs->gr[31] -= 8; /* delayed branching */
- /* Preserve original r28. */
- regs->gr[28] = regs->orig_r28;
break;
}
}
@@ -504,6 +475,9 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
static inline void
insert_restart_trampoline(struct pt_regs *regs)
{
+ if (regs->orig_r28)
+ return;
+ regs->orig_r28 = 1; /* no more restarts */
switch(regs->gr[28]) {
case -ERESTART_RESTARTBLOCK: {
/* Restart the system call - no handlers present */
@@ -534,12 +508,10 @@ insert_restart_trampoline(struct pt_regs *regs)
* Flushing one cacheline is cheap.
* "sync" on bigger (> 4 way) boxes is not.
*/
- flush_icache_range(regs->gr[30], regs->gr[30] + 4);
+ flush_user_dcache_range(regs->gr[30], regs->gr[30] + 4);
+ flush_user_icache_range(regs->gr[30], regs->gr[30] + 4);
regs->gr[31] = regs->gr[30] + 8;
- /* Preserve original r28. */
- regs->gr[28] = regs->orig_r28;
-
return;
}
case -ERESTARTNOHAND:
@@ -551,9 +523,6 @@ insert_restart_trampoline(struct pt_regs *regs)
* slot of the branch external instruction.
*/
regs->gr[31] -= 8;
- /* Preserve original r28. */
- regs->gr[28] = regs->orig_r28;
-
return;
}
default:
@@ -578,51 +547,21 @@ do_signal(struct pt_regs *regs, long in_syscall)
siginfo_t info;
struct k_sigaction ka;
int signr;
- sigset_t *oldset;
-
- DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
- oldset, regs, regs->sr[7], in_syscall);
- /* Everyone else checks to see if they are in kernel mode at
- this point and exits if that's the case. I'm not sure why
- we would be called in that case, but for some reason we
- are. */
+ DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+ regs, regs->sr[7], in_syscall);
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
- DBG(1,"do_signal: oldset %08lx / %08lx\n",
- oldset->sig[0], oldset->sig[1]);
-
-
- /* May need to force signal if handle_signal failed to deliver */
- while (1) {
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
- if (signr <= 0)
- break;
-
+ if (signr > 0) {
/* Restart a system call if necessary. */
if (in_syscall)
syscall_restart(regs, &ka);
- /* Whee! Actually deliver the signal. If the
- delivery failed, we need to continue to iterate in
- this loop so we can deliver the SIGSEGV... */
- if (handle_signal(signr, &info, &ka, oldset,
- regs, in_syscall)) {
- DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
- regs->gr[28]);
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- return;
- }
+ handle_signal(signr, &info, &ka, regs, in_syscall);
+ return;
}
- /* end of while(1) looping forever if we can't force a signal */
/* Did we come from a system call? */
if (in_syscall)
@@ -631,17 +570,16 @@ do_signal(struct pt_regs *regs, long in_syscall)
DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
regs->gr[28]);
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
-
- return;
+ restore_saved_sigmask();
}
void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
- if (test_thread_flag(TIF_SIGPENDING) ||
- test_thread_flag(TIF_RESTORE_SIGMASK))
+ if (test_thread_flag(TIF_SIGPENDING))
do_signal(regs, in_syscall);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
}
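
Two details worth pulling out of the signal.c changes. First, orig_r28 is repurposed as a one-shot "no more restarts" flag for syscall_restart() and insert_restart_trampoline(). Second, get_sigframe() now computes sp = (sas_ss_sp + 0x7f) & ~0x3f, which rounds the alternate stack base up to the next 64-byte boundary and then leaves one further 64-byte slot for the handler's frame marker (parisc stacks grow upward). A standalone check of that alignment arithmetic:

#include <assert.h>

/* mirror of get_sigframe()'s alternate-stack adjustment */
static unsigned long align_altstack(unsigned long sp)
{
	return (sp + 0x7f) & ~0x3fUL;
}

int main(void)
{
	assert(align_altstack(0x1000) == 0x1040);	/* already aligned: +64 */
	assert(align_altstack(0x1001) == 0x1080);	/* round up to 0x1040, then +64 */
	return 0;
}
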
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index db94affe5c7..984abbee71c 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -23,7 +23,6 @@
*/
#include <linux/compat.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/init.h>
@@ -35,7 +34,6 @@
#include <asm/uaccess.h>
#include "signal32.h"
-#include "sys32.h"
#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2
@@ -48,8 +46,6 @@
#define DBG(LEVEL, ...)
#endif
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
inline void
sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
{
@@ -63,127 +59,6 @@ sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
}
-static int
-put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
-{
- compat_sigset_t s;
-
- if (sz != sizeof *set) panic("put_sigset32()");
- sigset_64to32(&s, set);
-
- return copy_to_user(up, &s, sizeof s);
-}
-
-static int
-get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
-{
- compat_sigset_t s;
- int r;
-
- if (sz != sizeof *set) panic("put_sigset32()");
-
- if ((r = copy_from_user(&s, up, sz)) == 0) {
- sigset_32to64(set, &s);
- }
-
- return r;
-}
-
-int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
- unsigned int sigsetsize)
-{
- sigset_t old_set, new_set;
- int ret;
-
- if (set && get_sigset32(set, &new_set, sigsetsize))
- return -EFAULT;
-
- KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL,
- oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);
-
- if (!ret && oset && put_sigset32(oset, &old_set, sigsetsize))
- return -EFAULT;
-
- return ret;
-}
-
-
-int sys32_rt_sigpending(compat_sigset_t __user *uset, unsigned int sigsetsize)
-{
- int ret;
- sigset_t set;
-
- KERNEL_SYSCALL(ret, sys_rt_sigpending, (sigset_t __user *)&set, sigsetsize);
-
- if (!ret && put_sigset32(uset, &set, sigsetsize))
- return -EFAULT;
-
- return ret;
-}
-
-long
-sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact,
- size_t sigsetsize)
-{
- struct k_sigaction32 new_sa32, old_sa32;
- struct k_sigaction new_sa, old_sa;
- int ret = -EINVAL;
-
- if (act) {
- if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
- return -EFAULT;
- new_sa.sa.sa_handler = (__sighandler_t)(unsigned long)new_sa32.sa.sa_handler;
- new_sa.sa.sa_flags = new_sa32.sa.sa_flags;
- sigset_32to64(&new_sa.sa.sa_mask, &new_sa32.sa.sa_mask);
- }
-
- ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
-
- if (!ret && oact) {
- sigset_64to32(&old_sa32.sa.sa_mask, &old_sa.sa.sa_mask);
- old_sa32.sa.sa_flags = old_sa.sa.sa_flags;
- old_sa32.sa.sa_handler = (__sighandler_t32)(unsigned long)old_sa.sa.sa_handler;
- if (copy_to_user(oact, &old_sa32.sa, sizeof old_sa32.sa))
- return -EFAULT;
- }
- return ret;
-}
-
-int
-do_sigaltstack32 (const compat_stack_t __user *uss32, compat_stack_t __user *uoss32, unsigned long sp)
-{
- compat_stack_t ss32, oss32;
- stack_t ss, oss;
- stack_t *ssp = NULL, *ossp = NULL;
- int ret;
-
- if (uss32) {
- if (copy_from_user(&ss32, uss32, sizeof ss32))
- return -EFAULT;
-
- ss.ss_sp = (void __user *)(unsigned long)ss32.ss_sp;
- ss.ss_flags = ss32.ss_flags;
- ss.ss_size = ss32.ss_size;
-
- ssp = &ss;
- }
-
- if (uoss32)
- ossp = &oss;
-
- KERNEL_SYSCALL(ret, do_sigaltstack, (const stack_t __user *)ssp, (stack_t __user *)ossp, sp);
-
- if (!ret && uoss32) {
- oss32.ss_sp = (unsigned int)(unsigned long)oss.ss_sp;
- oss32.ss_flags = oss.ss_flags;
- oss32.ss_size = oss.ss_size;
- if (copy_to_user(uoss32, &oss32, sizeof *uoss32))
- return -EFAULT;
- }
-
- return ret;
-}
-
long
restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
struct pt_regs *regs)
@@ -289,7 +164,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
&sc->sc_iaoq[0], compat_reg);
/* Store upper half */
- compat_reg = (compat_uint_t)(regs->gr[32] >> 32);
+ compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
@@ -299,7 +174,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
&sc->sc_iaoq[1], compat_reg);
/* Store upper half */
- compat_reg = (compat_uint_t)((regs->gr[32]+4) >> 32);
+ compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
@@ -444,7 +319,7 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
}
int
-copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
+copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
{
compat_uptr_t addr;
compat_int_t val;
@@ -500,22 +375,3 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
}
return err;
}
-
-asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
- struct compat_siginfo __user *uinfo)
-{
- siginfo_t info;
-
- if (copy_siginfo_from_user32(&info, uinfo))
- return -EFAULT;
-
- /* Not even root can pretend to send signals from the kernel.
- Nor can they impersonate a kill(), which adds source info. */
- if (info.si_code >= 0)
- return -EPERM;
- info.si_signo = sig;
-
- /* POSIX.1b doesn't mention process groups. */
- return kill_proc_info(sig, &info, pid);
-}
-
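
sigset_32to64()/sigset_64to32(), kept at the top of this file, do nothing more than split the 64-bit signal mask into two 32-bit words and join them again. A standalone round-trip check of the same shifts:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t s64 = 0x123456789abcdef0ULL;
	uint32_t lo = s64 & 0xffffffffUL;		/* s32->sig[0] */
	uint32_t hi = (s64 >> 32) & 0xffffffffUL;	/* s32->sig[1] */

	/* sigset_32to64() reassembles the halves */
	assert((((uint64_t)hi << 32) | lo) == s64);
	return 0;
}
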
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index c7800846422..af51d4ccee4 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -21,23 +21,6 @@
#include <linux/compat.h>
-typedef compat_uptr_t compat_sighandler_t;
-
-typedef struct compat_sigaltstack {
- compat_uptr_t ss_sp;
- compat_int_t ss_flags;
- compat_size_t ss_size;
-} compat_stack_t;
-
-/* Most things should be clean enough to redefine this at will, if care
- is taken to make libc match. */
-
-struct compat_sigaction {
- compat_sighandler_t sa_handler;
- compat_uint_t sa_flags;
- compat_sigset_t sa_mask; /* mask last for extensibility */
-};
-
/* 32-bit ucontext as seen from an 64-bit kernel */
struct compat_ucontext {
compat_uint_t uc_flags;
@@ -51,63 +34,7 @@ struct compat_ucontext {
/* ELF32 signal handling */
-struct k_sigaction32 {
- struct compat_sigaction sa;
-};
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[((128/sizeof(int)) - 3)];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof(unsigned int) - sizeof(int)];
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
-int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from);
+int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
/* In a deft move of uber-hackery, we decide to carry the top half of all
@@ -154,8 +81,6 @@ struct compat_rt_sigframe {
void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
-int do_sigaltstack32 (const compat_stack_t __user *uss32,
- compat_stack_t __user *uoss32, unsigned long sp);
long restore_sigcontext32(struct compat_sigcontext __user *sc,
struct compat_regfile __user *rf,
struct pt_regs *regs);
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 85fc7754ec2..ceda229ea6c 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -18,7 +18,6 @@
*/
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -31,9 +30,10 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>
@@ -56,42 +56,17 @@ static int smp_debug_lvl = 0;
if (lvl >= smp_debug_lvl) \
printk(printargs);
#else
-#define smp_debug(lvl, ...)
+#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
-DEFINE_SPINLOCK(smp_lock);
-
volatile struct task_struct *smp_init_current_idle_task;
-static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
-
-static int parisc_max_cpus __read_mostly = 1;
-
-/* online cpus are ones that we've managed to bring up completely
- * possible cpus are all valid cpu
- * present cpus are all detected cpu
- *
- * On startup we bring up the "possible" cpus. Since we discover
- * CPUs later, we add them as hotplug, so the possible cpu mask is
- * empty in the beginning.
- */
+/* track which CPU is booting */
+static volatile int cpu_now_booting;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */
+static int parisc_max_cpus = 1;
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
-
-DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
-
-struct smp_call_struct {
- void (*func) (void *info);
- void *info;
- long wait;
- atomic_t unstarted_count;
- atomic_t unfinished_count;
-};
-static volatile struct smp_call_struct *smp_call_function_data;
+static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
IPI_NOP=0,
@@ -135,26 +110,21 @@ halt_processor(void)
{
/* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */
- cpu_clear(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), false);
local_irq_disable();
for (;;)
;
}
-irqreturn_t
+irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
int this_cpu = smp_processor_id();
- struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+ struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
unsigned long ops;
unsigned long flags;
- /* Count this now; we may make a call that never returns. */
- p->ipi_count++;
-
- mb(); /* Order interrupt and bit testing. */
-
for (;;) {
spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
spin_lock_irqsave(lock, flags);
@@ -179,41 +149,13 @@ ipi_interrupt(int irq, void *dev_id)
case IPI_RESCHEDULE:
smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
- /*
- * Reschedule callback. Everything to be
- * done is done by the interrupt return path.
- */
+ inc_irq_stat(irq_resched_count);
+ scheduler_ipi();
break;
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
- {
- volatile struct smp_call_struct *data;
- void (*func)(void *info);
- void *info;
- int wait;
-
- data = smp_call_function_data;
- func = data->func;
- info = data->info;
- wait = data->wait;
-
- mb();
- atomic_dec ((atomic_t *)&data->unstarted_count);
-
- /* At this point, *data can't
- * be relied upon.
- */
-
- (*func)(info);
-
- /* Notify the sending CPU that the
- * task is done.
- */
- mb();
- if (wait)
- atomic_dec ((atomic_t *)&data->unfinished_count);
- }
+ generic_smp_call_function_interrupt();
break;
case IPI_CPU_START:
@@ -246,24 +188,29 @@ ipi_interrupt(int irq, void *dev_id)
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
- struct cpuinfo_parisc *p = &cpu_data[cpu];
+ struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
unsigned long flags;
spin_lock_irqsave(lock, flags);
p->pending_ipi |= 1 << op;
- gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+ gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
spin_unlock_irqrestore(lock, flags);
}
+static void
+send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask)
+ ipi_send(cpu, op);
+}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
- if (dest_cpu == NO_PROC_ID) {
- BUG();
- return;
- }
+ BUG_ON(dest_cpu == NO_PROC_ID);
ipi_send(dest_cpu, op);
}
@@ -295,96 +242,14 @@ smp_send_all_nop(void)
send_IPI_allbutself(IPI_NOP);
}
-
-/**
- * Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or have executed.
- */
-
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- struct smp_call_struct data;
- unsigned long timeout;
- static DEFINE_SPINLOCK(lock);
- int retries = 0;
-
- if (num_online_cpus() < 2)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- /* can also deadlock if IPIs are disabled */
- WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);
-
-
- data.func = func;
- data.info = info;
- data.wait = wait;
- atomic_set(&data.unstarted_count, num_online_cpus() - 1);
- atomic_set(&data.unfinished_count, num_online_cpus() - 1);
-
- if (retry) {
- spin_lock (&lock);
- while (smp_call_function_data != 0)
- barrier();
- }
- else {
- spin_lock (&lock);
- if (smp_call_function_data) {
- spin_unlock (&lock);
- return -EBUSY;
- }
- }
-
- smp_call_function_data = &data;
- spin_unlock (&lock);
-
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- retry:
- /* Wait for response */
- timeout = jiffies + HZ;
- while ( (atomic_read (&data.unstarted_count) > 0) &&
- time_before (jiffies, timeout) )
- barrier ();
-
- if (atomic_read (&data.unstarted_count) > 0) {
- printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
- smp_processor_id(), ++retries);
- goto retry;
- }
- /* We either got one or timed out. Release the lock */
-
- mb();
- smp_call_function_data = NULL;
-
- while (wait && atomic_read (&data.unfinished_count) > 0)
- barrier ();
-
- return 0;
+ send_IPI_mask(mask, IPI_CALL_FUNC);
}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
- * as we want to ensure all TLB's flushed before proceeding.
- */
-
-void
-smp_flush_tlb_all(void)
+void arch_send_call_function_single_ipi(int cpu)
{
- on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
+ send_IPI_single(cpu, IPI_CALL_FUNC);
}
/*
@@ -405,19 +270,21 @@ smp_cpu_init(int cpunum)
mb();
/* Well, support 2.4 linux scheme as well. */
- if (cpu_test_and_set(cpunum, cpu_online_map))
- {
+ if (cpu_online(cpunum)) {
extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
- }
+ }
+
+ notify_cpu_starting(cpunum);
+
+ set_cpu_online(cpunum, true);
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- if(current->mm)
- BUG();
+ BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
@@ -441,7 +308,7 @@ void __init smp_callin(void)
local_irq_enable(); /* Interrupts have been off until now */
- cpu_idle(); /* Wait for timer to schedule some work */
+ cpu_startup_entry(CPUHP_ONLINE);
/* NOTREACHED */
panic("smp_callin() AAAAaaaaahhhh....\n");
@@ -450,25 +317,11 @@ void __init smp_callin(void)
/*
* Bring one cpu online.
*/
-int __cpuinit smp_boot_one_cpu(int cpuid)
+int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
- struct task_struct *idle;
+ const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
- /*
- * Create an idle task for this CPU. Note the address wed* give
- * to kernel_thread is irrelevant -- it's going to start
- * where OS_BOOT_RENDEVZ vector in SAL says to start. But
- * this gets all the other task-y sort of data structures set
- * up like we wish. We need to pull the just created idle task
- * off the run queue and stuff it into the init_tasks[] array.
- * Sheesh . . .
- */
-
- idle = fork_idle(cpuid);
- if (IS_ERR(idle))
- panic("SMP: fork failed for CPU:%d", cpuid);
-
task_thread_info(idle)->cpu = cpuid;
/* Let _start know what logical CPU we're booting
@@ -483,7 +336,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
smp_init_current_idle_task = idle ;
mb();
- printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+ printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
/*
** This gets PDC to release the CPU from a very tight loop.
@@ -494,7 +347,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
** contents of memory are valid."
*/
- gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+ gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
mb();
/*
@@ -512,10 +365,6 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
udelay(100);
barrier();
}
-
- put_task_struct(idle);
- idle = NULL;
-
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
@@ -526,15 +375,15 @@ alive:
return 0;
}
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
- int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
+ int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
/* Setup BSP mappings */
- printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+ printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
- cpu_set(bootstrap_processor, cpu_online_map);
- cpu_set(bootstrap_processor, cpu_present_map);
+ set_cpu_online(bootstrap_processor, true);
+ set_cpu_present(bootstrap_processor, true);
}
@@ -545,8 +394,12 @@ void __devinit smp_prepare_boot_cpu(void)
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- cpus_clear(cpu_present_map);
- cpu_set(0, cpu_present_map);
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(ipi_lock, cpu));
+
+ init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
if (!max_cpus)
@@ -560,10 +413,10 @@ void smp_cpus_done(unsigned int cpu_max)
}
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
if (cpu != 0 && cpu < parisc_max_cpus)
- smp_boot_one_cpu(cpu);
+ smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
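With arch_send_call_function_ipi_mask() and arch_send_call_function_single_ipi()
in place, parisc hooks into the generic cross-call layer, which is what lets
the hand-rolled smp_call_function() above be deleted. A hedged sketch of how
callers reach these hooks now (count_local/count_everywhere are hypothetical):

#include <linux/atomic.h>
#include <linux/smp.h>

/* Runs on every CPU in IPI context via IPI_CALL_FUNC; must not sleep. */
static void count_local(void *info)
{
	atomic_inc(info);
}

static void count_everywhere(atomic_t *counter)
{
	/* Generic helper: routes through send_IPI_mask() via
	 * arch_send_call_function_ipi_mask(); wait == 1 blocks until
	 * all CPUs have run count_local(). */
	on_each_cpu(count_local, counter, 1);
}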
diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c
new file mode 100644
index 00000000000..2fe914c5f53
--- /dev/null
+++ b/arch/parisc/kernel/stacktrace.c
@@ -0,0 +1,63 @@
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2009 Helge Deller <deller@gmx.de>
+ * based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
+ * and parisc unwind functions by Randolph Chung <tausq@debian.org>
+ *
+ * TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
+ */
+#include <linux/module.h>
+#include <linux/stacktrace.h>
+
+#include <asm/unwind.h>
+
+static void dump_trace(struct task_struct *task, struct stack_trace *trace)
+{
+ struct unwind_frame_info info;
+
+ /* initialize unwind info */
+ if (task == current) {
+ unsigned long sp;
+ struct pt_regs r;
+HERE:
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long)&&HERE;
+ r.gr[2] = (unsigned long)__builtin_return_address(0);
+ r.gr[30] = sp;
+ unwind_frame_init(&info, task, &r);
+ } else {
+ unwind_frame_init_from_blocked_task(&info, task);
+ }
+
+ /* unwind stack and save entries in stack_trace struct */
+ trace->nr_entries = 0;
+ while (trace->nr_entries < trace->max_entries) {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ break;
+
+ if (__kernel_text_address(info.ip))
+ trace->entries[trace->nr_entries++] = info.ip;
+ }
+}
+
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ dump_trace(current, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ dump_trace(tsk, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
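The two exported entry points are all that generic CONFIG_STACKTRACE users
need on parisc. A hedged usage sketch (show_my_stack is a hypothetical
caller):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void show_my_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
	};

	save_stack_trace(&trace);	/* fills entries[], ULONG_MAX-terminated */
	print_stack_trace(&trace, 0);	/* common printer in kernel/stacktrace.c */
}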
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
deleted file mode 100644
index 06c2090cfab..00000000000
--- a/arch/parisc/kernel/sys32.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
- * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
- * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PARISC64_KERNEL_SYS32_H
-#define _PARISC64_KERNEL_SYS32_H
-
-#include <linux/compat.h>
-
-/* Call a kernel syscall which will use kernel space instead of user
- * space for its copy_to/from_user.
- */
-#define KERNEL_SYSCALL(ret, syscall, args...) \
-{ \
- mm_segment_t old_fs = get_fs(); \
- set_fs(KERNEL_DS); \
- ret = syscall(args); \
- set_fs (old_fs); \
-}
-
-#ifdef CONFIG_COMPAT
-
-typedef __u32 __sighandler_t32;
-
-struct sigaction32 {
- __sighandler_t32 sa_handler;
- unsigned int sa_flags;
- compat_sigset_t sa_mask; /* mask last for extensibility */
-};
-
-#endif
-
-#endif
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 4f589216b39..e1ffea2f9a0 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -5,6 +5,7 @@
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -23,6 +24,7 @@
*/
#include <asm/uaccess.h>
+#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
@@ -32,128 +34,238 @@
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
+#include <linux/random.h>
-int sys_pipe(int __user *fildes)
-{
- int fd[2];
- int error;
+/* we construct an artificial offset for the mapping based on the physical
+ * address of the kernel mapping variable */
+#define GET_LAST_MMAP(filp) \
+ (filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+#define SET_LAST_MMAP(filp, val) \
+ { /* nothing */ }
- error = do_pipe(fd);
- if (!error) {
- if (copy_to_user(fildes, fd, 2*sizeof(int)))
- error = -EFAULT;
- }
- return error;
+static int get_offset(unsigned int last_mmap)
+{
+ return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}
-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+static unsigned long shared_align_offset(unsigned int last_mmap,
+ unsigned long pgoff)
{
- struct vm_area_struct *vma;
+ return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
+}
- addr = PAGE_ALIGN(addr);
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
+ unsigned int last_mmap, unsigned long pgoff)
+{
+ unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+ unsigned long off = (SHM_COLOUR-1) &
+ (shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
- return addr;
- addr = vma->vm_end;
- }
+ return base + off;
}
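A hedged worked example of the colour arithmetic, assuming SHM_COLOUR is the
4 MiB cache-colour granularity defined elsewhere in the tree and PAGE_SHIFT
is 12 (neither definition appears in this hunk):

#define SHM_COLOUR	0x00400000UL	/* assumed: 4 MiB colour window */
#define PAGE_SHIFT	12		/* assumed: 4 KiB pages */

/* Mirror of shared_align_offset(): the page colour recorded for the
 * kernel-side mapping, advanced by the file page offset. For
 * last_mmap = 0x12345678 and pgoff = 2:
 *   (((0x12345678 & 0x003fffff) >> 12) + 2) << 12
 * = (0x345 + 2) << 12 = 0x347000
 * so every mapping of the object gets the same colour bits. */
static unsigned long colour_offset(unsigned long last_mmap, unsigned long pgoff)
{
	return (((last_mmap & (SHM_COLOUR - 1)) >> PAGE_SHIFT) + pgoff)
							<< PAGE_SHIFT;
}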
-#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
-
/*
- * We need to know the offset to use. Old scheme was to look for
- * existing mapping and use the same offset. New scheme is to use the
- * address of the kernel data structure as the seed for the offset.
- * We'll see how that works...
- *
- * The mapping is cacheline aligned, so there's no information in the bottom
- * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
- * drop the bottom 8 bits and use bits 8-17.
+ * Top of mmap area (just below the process stack).
*/
-static int get_offset(struct address_space *mapping)
+
+static unsigned long mmap_upper_limit(void)
{
- int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
- return offset & 0x3FF000;
+ unsigned long stack_base;
+
+ /* Limit stack size - see setup_arg_pages() in fs/exec.c */
+ stack_base = rlimit_max(RLIMIT_STACK);
+ if (stack_base > STACK_SIZE_MAX)
+ stack_base = STACK_SIZE_MAX;
+
+ return PAGE_ALIGN(STACK_TOP - stack_base);
}
-static unsigned long get_shared_area(struct address_space *mapping,
- unsigned long addr, unsigned long len, unsigned long pgoff)
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
{
+ struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- int offset = mapping ? get_offset(mapping) : 0;
-
- addr = DCACHE_ALIGN(addr - offset) + offset;
-
- for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vma || addr + len <= vma->vm_start)
- return addr;
- addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
- if (addr < vma->vm_end) /* handle wraparound */
- return -ENOMEM;
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
+
+ if (len > task_size)
+ return -ENOMEM;
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+ last_mmap = GET_LAST_MMAP(filp);
+
+ if (flags & MAP_FIXED) {
+ if ((flags & MAP_SHARED) && last_mmap &&
+ (addr - shared_align_offset(last_mmap, pgoff))
+ & (SHM_COLOUR - 1))
+ return -EINVAL;
+ goto found_addr;
}
+
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (task_size - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ goto found_addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_legacy_base;
+ info.high_limit = mmap_upper_limit();
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
+ addr = vm_unmapped_area(&info);
+
+found_addr:
+ if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+ SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
+ return addr;
}
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
+
+#ifdef CONFIG_64BIT
+ /* This should only ever run for 32-bit processes. */
+ BUG_ON(!test_thread_flag(TIF_32BIT));
+#endif
+
+ /* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
- /* Might want to check for cache aliasing issues for MAP_FIXED case
- * like ARM or MIPS ??? --BenH.
- */
- if (flags & MAP_FIXED)
- return addr;
- if (!addr)
- addr = TASK_UNMAPPED_BASE;
-
- if (filp) {
- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
- } else if(flags & MAP_SHARED) {
- addr = get_shared_area(NULL, addr, len, pgoff);
- } else {
- addr = get_unshared_area(addr, len);
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+ last_mmap = GET_LAST_MMAP(filp);
+
+ if (flags & MAP_FIXED) {
+ if ((flags & MAP_SHARED) && last_mmap &&
+ (addr - shared_align_offset(last_mmap, pgoff))
+ & (SHM_COLOUR - 1))
+ return -EINVAL;
+ goto found_addr;
}
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ goto found_addr;
+ }
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto found_addr;
+ VM_BUG_ON(addr != -ENOMEM);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+
+found_addr:
+ if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+ SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
return addr;
}
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long pgoff)
+static int mmap_is_legacy(void)
{
- struct file * file = NULL;
- unsigned long error = -EBADF;
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
+ if (current->personality & ADDR_COMPAT_LAYOUT)
+ return 1;
+
+ /* parisc stack always grows up - so an unlimited stack should
+ * not be an indicator to use the legacy memory layout.
+ * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+ * return 1;
+ */
+
+ return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+ unsigned long rnd = 0;
+
+ /*
+ * 8 bits of randomness in 32bit mmaps, 20 address space bits
+ * 28 bits of randomness in 64bit mmaps, 40 address space bits
+ */
+ if (current->flags & PF_RANDOMIZE) {
+ if (is_32bit_task())
+ rnd = get_random_int() % (1<<8);
+ else
+ rnd = get_random_int() % (1<<28);
}
+ return rnd << PAGE_SHIFT;
+}
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+static unsigned long mmap_legacy_base(void)
+{
+ return TASK_UNMAPPED_BASE + mmap_rnd();
+}
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_upper_limit();
- if (file != NULL)
- fput(file);
-out:
- return error;
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
}
+
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
@@ -161,7 +273,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
- return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
@@ -245,6 +358,13 @@ asmlinkage long parisc_sync_file_range(int fd,
(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}
+asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+ u32 lenhi, u32 lenlo)
+{
+ return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
+ ((u64)lenhi << 32) | lenlo);
+}
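parisc_fallocate() is one instance of a recurring wrapper pattern: a 64-bit
argument from 32-bit userspace arrives split across two 32-bit registers and
must be reassembled (sys32_fanotify_mark() further down does the same, with
mask1 as the high half). A hedged helper capturing the pattern (merge_64 is a
hypothetical name, not introduced by this patch):

#include <linux/types.h>

/* Glue a 32-bit register pair back into one 64-bit value. */
static inline u64 merge_64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}

/* parisc_fallocate() above is then equivalent to:
 *	sys_fallocate(fd, mode, merge_64(offhi, offlo),
 *		      merge_64(lenhi, lenlo));
 */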
+
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
{
return -ENOMEM;
@@ -260,27 +380,12 @@ long parisc_personality(unsigned long personality)
long err;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
- if (err == PER_LINUX32)
- err = PER_LINUX;
-
- return err;
-}
-
-long parisc_newuname(struct new_utsname __user *name)
-{
- int err = sys_newuname(name);
-
-#ifdef CONFIG_COMPAT
- if (!err && personality(current->personality) == PER_LINUX32) {
- if (__put_user(0, name->machine + 6) ||
- __put_user(0, name->machine + 7))
- err = -EFAULT;
- }
-#endif
+ if (personality(err) == PER_LINUX32)
+ err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
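Both comparisons are now narrowed to the personality type via personality(),
so ADDR_* flag bits survive the PER_LINUX <-> PER_LINUX32 translation in each
direction. A hedged illustration of the case the old code missed
(translate_personality is hypothetical):

#include <linux/personality.h>

static unsigned long translate_personality(unsigned long req)
{
	/* e.g. req = PER_LINUX | ADDR_LIMIT_32BIT from a PER_LINUX32
	 * process: the old test req == PER_LINUX failed because of the
	 * flag bit, so no translation happened. */
	if (personality(req) == PER_LINUX)	/* compare the type only */
		req = (req & ~PER_MASK) | PER_LINUX32;	/* keep the flags */
	return req;
}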
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 50bbf33ee00..93c1963d76f 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -4,6 +4,7 @@
* Copyright (C) 2000-2001 Hewlett Packard Company
* Copyright (C) 2000 John Marvin
* Copyright (C) 2001 Matthew Wilcox
+ * Copyright (C) 2014 Helge Deller <deller@gmx.de>
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment. Based heavily on sys_ia32.c and sys_sparc32.c.
@@ -11,83 +12,8 @@
#include <linux/compat.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/file.h>
-#include <linux/signal.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/utsname.h>
-#include <linux/time.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/nfs_fs.h>
-#include <linux/ncp_fs.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/highmem.h>
-#include <linux/highuid.h>
-#include <linux/mman.h>
-#include <linux/binfmts.h>
-#include <linux/namei.h>
-#include <linux/vfs.h>
-#include <linux/ptrace.h>
-#include <linux/swap.h>
#include <linux/syscalls.h>
-#include <asm/types.h>
-#include <asm/uaccess.h>
-#include <asm/semaphore.h>
-#include <asm/mmu_context.h>
-
-#include "sys32.h"
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x) printk x
-#else
-#define DBG(x)
-#endif
-
-/*
- * sys32_execve() executes a new program.
- */
-
-asmlinkage int sys32_execve(struct pt_regs *regs)
-{
- int error;
- char *filename;
-
- DBG(("sys32_execve(%p) r26 = 0x%lx\n", regs, regs->gr[26]));
- filename = getname((const char __user *) regs->gr[26]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = compat_do_execve(filename, compat_ptr(regs->gr[25]),
- compat_ptr(regs->gr[24]), regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
- putname(filename);
-out:
-
- return error;
-}
asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
int r22, int r21, int r20)
@@ -97,386 +23,11 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
return -ENOSYS;
}
-#ifdef CONFIG_SYSCTL
-
-struct __sysctl_args32 {
- u32 name;
- int nlen;
- u32 oldval;
- u32 oldlenp;
- u32 newval;
- u32 newlen;
- u32 __unused[4];
-};
-
-asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
-{
-#ifndef CONFIG_SYSCTL_SYSCALL
- return -ENOSYS;
-#else
- struct __sysctl_args32 tmp;
- int error;
- unsigned int oldlen32;
- size_t oldlen, __user *oldlenp = NULL;
- unsigned long addr = (((long __force)&args->__unused[0]) + 7) & ~7;
-
- DBG(("sysctl32(%p)\n", args));
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- if (tmp.oldval && tmp.oldlenp) {
- /* Duh, this is ugly and might not work if sysctl_args
- is in read-only memory, but do_sysctl does indirectly
- a lot of uaccess in both directions and we'd have to
- basically copy the whole sysctl.c here, and
- glibc's __sysctl uses rw memory for the structure
- anyway. */
- /* a possibly better hack than this, which will avoid the
- * problem if the struct is read only, is to push the
- * 'oldlen' value out to the user's stack instead. -PB
- */
- if (get_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
- return -EFAULT;
- oldlen = oldlen32;
- if (put_user(oldlen, (size_t *)addr))
- return -EFAULT;
- oldlenp = (size_t *)addr;
- }
-
- lock_kernel();
- error = do_sysctl((int __user *)(u64)tmp.name, tmp.nlen,
- (void __user *)(u64)tmp.oldval, oldlenp,
- (void __user *)(u64)tmp.newval, tmp.newlen);
- unlock_kernel();
- if (oldlenp) {
- if (!error) {
- if (get_user(oldlen, (size_t *)addr)) {
- error = -EFAULT;
- } else {
- oldlen32 = oldlen;
- if (put_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
- error = -EFAULT;
- }
- }
- if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
- error = -EFAULT;
- }
- return error;
-#endif
-}
-
-#endif /* CONFIG_SYSCTL */
-
-asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
- struct compat_timespec __user *interval)
-{
- struct timespec t;
- int ret;
-
- KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t);
- if (put_compat_timespec(&t, interval))
- return -EFAULT;
- return ret;
-}
-
-static int
-put_compat_timeval(struct compat_timeval __user *u, struct timeval *t)
-{
- struct compat_timeval t32;
- t32.tv_sec = t->tv_sec;
- t32.tv_usec = t->tv_usec;
- return copy_to_user(u, &t32, sizeof t32);
-}
-
-static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
-{
- long usec;
-
- if (__get_user(o->tv_sec, &i->tv_sec))
- return -EFAULT;
- if (__get_user(usec, &i->tv_usec))
- return -EFAULT;
- o->tv_nsec = usec * 1000;
- return 0;
-}
-
-asmlinkage int
-sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- extern void do_gettimeofday(struct timeval *tv);
-
- if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (put_compat_timeval(tv, &ktv))
- return -EFAULT;
- }
- if (tz) {
- extern struct timezone sys_tz;
- if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
- return -EFAULT;
- }
- return 0;
-}
-
-asmlinkage
-int sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- struct timespec kts;
- struct timezone ktz;
-
- if (tv) {
- if (get_ts32(&kts, tv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_from_user(&ktz, tz, sizeof(ktz)))
- return -EFAULT;
- }
-
- return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
-}
-
-int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
-{
- compat_ino_t ino;
- int err;
-
- if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
- !new_valid_dev(stat->rdev))
- return -EOVERFLOW;
-
- ino = stat->ino;
- if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
- return -EOVERFLOW;
-
- err = put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
- err |= put_user(ino, &statbuf->st_ino);
- err |= put_user(stat->mode, &statbuf->st_mode);
- err |= put_user(stat->nlink, &statbuf->st_nlink);
- err |= put_user(0, &statbuf->st_reserved1);
- err |= put_user(0, &statbuf->st_reserved2);
- err |= put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
- err |= put_user(stat->size, &statbuf->st_size);
- err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
- err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
- err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
- err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
- err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
- err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
- err |= put_user(stat->blksize, &statbuf->st_blksize);
- err |= put_user(stat->blocks, &statbuf->st_blocks);
- err |= put_user(0, &statbuf->__unused1);
- err |= put_user(0, &statbuf->__unused2);
- err |= put_user(0, &statbuf->__unused3);
- err |= put_user(0, &statbuf->__unused4);
- err |= put_user(0, &statbuf->__unused5);
- err |= put_user(0, &statbuf->st_fstype); /* not avail */
- err |= put_user(0, &statbuf->st_realdev); /* not avail */
- err |= put_user(0, &statbuf->st_basemode); /* not avail */
- err |= put_user(0, &statbuf->st_spareshort);
- err |= put_user(stat->uid, &statbuf->st_uid);
- err |= put_user(stat->gid, &statbuf->st_gid);
- err |= put_user(0, &statbuf->st_spare4[0]);
- err |= put_user(0, &statbuf->st_spare4[1]);
- err |= put_user(0, &statbuf->st_spare4[2]);
-
- return err;
-}
-
-/*** copied from mips64 ***/
-/*
- * Ooo, nasty. We need here to frob 32-bit unsigned longs to
- * 64-bit unsigned longs.
- */
-
-static inline int
-get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
- n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
- if (ufdset) {
- unsigned long odd;
-
- if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32)))
- return -EFAULT;
-
- odd = n & 1UL;
- n &= ~1UL;
- while (n) {
- unsigned long h, l;
- __get_user(l, ufdset);
- __get_user(h, ufdset+1);
- ufdset += 2;
- *fdset++ = h << 32 | l;
- n -= 2;
- }
- if (odd)
- __get_user(*fdset, ufdset);
- } else {
- /* Tricky, must clear full unsigned long in the
- * kernel fdset at the end, this makes sure that
- * actually happens.
- */
- memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
- }
- return 0;
-}
-
-static inline void
-set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
-{
- unsigned long odd;
- n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
-
- if (!ufdset)
- return;
-
- odd = n & 1UL;
- n &= ~1UL;
- while (n) {
- unsigned long h, l;
- l = *fdset++;
- h = l >> 32;
- __put_user(l, ufdset);
- __put_user(h, ufdset+1);
- ufdset += 2;
- n -= 2;
- }
- if (odd)
- __put_user(*fdset, ufdset);
-}
-
-struct msgbuf32 {
- int mtype;
- char mtext[1];
-};
-
-asmlinkage long sys32_msgsnd(int msqid,
- struct msgbuf32 __user *umsgp32,
- size_t msgsz, int msgflg)
-{
- struct msgbuf *mb;
- struct msgbuf32 mb32;
- int err;
-
- if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- err = get_user(mb32.mtype, &umsgp32->mtype);
- mb->mtype = mb32.mtype;
- err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz);
-
- if (err)
- err = -EFAULT;
- else
- KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg);
-
- kfree(mb);
- return err;
-}
-
-asmlinkage long sys32_msgrcv(int msqid,
- struct msgbuf32 __user *umsgp32,
- size_t msgsz, long msgtyp, int msgflg)
-{
- struct msgbuf *mb;
- struct msgbuf32 mb32;
- int err, len;
-
- if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg);
-
- if (err >= 0) {
- len = err;
- mb32.mtype = mb->mtype;
- err = put_user(mb32.mtype, &umsgp32->mtype);
- err |= copy_to_user(&umsgp32->mtext, mb->mtext, len);
- if (err)
- err = -EFAULT;
- else
- err = len;
- }
-
- kfree(mb);
- return err;
-}
-
-asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
-{
- mm_segment_t old_fs = get_fs();
- int ret;
- off_t of;
-
- if (offset && get_user(of, offset))
- return -EFAULT;
-
- set_fs(KERNEL_DS);
- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
- set_fs(old_fs);
-
- if (offset && put_user(of, offset))
- return -EFAULT;
-
- return ret;
-}
-
-asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
-{
- mm_segment_t old_fs = get_fs();
- int ret;
- loff_t lof;
-
- if (offset && get_user(lof, offset))
- return -EFAULT;
-
- set_fs(KERNEL_DS);
- ret = sys_sendfile64(out_fd, in_fd, offset ? (loff_t __user *)&lof : NULL, count);
- set_fs(old_fs);
-
- if (offset && put_user(lof, offset))
- return -EFAULT;
-
- return ret;
-}
-
-
-/* lseek() needs a wrapper because 'offset' can be negative, but the top
- * half of the argument has been zeroed by syscall.S.
- */
-
-asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
-{
- return sys_lseek(fd, offset, origin);
-}
-
-asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
-{
- union semun u;
-
- if (cmd == SETVAL) {
- /* Ugh. arg is a union of int,ptr,ptr,ptr, so is 8 bytes.
- * The int should be in the first 4, but our argument
- * frobbing has left it in the last 4.
- */
- u.val = *((int *)&arg + 1);
- return sys_semctl (semid, semnum, cmd, u);
- }
- return sys_semctl (semid, semnum, cmd, arg);
-}
-
-long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
- size_t len)
-{
- return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
- buf, len);
-}
-
-asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
- u32 lenhi, u32 lenlo)
+asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+ compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+ const char __user * pathname)
{
- return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
- ((loff_t)lenhi << 32) | lenlo);
+ return sys_fanotify_mark(fanotify_fd, flags,
+ ((__u64)mask1 << 32) | mask0,
+ dfd, pathname);
}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 69b6eebc466..83878601103 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -1,12 +1,35 @@
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
- * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * System call entry code / Linux gateway page
+ * Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
* Licensed under the GNU GPL.
* thanks to Philipp Rumpf, Mike Shaver and various others
* sorry about the wall, puffin..
*/
+/*
+How does the Linux gateway page on PA-RISC work?
+------------------------------------------------
+The Linux gateway page on PA-RISC is "special".
+It actually has PAGE_GATEWAY bits set (this is linux terminology; in parisc
+terminology it's Execute, promote to PL0) in the page map. So anything
+executing on this page executes with kernel level privilege (there's more to it
+than that: to have this happen, you also have to use a branch with a ,gate
+completer to activate the privilege promotion). The upshot is that everything
+that runs on the gateway page runs at kernel privilege but with the current
+user process address space (although you have access to kernel space via %sr2).
+For the 0x100 syscall entry, we redo the space registers to point to the kernel
+address space (preserving the user address space in %sr3), move to wide mode if
+required, save the user registers and branch into the kernel syscall entry
+point. For all the other functions, we execute at kernel privilege but don't
+flip address spaces. The net effect is that these code snippets execute
+atomically (because the kernel can't be pre-empted) and they may
+perform architecturally forbidden (to PL3) operations (like setting control
+registers).
+*/
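For the 0x100 entry described above, a hedged userspace-side sketch may be
useful (raw_write is hypothetical; the convention - syscall number in %r20
loaded from the branch delay slot, arguments in %r26 downward, result in
%r28 - follows this documentation and glibc's syscall stubs):

#include <asm/unistd.h>		/* __NR_write */

static long raw_write(int fd, const void *buf, unsigned long count)
{
	register unsigned long r26 asm("r26") = fd;
	register unsigned long r25 asm("r25") = (unsigned long)buf;
	register unsigned long r24 asm("r24") = count;
	register unsigned long r28 asm("r28");

	/* ble transfers control to the gateway page at offset 0x100;
	 * the gate instruction there promotes to kernel privilege.
	 * The delay slot loads the syscall number. */
	asm volatile("ble 0x100(%%sr2, %%r0)\n\t"
		     "ldi %4, %%r20"
		     : "=r" (r28), "+r" (r26), "+r" (r25), "+r" (r24)
		     : "i" (__NR_write)
		     : "r1", "r2", "r20", "r29", "r31", "memory");
	return (long)r28;
}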
+
+
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
@@ -15,6 +38,7 @@
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
+#include <asm/cache.h>
#include <linux/linkage.h>
@@ -47,18 +71,17 @@ ENTRY(linux_gateway_page)
KILL_INSN
.endr
- /* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */
+ /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
/* Light-weight-syscall entry must always be located at 0xb0 */
/* WARNING: Keep this number updated with table size changes */
#define __NR_lws_entries (2)
lws_entry:
- /* Unconditional branch to lws_start, located on the
- same gateway page */
- b,n lws_start
+ gate lws_start, %r0 /* increase privilege */
+ depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
- /* Fill from 0xb4 to 0xe0 */
- .rept 11
+ /* Fill from 0xb8 to 0xe0 */
+ .rept 10
KILL_INSN
.endr
@@ -157,7 +180,7 @@ linux_gateway_entry:
STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
STREG %r27, TASK_PT_GR27(%r1) /* user dp */
STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
- STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
+ STREG %r0, TASK_PT_ORIG_R28(%r1) /* don't prohibit restarts */
STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
@@ -181,9 +204,10 @@ linux_gateway_entry:
/* Are we being ptraced? */
mfctl %cr30, %r1
- LDREG TI_TASK(%r1),%r1
- ldw TASK_PTRACE(%r1), %r1
- bb,<,n %r1,31,.Ltracesys
+ LDREG TI_FLAGS(%r1),%r1
+ ldi _TIF_SYSCALL_TRACE_MASK, %r19
+ and,COND(=) %r1, %r19, %r0
+ b,n .Ltracesys
/* Note! We cannot use the syscall table that is mapped
nearby since the gateway page is mapped execute-only. */
@@ -288,26 +312,34 @@ tracesys:
STREG %r18,PT_GR18(%r2)
/* Finished saving things for the debugger */
- ldil L%syscall_trace,%r1
+ copy %r2,%r26
+ ldil L%do_syscall_trace_enter,%r1
ldil L%tracesys_next,%r2
- be R%syscall_trace(%sr7,%r1)
+ be R%do_syscall_trace_enter(%sr7,%r1)
ldo R%tracesys_next(%r2),%r2
-tracesys_next:
+tracesys_next:
+ /* do_syscall_trace_enter either returned the syscallno, or -1L,
+ * so we skip restoring the PT_GR20 below, since we pulled it from
+ * task->thread.regs.gr[20] above.
+ */
+ copy %ret0,%r20
ldil L%sys_call_table,%r1
ldo R%sys_call_table(%r1), %r19
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
- LDREG TASK_PT_GR20(%r1), %r20
LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
LDREG TASK_PT_GR25(%r1), %r25
LDREG TASK_PT_GR24(%r1), %r24
LDREG TASK_PT_GR23(%r1), %r23
-#ifdef CONFIG_64BIT
LDREG TASK_PT_GR22(%r1), %r22
LDREG TASK_PT_GR21(%r1), %r21
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ stw %r22, -52(%r30) /* 5th argument */
+ stw %r21, -56(%r30) /* 6th argument */
#endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0
@@ -336,7 +368,8 @@ tracesys_exit:
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl syscall_trace, %r2
+ ldo TASK_REGS(%r1),%r26
+ bl do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
@@ -353,29 +386,63 @@ tracesys_exit:
tracesys_sigexit:
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
- LDREG 0(%r1), %r1
+ LDREG TI_TASK(%r1), %r1
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl syscall_trace, %r2
- nop
+ bl do_syscall_trace_exit,%r2
+ ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
be,n R%syscall_exit_rfi(%sr7,%r1)
/*********************************************************
- Light-weight-syscall code
+ 32/64-bit Light-Weight-Syscall ABI
+
+ * - Indicates a hint for userspace inline asm
+ implementations.
+
+ Syscall number (caller-saves)
+ - %r20
+ * In asm clobber.
+
+ Argument registers (caller-saves)
+ - %r26, %r25, %r24, %r23, %r22
+ * In asm input.
+
+ Return registers (caller-saves)
+ - %r28 (return), %r21 (errno)
+ * In asm output.
- r20 - lws number
- r26,r25,r24,r23,r22 - Input registers
- r28 - Function return register
- r21 - Error code.
+ Caller-saves registers
+ - %r1, %r27, %r29
+ - %r2 (return pointer)
+ - %r31 (ble link register)
+ * In asm clobber.
- Scracth: Any of the above that aren't being
- currently used, including r1.
+ Callee-saves registers
+ - %r3-%r18
+ - %r30 (stack pointer)
+ * Not in asm clobber.
- Return pointer: r31 (Not usable)
+ If userspace is 32-bit:
+ Callee-saves registers
+ - %r19 (32-bit PIC register)
+
+ Differences from 32-bit calling convention:
+ - Syscall number in %r20
+ - Additional argument register %r22 (arg4)
+ - Callee-saves %r19.
+
+ If userspace is 64-bit:
+ Callee-saves registers
+ - %r27 (64-bit PIC register)
+
+ Differences from 64-bit calling convention:
+ - Syscall number in %r20
+ - Additional argument register %r22 (arg4)
+ - Callee-saves %r27.
Error codes returned by entry path:
@@ -383,9 +450,6 @@ tracesys_sigexit:
*********************************************************/
lws_start:
- /* Gate and ensure we return to userspace */
- gate .+8, %r0
- depi 3, 31, 2, %r31 /* Ensure we return to userspace */
#ifdef CONFIG_64BIT
/* FIXME: If we are a 64-bit kernel just
@@ -402,7 +466,7 @@ lws_start:
#endif
/* Is the lws entry number valid? */
- comiclr,>>= __NR_lws_entries, %r20, %r0
+ comiclr,>> __NR_lws_entries, %r20, %r0
b,n lws_exit_nosys
/* WARNING: Trashing sr2 and sr3 */
@@ -433,7 +497,7 @@ lws_exit:
/* now reset the lowest bit of sp if it was set */
xor %r30,%r1,%r30
#endif
- be,n 0(%sr3, %r31)
+ be,n 0(%sr7, %r31)
@@ -473,7 +537,8 @@ lws_compare_and_swap64:
b,n lws_compare_and_swap
#else
/* If we are not a 64-bit kernel, then we don't
- * implement having 64-bit input registers
+ * have 64-bit input registers, and calling
+ * the 64-bit LWS CAS returns ENOSYS.
*/
b,n lws_exit_nosys
#endif
@@ -488,7 +553,6 @@ lws_compare_and_swap32:
#endif
lws_compare_and_swap:
-#ifdef CONFIG_SMP
/* Load start of lock table */
ldil L%lws_lock_start, %r20
ldo R%lws_lock_start(%r20), %r28
@@ -525,14 +589,15 @@ cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+ /* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
+ ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
-#endif
-/* CONFIG_SMP */
/*
prev = *addr;
@@ -557,30 +622,29 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw 0(%sr3,%r26), %r28
+1: ldw,ma 0(%sr3,%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw %r24, 0(%sr3,%r26)
-#ifdef CONFIG_SMP
+2: stw,ma %r24, 0(%sr3,%r26)
/* Free lock */
- stw %r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+ stw,ma %r20, 0(%sr2,%r20)
+#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
-# endif
#endif
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
3:
- /* Error occured on load or store */
-#ifdef CONFIG_SMP
+ /* Error occurred on load or store */
/* Free lock */
stw %r20, 0(%sr2,%r20)
-# if ENABLE_LWS_DEBUG
+#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
-# endif
#endif
+ ssm PSW_SM_I, %r0
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
@@ -591,10 +655,8 @@ cas_action:
/* Two exception table entries, one for the load,
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
- .section __ex_table,"aw"
- ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
- ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
/* Make sure nothing else is placed on this page */
@@ -609,7 +671,7 @@ ENTRY(end_linux_gateway_page)
.section .rodata,"a"
- .align PAGE_SIZE
+ .align 8
/* Light-weight-syscall table */
/* Start of lws table. */
ENTRY(lws_table)
@@ -618,29 +680,31 @@ ENTRY(lws_table)
END(lws_table)
/* End of lws table */
- .align PAGE_SIZE
+ .align 8
ENTRY(sys_call_table)
#include "syscall_table.S"
END(sys_call_table)
#ifdef CONFIG_64BIT
- .align PAGE_SIZE
+ .align 8
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
END(sys_call_table64)
#endif
-#ifdef CONFIG_SMP
/*
All light-weight-syscall atomic operations
will use this set of locks
+
+ NOTE: The lws_lock_start symbol must be
+ at least 16-byte aligned for safe use
+ with ldcw.
*/
.section .data
- .align PAGE_SIZE
+ .align L1_CACHE_BYTES
ENTRY(lws_lock_start)
/* lws locks */
- .align 16
.rept 16
/* Keep locks aligned at 16-bytes */
.word 1
@@ -650,8 +714,6 @@ ENTRY(lws_lock_start)
.endr
END(lws_lock_start)
.previous
-#endif
-/* CONFIG_SMP for lws_lock_start */
.end
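Given the LWS calling convention spelled out above, a hedged userspace sketch
of the 32-bit compare-and-swap call (modeled on glibc's hppa atomics; lws_cas
is a hypothetical name, and index 0 selects the CAS entry in lws_table):

/* Returns 0 on success, -EAGAIN if the hashed lock was contended
 * (spin and retry in userspace), -EFAULT on a bad address. The old
 * memory value comes back in %r28. */
static inline long lws_cas(volatile int *mem, int oldval, int newval)
{
	register unsigned long lws_mem asm("r26") = (unsigned long)mem;
	register long lws_old asm("r25") = oldval;
	register long lws_new asm("r24") = newval;
	register long lws_ret asm("r28");
	register long lws_errno asm("r21");

	asm volatile("ble 0xb0(%%sr2, %%r0)\n\t"
		     "ldi 0, %%r20"		/* delay slot: LWS index */
		     : "=r" (lws_ret), "=r" (lws_errno)
		     : "r" (lws_mem), "r" (lws_old), "r" (lws_new)
		     : "r1", "r20", "r22", "r23", "r29", "r31", "memory");
	return lws_errno;
}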
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 6b5ac38f5a9..84c5d3a58fa 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -34,7 +34,7 @@
/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
* narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
* implementation is required on wide palinux. Use ENTRY_COMP where
- * the compatability layer has a useful 32-bit implementation.
+ * the compatibility layer has a useful 32-bit implementation.
*/
#define ENTRY_SAME(_name_) .dword sys_##_name_
#define ENTRY_DIFF(_name_) .dword sys32_##_name_
@@ -60,13 +60,13 @@
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
- ENTRY_SAME(open) /* 5 */
+ ENTRY_COMP(open) /* 5 */
ENTRY_SAME(close)
ENTRY_SAME(waitpid)
ENTRY_SAME(creat)
ENTRY_SAME(link)
ENTRY_SAME(unlink) /* 10 */
- ENTRY_DIFF(execve_wrapper)
+ ENTRY_COMP(execve)
ENTRY_SAME(chdir)
/* See comments in kernel/time.c!!! Maybe we don't need this? */
ENTRY_COMP(time)
@@ -76,7 +76,7 @@
ENTRY_SAME(socket)
/* struct stat is MAYBE identical wide and narrow ?? */
ENTRY_COMP(newstat)
- ENTRY_DIFF(lseek)
+ ENTRY_COMP(lseek)
ENTRY_SAME(getpid) /* 20 */
/* the 'void * data' parameter may need re-packing in wide */
ENTRY_COMP(mount)
@@ -87,7 +87,7 @@
ENTRY_SAME(setuid)
ENTRY_SAME(getuid)
ENTRY_COMP(stime) /* 25 */
- ENTRY_SAME(ptrace)
+ ENTRY_COMP(ptrace)
ENTRY_SAME(alarm)
/* see stat comment */
ENTRY_COMP(newfstat)
@@ -127,10 +127,10 @@
ENTRY_SAME(socketpair)
ENTRY_SAME(setpgid)
ENTRY_SAME(send)
- ENTRY_OURS(newuname)
+ ENTRY_SAME(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
- ENTRY_SAME(ustat)
+ ENTRY_COMP(ustat)
ENTRY_SAME(dup2)
ENTRY_SAME(getppid)
ENTRY_SAME(getpgrp) /* 65 */
@@ -149,8 +149,8 @@
ENTRY_COMP(getrlimit)
ENTRY_COMP(getrusage)
/* struct timeval and timezone are maybe?? consistent wide and narrow */
- ENTRY_DIFF(gettimeofday)
- ENTRY_DIFF(settimeofday)
+ ENTRY_COMP(gettimeofday)
+ ENTRY_COMP(settimeofday)
ENTRY_SAME(getgroups) /* 80 */
ENTRY_SAME(setgroups)
/* struct socketaddr... */
@@ -165,8 +165,8 @@
ENTRY_SAME(mmap2)
ENTRY_SAME(mmap) /* 90 */
ENTRY_SAME(munmap)
- ENTRY_SAME(truncate)
- ENTRY_SAME(ftruncate)
+ ENTRY_COMP(truncate)
+ ENTRY_COMP(ftruncate)
ENTRY_SAME(fchmod)
ENTRY_SAME(fchown) /* 95 */
ENTRY_SAME(getpriority)
@@ -198,7 +198,7 @@
ENTRY_SAME(madvise)
ENTRY_SAME(clone_wrapper) /* 120 */
ENTRY_SAME(setdomainname)
- ENTRY_DIFF(sendfile)
+ ENTRY_COMP(sendfile)
/* struct sockaddr... */
ENTRY_SAME(recvfrom)
/* struct timex contains longs */
@@ -234,7 +234,7 @@
ENTRY_SAME(getsid)
ENTRY_SAME(fdatasync)
/* struct __sysctl_args is a mess */
- ENTRY_DIFF(sysctl)
+ ENTRY_COMP(sysctl)
ENTRY_SAME(mlock) /* 150 */
ENTRY_SAME(munlock)
ENTRY_SAME(mlockall)
@@ -247,27 +247,24 @@
ENTRY_SAME(sched_yield)
ENTRY_SAME(sched_get_priority_max)
ENTRY_SAME(sched_get_priority_min) /* 160 */
- /* These 2 would've worked if someone had defined struct timespec
- * carefully, like timeval for example (which is about the same).
- * Unfortunately it contains a long :-( */
- ENTRY_DIFF(sched_rr_get_interval)
+ ENTRY_COMP(sched_rr_get_interval)
ENTRY_COMP(nanosleep)
ENTRY_SAME(mremap)
ENTRY_SAME(setresuid)
ENTRY_SAME(getresuid) /* 165 */
- ENTRY_DIFF(sigaltstack_wrapper)
+ ENTRY_COMP(sigaltstack)
ENTRY_SAME(ni_syscall) /* query_module */
ENTRY_SAME(poll)
/* structs contain pointers and an in_addr... */
- ENTRY_COMP(nfsservctl)
+ ENTRY_SAME(ni_syscall) /* was nfsservctl */
ENTRY_SAME(setresgid) /* 170 */
ENTRY_SAME(getresgid)
ENTRY_SAME(prctl)
/* signals need a careful review */
ENTRY_SAME(rt_sigreturn_wrapper)
- ENTRY_DIFF(rt_sigaction)
- ENTRY_DIFF(rt_sigprocmask) /* 175 */
- ENTRY_DIFF(rt_sigpending)
+ ENTRY_COMP(rt_sigaction)
+ ENTRY_COMP(rt_sigprocmask) /* 175 */
+ ENTRY_COMP(rt_sigpending)
ENTRY_COMP(rt_sigtimedwait)
/* even though the struct siginfo_t is different, it appears like
* all the paths use values which should be same wide and narrow.
@@ -285,9 +282,9 @@
ENTRY_COMP(recvmsg)
ENTRY_SAME(semop) /* 185 */
ENTRY_SAME(semget)
- ENTRY_DIFF(semctl)
- ENTRY_DIFF(msgsnd)
- ENTRY_DIFF(msgrcv)
+ ENTRY_COMP(semctl)
+ ENTRY_COMP(msgsnd)
+ ENTRY_COMP(msgrcv)
ENTRY_SAME(msgget) /* 190 */
ENTRY_SAME(msgctl)
ENTRY_SAME(shmat)
@@ -307,32 +304,32 @@
ENTRY_SAME(gettid)
ENTRY_OURS(readahead)
ENTRY_SAME(tkill)
- ENTRY_SAME(sendfile64)
+ ENTRY_COMP(sendfile64)
ENTRY_COMP(futex) /* 210 */
ENTRY_COMP(sched_setaffinity)
ENTRY_COMP(sched_getaffinity)
ENTRY_SAME(ni_syscall) /* set_thread_area */
ENTRY_SAME(ni_syscall) /* get_thread_area */
- ENTRY_SAME(io_setup) /* 215 */
+ ENTRY_COMP(io_setup) /* 215 */
ENTRY_SAME(io_destroy)
- ENTRY_SAME(io_getevents)
- ENTRY_SAME(io_submit)
+ ENTRY_COMP(io_getevents)
+ ENTRY_COMP(io_submit)
ENTRY_SAME(io_cancel)
ENTRY_SAME(alloc_hugepages) /* 220 */
ENTRY_SAME(free_hugepages)
ENTRY_SAME(exit_group)
- ENTRY_DIFF(lookup_dcookie)
+ ENTRY_COMP(lookup_dcookie)
ENTRY_SAME(epoll_create)
ENTRY_SAME(epoll_ctl) /* 225 */
ENTRY_SAME(epoll_wait)
ENTRY_SAME(remap_file_pages)
ENTRY_SAME(semtimedop)
- ENTRY_SAME(mq_open)
+ ENTRY_COMP(mq_open)
ENTRY_SAME(mq_unlink) /* 230 */
- ENTRY_SAME(mq_timedsend)
- ENTRY_SAME(mq_timedreceive)
- ENTRY_SAME(mq_notify)
- ENTRY_SAME(mq_getsetattr)
+ ENTRY_COMP(mq_timedsend)
+ ENTRY_COMP(mq_timedreceive)
+ ENTRY_COMP(mq_notify)
+ ENTRY_COMP(mq_getsetattr)
ENTRY_COMP(waitid) /* 235 */
ENTRY_OURS(fadvise64_64)
ENTRY_SAME(set_tid_address)
@@ -395,7 +392,7 @@
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
- ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
@@ -403,10 +400,39 @@
ENTRY_COMP(signalfd)
ENTRY_SAME(ni_syscall) /* was timerfd */
ENTRY_SAME(eventfd)
- ENTRY_COMP(fallocate) /* 305 */
+ ENTRY_OURS(fallocate) /* 305 */
ENTRY_SAME(timerfd_create)
ENTRY_COMP(timerfd_settime)
ENTRY_COMP(timerfd_gettime)
+ ENTRY_COMP(signalfd4)
+ ENTRY_SAME(eventfd2) /* 310 */
+ ENTRY_SAME(epoll_create1)
+ ENTRY_SAME(dup3)
+ ENTRY_SAME(pipe2)
+ ENTRY_SAME(inotify_init1)
+ ENTRY_COMP(preadv) /* 315 */
+ ENTRY_COMP(pwritev)
+ ENTRY_COMP(rt_tgsigqueueinfo)
+ ENTRY_SAME(perf_event_open)
+ ENTRY_COMP(recvmmsg)
+ ENTRY_SAME(accept4) /* 320 */
+ ENTRY_SAME(prlimit64)
+ ENTRY_SAME(fanotify_init)
+ ENTRY_DIFF(fanotify_mark)
+ ENTRY_COMP(clock_adjtime)
+ ENTRY_SAME(name_to_handle_at) /* 325 */
+ ENTRY_COMP(open_by_handle_at)
+ ENTRY_SAME(syncfs)
+ ENTRY_SAME(setns)
+ ENTRY_COMP(sendmmsg)
+ ENTRY_COMP(process_vm_readv) /* 330 */
+ ENTRY_COMP(process_vm_writev)
+ ENTRY_SAME(kcmp)
+ ENTRY_SAME(finit_module)
+ ENTRY_SAME(sched_setattr)
+ ENTRY_SAME(sched_getattr) /* 335 */
+ ENTRY_COMP(utimes)
+ ENTRY_SAME(renameat2)
/* Nothing yet */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 24be86bba94..70e105d6242 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -23,10 +23,13 @@
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
+#include <linux/platform_device.h>
+#include <linux/ftrace.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>
@@ -52,14 +55,14 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */
* held off for an arbitrarily long period of time by interrupts being
* disabled, so we may miss one or more ticks.
*/
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
- unsigned long now;
+ unsigned long now, now2;
unsigned long next_tick;
- unsigned long cycles_elapsed, ticks_elapsed;
+ unsigned long cycles_elapsed, ticks_elapsed = 1;
unsigned long cycles_remainder;
unsigned int cpu = smp_processor_id();
- struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
+ struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
@@ -69,44 +72,24 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
/* Initialize next_tick to the expected tick time. */
next_tick = cpuinfo->it_value;
- /* Get current interval timer.
- * CR16 reads as 64 bits in CPU wide mode.
- * CR16 reads as 32 bits in CPU narrow mode.
- */
+ /* Get current cycle counter (Control Register 16). */
now = mfctl(16);
cycles_elapsed = now - next_tick;
- if ((cycles_elapsed >> 5) < cpt) {
+ if ((cycles_elapsed >> 6) < cpt) {
/* use "cheap" math (add/subtract) instead
* of the more expensive div/mul method
*/
cycles_remainder = cycles_elapsed;
- ticks_elapsed = 1;
while (cycles_remainder > cpt) {
cycles_remainder -= cpt;
ticks_elapsed++;
}
} else {
+ /* TODO: Reduce this to one fdiv op */
cycles_remainder = cycles_elapsed % cpt;
- ticks_elapsed = 1 + cycles_elapsed / cpt;
- }
-
- /* Can we differentiate between "early CR16" (aka Scenario 1) and
- * "long delay" (aka Scenario 3)? I don't think so.
- *
- * We expected timer_interrupt to be delivered at least a few hundred
- * cycles after the IT fires. But it's arbitrary how much time passes
- * before we call it "late". I've picked one second.
- */
- if (unlikely(ticks_elapsed > HZ)) {
- /* Scenario 3: very long delay? bad in any case */
- printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
- " cycles %lX rem %lX "
- " next/now %lX/%lX\n",
- cpu,
- cycles_elapsed, cycles_remainder,
- next_tick, now );
+ ticks_elapsed += cycles_elapsed / cpt;
}
/* convert from "division remainder" to "remainder of clock tick" */
@@ -120,18 +103,56 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
cpuinfo->it_value = next_tick;
- /* Skip one clocktick on purpose if we are likely to miss next_tick.
- * We want to avoid the new next_tick being less than CR16.
- * If that happened, itimer wouldn't fire until CR16 wrapped.
- * We'll catch the tick we missed on the tick after that.
+ /* Program the IT when to deliver the next interrupt.
+ * Only bottom 32-bits of next_tick are writable in CR16!
*/
- if (!(cycles_remainder >> 13))
- next_tick += cpt;
-
- /* Program the IT when to deliver the next interrupt. */
- /* Only bottom 32-bits of next_tick are written to cr16. */
mtctl(next_tick, 16);
+ /* Skip one clocktick on purpose if we missed next_tick.
+ * The new CR16 must be "later" than current CR16 otherwise
+ * itimer would not fire until CR16 wrapped - e.g. 4 seconds
+ * later on a 1GHz processor. We'll account for the missed
+ * tick on the next timer interrupt.
+ *
+ * "next_tick - now" will always give the difference regardless
+ * if one or the other wrapped. If "now" is "bigger" we'll end up
+ * with a very large unsigned number.
+ */
+ now2 = mfctl(16);
+ if (next_tick - now2 > cpt)
+ mtctl(next_tick+cpt, 16);
+
+#if 1
+/*
+ * Debug check: warn when reprogramming CR16 took an unusually
+ * long time (more than 12K cycles).
+ */
+ if (unlikely(now2 - now > 0x3000)) /* 12K cycles */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
+ " cyc %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu, now2 - now, cycles_elapsed, cycles_remainder,
+ next_tick, now );
+#endif
+
+ /* Can we differentiate between "early CR16" (aka Scenario 1) and
+ * "long delay" (aka Scenario 3)? I don't think so.
+ *
+ * timer_interrupt will be delivered at least a few hundred cycles
+ * after the IT fires. But it's arbitrary how much time passes
+ * before we call it "late". I've picked one second.
+ *
+ * It's important that no printk() calls sit between reading CR16
+ * and setting up the next value; they may introduce huge variance.
+ */
+ if (unlikely(ticks_elapsed > HZ)) {
+ /* Scenario 3: very long delay? bad in any case */
+ printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
+ " cycles %lX rem %lX "
+ " next/now %lX/%lX\n",
+ cpu,
+ cycles_elapsed, cycles_remainder,
+ next_tick, now );
+ }
/* Done mucking with unreliable delivery of interrupts.
* Go do system house keeping.
@@ -142,11 +163,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
update_process_times(user_mode(get_irq_regs()));
}
- if (cpu == 0) {
- write_seqlock(&xtime_lock);
- do_timer(ticks_elapsed);
- write_sequnlock(&xtime_lock);
- }
+ if (cpu == 0)
+ xtime_update(ticks_elapsed);
return IRQ_HANDLED;
}
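The guard "(cycles_elapsed >> 6) < cpt" above bounds the subtraction loop to roughly 64 iterations, so the cheap path never loses badly to a divide. A standalone sketch of the two-path tick accounting, mirroring the logic in timer_interrupt() (function and parameter names are illustrative, not part of the patch):

    static void account_ticks(unsigned long cycles_elapsed,
                              unsigned long cpt,        /* cycles per tick */
                              unsigned long *ticks,
                              unsigned long *remainder)
    {
            unsigned long rem = cycles_elapsed;
            unsigned long t = 1;            /* the tick that just fired */

            if ((cycles_elapsed >> 6) < cpt) {
                    /* few ticks elapsed: add/subtract beats div/mod */
                    while (rem > cpt) {
                            rem -= cpt;
                            t++;
                    }
            } else {
                    /* many ticks elapsed: one div/mod is cheaper */
                    rem = cycles_elapsed % cpt;
                    t += cycles_elapsed / cpt;
            }
            *ticks = t;
            *remainder = rem;
    }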
@@ -171,7 +189,7 @@ EXPORT_SYMBOL(profile_pc);
/* clock source code */
-static cycle_t read_cr16(void)
+static cycle_t read_cr16(struct clocksource *cs)
{
return get_cycles();
}
@@ -181,8 +199,6 @@ static struct clocksource clocksource_cr16 = {
.rating = 300,
.read = read_cr16,
.mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
- .mult = 0, /* to be set */
- .shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -212,12 +228,39 @@ void __init start_cpu_itimer(void)
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
- cpu_data[cpu].it_value = next_tick;
+ per_cpu(cpu_data, cpu).it_value = next_tick;
}
-void __init time_init(void)
+static struct platform_device rtc_generic_dev = {
+ .name = "rtc-generic",
+ .id = -1,
+};
+
+static int __init rtc_init(void)
+{
+ if (platform_device_register(&rtc_generic_dev) < 0)
+ printk(KERN_ERR "unable to register rtc device...\n");
+
+ /* not necessarily an error */
+ return 0;
+}
+module_init(rtc_init);
+
+void read_persistent_clock(struct timespec *ts)
{
static struct pdc_tod tod_data;
+ if (pdc_tod_read(&tod_data) == 0) {
+ ts->tv_sec = tod_data.tod_sec;
+ ts->tv_nsec = tod_data.tod_usec * 1000;
+ } else {
+ printk(KERN_ERR "Error reading tod clock\n");
+ ts->tv_sec = 0;
+ ts->tv_nsec = 0;
+ }
+}
+
+void __init time_init(void)
+{
unsigned long current_cr16_khz;
clocktick = (100 * PAGE0->mem_10msec) / HZ;
@@ -226,23 +269,5 @@ void __init time_init(void)
/* register at clocksource framework */
current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
- clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz,
- clocksource_cr16.shift);
- clocksource_register(&clocksource_cr16);
-
- if (pdc_tod_read(&tod_data) == 0) {
- unsigned long flags;
-
- write_seqlock_irqsave(&xtime_lock, flags);
- xtime.tv_sec = tod_data.tod_sec;
- xtime.tv_nsec = tod_data.tod_usec * 1000;
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
- } else {
- printk(KERN_ERR "Error reading tod clock\n");
- xtime.tv_sec = 0;
- xtime.tv_nsec = 0;
- }
+ clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}
-
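The time_init() change above drops the hand-computed mult/shift pair in favor of clocksource_register_khz(), which derives both from the counter rate. A minimal sketch of that registration pattern, assuming a free-running cycle counter and <linux/clocksource.h>; my_read_cycles, my_cs and rate_khz are illustrative names:

    static cycle_t my_read_cycles(struct clocksource *cs)
    {
            return get_cycles();            /* free-running counter, e.g. CR16 */
    }

    static struct clocksource my_cs = {
            .name   = "example",
            .rating = 300,
            .read   = my_read_cycles,
            .mask   = CLOCKSOURCE_MASK(BITS_PER_LONG),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init my_cs_init(void)
    {
            unsigned long rate_khz = 250000;        /* assumed counter rate */

            /* the framework computes .mult/.shift from the rate */
            return clocksource_register_khz(&my_cs, rate_khz);
    }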
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index d71cb018a21..f5159381fdd 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -22,14 +22,14 @@
#include <linux/cpu.h>
#include <linux/cache.h>
-static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int num;
for_each_present_cpu(num) {
- register_cpu(&cpu_devices[num], num);
+ register_cpu(&per_cpu(cpu_devices, num), num);
}
return 0;
}
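Converting the static NR_CPUS array to DEFINE_PER_CPU sizes the storage per possible CPU instead of for the configured worst case. A sketch of the access pattern, with illustrative names:

    static DEFINE_PER_CPU(struct cpu, example_cpu_dev);

    static int __init example_topology_init(void)
    {
            int num;

            for_each_present_cpu(num)
                    register_cpu(&per_cpu(example_cpu_dev, num), num);
            return 0;
    }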
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 9dc6dc42f9c..47ee620d15d 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -24,17 +24,16 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
-#include <linux/kallsyms.h>
#include <linux/bug.h>
+#include <linux/ratelimit.h>
#include <asm/assembly.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
@@ -44,14 +43,11 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
- /* dumped to the console via printk) */
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
-void parisc_show_stack(struct task_struct *t, unsigned long *sp,
+static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs);
static int printbinary(char *buf, unsigned long x, int nbits)
@@ -121,18 +117,21 @@ static void print_fr(char *level, struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
- int i;
+ int i, user;
char *level;
unsigned long cr30, cr31;
- level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
+ user = user_mode(regs);
+ level = user ? KERN_DEBUG : KERN_CRIT;
+
+ show_regs_print_info(level);
print_gr(level, regs);
for (i = 0; i < 8; i += 4)
PRINTREGS(level, regs->sr, "sr", RFMT, i);
- if (user_mode(regs))
+ if (user)
print_fr(level, regs);
cr30 = mfctl(30);
@@ -145,23 +144,30 @@ void show_regs(struct pt_regs *regs)
printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
level, current_thread_info()->cpu, cr30, cr31);
printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
- printk(level);
- print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
- printk(level);
- print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
- printk(level);
- print_symbol(" RP(r2): %s\n", regs->gr[2]);
-
- parisc_show_stack(current, NULL, regs);
+
+ if (user) {
+ printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
+ printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
+ printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
+ } else {
+ printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
+ printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
+ printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
+
+ parisc_show_stack(current, NULL, regs);
+ }
}
+static DEFINE_RATELIMIT_STATE(_hppa_rs,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
-void dump_stack(void)
-{
- show_stack(NULL, NULL);
+#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
+ if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
+ printk(fmt, ##__VA_ARGS__); \
+ show_regs(regs); \
+ } \
}
-EXPORT_SYMBOL(dump_stack);
static void do_show_stack(struct unwind_frame_info *info)
{
@@ -173,20 +179,15 @@ static void do_show_stack(struct unwind_frame_info *info)
break;
if (__kernel_text_address(info->ip)) {
- printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
-#ifdef CONFIG_KALLSYMS
- print_symbol("%s\n", info->ip);
-#else
- if ((i & 0x03) == 0)
- printk("\n");
-#endif
+ printk(KERN_CRIT " [<" RFMT ">] %pS\n",
+ info->ip, (void *) info->ip);
i++;
}
}
- printk("\n");
+ printk(KERN_CRIT "\n");
}
-void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs)
{
struct unwind_frame_info info;
@@ -237,27 +238,26 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
if (err == 0)
return; /* STFU */
- printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+ parisc_printk_ratelimited(1, regs,
+ KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
-#ifdef PRINT_USER_FAULTS
- /* XXX for debugging only */
- show_regs(regs);
-#endif
+
return;
}
oops_in_progress = 1;
+ oops_enter();
+
/* Amuse the user in a SPARC fashion */
- if (err) printk(
-KERN_CRIT " _______________________________ \n"
-KERN_CRIT " < Your System ate a SPARC! Gah! >\n"
-KERN_CRIT " ------------------------------- \n"
-KERN_CRIT " \\ ^__^\n"
-KERN_CRIT " \\ (xx)\\_______\n"
-KERN_CRIT " (__)\\ )\\/\\\n"
-KERN_CRIT " U ||----w |\n"
-KERN_CRIT " || ||\n");
+ if (err) printk(KERN_CRIT
+ " _______________________________ \n"
+ " < Your System ate a SPARC! Gah! >\n"
+ " ------------------------------- \n"
+ " \\ ^__^\n"
+ " \\ (xx)\\_______\n"
+ " (__)\\ )\\/\\\n"
+ " U ||----w |\n"
+ " || ||\n");
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
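The parisc_printk_ratelimited() macro added above funnels critical and non-critical fault reports through one shared ratelimit state, gating the non-critical ones behind show_unhandled_signals. A sketch of the same pattern written as a plain function, assuming only the generic ratelimit API; report_user_fault and example_rs are illustrative names:

    static DEFINE_RATELIMIT_STATE(example_rs,
            DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

    static void report_user_fault(struct pt_regs *regs, int code)
    {
            /* non-critical: honour show_unhandled_signals, then pace */
            if (show_unhandled_signals && __ratelimit(&example_rs)) {
                    printk(KERN_DEBUG "fault %d: pid=%d command='%s'\n",
                           code, task_pid_nr(current), current->comm);
                    show_regs(regs);
            }
    }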
@@ -275,7 +275,7 @@ KERN_CRIT " || ||\n");
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
- printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
+ printk(KERN_CRIT "%s() recursion detected.\n", __func__);
local_irq_enable();
while (1);
}
@@ -283,7 +283,7 @@ KERN_CRIT " || ||\n");
show_regs(regs);
dump_stack();
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (in_interrupt())
panic("Fatal exception in interrupt");
@@ -294,14 +294,10 @@ KERN_CRIT " || ||\n");
panic("Fatal exception");
}
+ oops_exit();
do_exit(SIGSEGV);
}
-int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
-{
- return syscall(regs);
-}
-
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -332,14 +328,11 @@ static void handle_break(struct pt_regs *regs)
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
-#ifdef PRINT_USER_FAULTS
- if (unlikely(iir != GDB_BREAK_INSN)) {
- printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+ if (unlikely(iir != GDB_BREAK_INSN))
+ parisc_printk_ratelimited(0, regs,
+ KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
iir & 31, (iir>>13) & ((1<<13)-1),
task_pid_nr(current), current->comm);
- show_regs(regs);
- }
-#endif
/* send standard GDB signal */
handle_gdb_break(regs, TRAP_BRKPT);
@@ -495,7 +488,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
panic(msg);
}
-void handle_interruption(int code, struct pt_regs *regs)
+void notrace handle_interruption(int code, struct pt_regs *regs)
{
unsigned long fault_address = 0;
unsigned long fault_space = 0;
@@ -528,10 +521,10 @@ void handle_interruption(int code, struct pt_regs *regs)
*/
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
- /* Kill the user process later */
- regs->iaoq[0] = 0 | 3;
+ /* Kill the user process later */
+ regs->iaoq[0] = 0 | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
- regs->iasq[0] = regs->iasq[0] = regs->sr[7];
+ regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
return;
}
@@ -547,8 +540,8 @@ void handle_interruption(int code, struct pt_regs *regs)
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
-
- parisc_terminate("High Priority Machine Check (HPMC)",
+
+ parisc_terminate("High Priority Machine Check (HPMC)",
regs, code, 0);
/* NOT REACHED */
@@ -590,13 +583,13 @@ void handle_interruption(int code, struct pt_regs *regs)
/* Break instruction trap */
handle_break(regs);
return;
-
+
case 10:
/* Privileged operation trap */
die_if_kernel("Privileged operation", regs, code);
si.si_code = ILL_PRVOPC;
goto give_sigill;
-
+
case 11:
/* Privileged register trap */
if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
@@ -640,7 +633,7 @@ void handle_interruption(int code, struct pt_regs *regs)
if(user_mode(regs)){
si.si_signo = SIGFPE;
/* Set to zero, and let the userspace app figure it out from
- the insn pointed to by si_addr */
+ the insn pointed to by si_addr */
si.si_code = 0;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
@@ -652,9 +645,10 @@ void handle_interruption(int code, struct pt_regs *regs)
case 14:
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
+ __inc_irq_stat(irq_fpassist_count);
handle_fpe(regs);
return;
-
+
case 15:
/* Data TLB miss fault/Data page fault */
/* Fall through */
@@ -666,15 +660,15 @@ void handle_interruption(int code, struct pt_regs *regs)
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
- Still need to add slow path emulation code here!
- If the insn used a non-shadow register, then the tlb
+ Still need to add slow path emulation code here!
+ If the insn used a non-shadow register, then the tlb
handlers could not have their side-effect (e.g. probe
writing to a target register) emulated since rfir would
erase the changes to said register. Instead we have to
setup everything, call this function we are in, and emulate
by hand. Technically we need to emulate:
fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
- */
+ */
fault_address = regs->ior;
fault_space = regs->isr;
break;
@@ -746,6 +740,10 @@ void handle_interruption(int code, struct pt_regs *regs)
/* Fall Through */
case 27:
/* Data memory protection ID trap */
+ if (code == 27 && !user_mode(regs) &&
+ fixup_exception(regs))
+ return;
+
die_if_kernel("Protection id trap", regs, code);
si.si_code = SEGV_MAPERR;
si.si_signo = SIGSEGV;
@@ -764,11 +762,9 @@ void handle_interruption(int code, struct pt_regs *regs)
default:
if (user_mode(regs)) {
-#ifdef PRINT_USER_FAULTS
- printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
- task_pid_nr(current), current->comm);
- show_regs(regs);
-#endif
+ parisc_printk_ratelimited(0, regs, KERN_DEBUG
+ "handle_interruption() pid=%d command='%s'\n",
+ task_pid_nr(current), current->comm);
/* SIGBUS, for lack of a better one. */
si.si_signo = SIGBUS;
si.si_code = BUS_OBJERR;
@@ -785,15 +781,10 @@ void handle_interruption(int code, struct pt_regs *regs)
if (user_mode(regs)) {
if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
-#ifdef PRINT_USER_FAULTS
- if (fault_space == 0)
- printk(KERN_DEBUG "User Fault on Kernel Space ");
- else
- printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
- code);
- printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
- show_regs(regs);
-#endif
+ parisc_printk_ratelimited(0, regs, KERN_DEBUG
+ "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
+ code, fault_space,
+ task_pid_nr(current), current->comm);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
@@ -805,14 +796,14 @@ void handle_interruption(int code, struct pt_regs *regs)
else {
/*
- * The kernel should never fault on its own address space.
+ * The kernel should never fault on its own address space,
+ * unless pagefault_disable() was called beforehand.
*/
- if (fault_space == 0)
+ if (fault_space == 0 && !in_atomic())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
-
}
}
@@ -822,8 +813,8 @@ void handle_interruption(int code, struct pt_regs *regs)
int __init check_ivt(void *iva)
{
+ extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
- extern const u32 os_hpmc_end[];
int i;
u32 check = 0;
@@ -840,8 +831,7 @@ int __init check_ivt(void *iva)
*ivap++ = 0;
/* Compute Checksum for HPMC handler */
-
- length = os_hpmc_end - os_hpmc;
+ length = os_hpmc_size;
ivap[7] = length;
hpmcp = (u32 *)os_hpmc;
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index aebf3c16887..d7c0acb35ec 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -25,12 +25,14 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/ratelimit.h>
#include <asm/uaccess.h>
+#include <asm/hardirq.h>
/* #define DEBUG_UNALIGNED 1 */
#ifdef DEBUG_UNALIGNED
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
@@ -446,27 +448,23 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
void handle_unaligned(struct pt_regs *regs)
{
- static unsigned long unaligned_count = 0;
- static unsigned long last_time = 0;
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
int modify = 0;
int ret = ERR_NOTHANDLED;
struct siginfo si;
register int flop=0; /* true if this is a flop */
+ __inc_irq_stat(irq_unaligned_count);
+
/* log a message with pacing */
if (user_mode(regs)) {
if (current->thread.flags & PARISC_UAC_SIGBUS) {
goto force_sigbus;
}
- if (unaligned_count > 5 && jiffies - last_time > 5*HZ) {
- unaligned_count = 0;
- last_time = jiffies;
- }
-
- if (!(current->thread.flags & PARISC_UAC_NOPRINT)
- && ++unaligned_count < 5) {
+ if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
+ __ratelimit(&ratelimit)) {
char buf[256];
sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]);
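Here the open-coded jiffies/counter pacing is replaced by the generic helper with a custom window: DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5) permits at most 5 messages every 5 seconds. A minimal sketch of that conversion (warn_unaligned is an illustrative name, not from the patch):

    static void warn_unaligned(struct pt_regs *regs)
    {
            /* at most 5 messages every 5 seconds */
            static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 5);

            if (__ratelimit(&rs))
                    printk(KERN_WARNING "%s(%d): unaligned access to 0x" RFMT "\n",
                           current->comm, task_pid_nr(current), regs->ior);
    }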
@@ -624,15 +622,12 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
-
-#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
-#endif
}
#endif
switch (regs->iir & OPCODE3_MASK)
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 701b2d2d888..ddd988b267a 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
+#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/assembly.h>
@@ -28,7 +29,7 @@
#define dbg(x...)
#endif
-#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
+#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
@@ -79,8 +80,11 @@ find_unwind_entry(unsigned long addr)
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
- if (e)
+ if (e) {
+ /* Move-to-front to exploit common traces */
+ list_move(&table->list, &unwind_tables);
break;
+ }
}
return e;
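The list_move() above is a move-to-front heuristic: backtraces tend to hit the same unwind table many times in a row, so promoting the last hit keeps the common lookup at the head of the list. A self-contained sketch of the idea under assumed types (struct table and lookup are illustrative):

    struct table {
            struct list_head list;
            unsigned long start, end;
    };

    static LIST_HEAD(tables);

    static struct table *lookup(unsigned long addr)
    {
            struct table *t;

            list_for_each_entry(t, &tables, list) {
                    if (addr >= t->start && addr <= t->end) {
                            /* promote the hit; the next lookup in the
                             * same region terminates on iteration one */
                            list_move(&t->list, &tables);
                            return t;
                    }
            }
            return NULL;
    }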
@@ -115,24 +119,18 @@ unwind_table_init(struct unwind_table *table, const char *name,
}
}
+static int cmp_unwind_table_entry(const void *a, const void *b)
+{
+ return ((const struct unwind_table_entry *)a)->region_start
+ - ((const struct unwind_table_entry *)b)->region_start;
+}
+
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
- struct unwind_table_entry el, *p, *q;
-
- for (p = start + 1; p < finish; ++p) {
- if (p[0].region_start < p[-1].region_start) {
- el = *p;
- q = p;
- do {
- q[0] = q[-1];
- --q;
- } while (q > start &&
- el.region_start < q[-1].region_start);
- *q = el;
- }
- }
+ sort(start, finish - start, sizeof(struct unwind_table_entry),
+ cmp_unwind_table_entry, NULL);
}
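Switching to lib/sort's generic heapsort needs only a three-way comparator returning <0, 0 or >0. The comparator above returns the difference of two region_start values; the sketch below shows the comparison-based form on a plain array, which sidesteps any overflow concern (struct range and the function names are illustrative):

    struct range {
            unsigned int start;
    };

    static int cmp_range(const void *a, const void *b)
    {
            const struct range *ra = a, *rb = b;

            if (ra->start < rb->start)
                    return -1;
            return ra->start > rb->start;   /* 1 if greater, 0 if equal */
    }

    static void sort_ranges(struct range *r, size_t n)
    {
            sort(r, n, sizeof(*r), cmp_range, NULL);   /* NULL: generic swap */
    }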
struct unwind_table *
@@ -170,7 +168,7 @@ void unwind_table_remove(struct unwind_table *table)
}
/* Called from setup_arch to import the kernel unwind info */
-static int unwind_init(void)
+int __init unwind_init(void)
{
long start, stop;
register unsigned long gp __asm__ ("r27");
@@ -235,7 +233,6 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
e = find_unwind_entry(info->ip);
if (e == NULL) {
unsigned long sp;
- extern char _stext[], _etext[];
dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
@@ -283,8 +280,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
break;
info->prev_ip = tmp;
sp = info->prev_sp;
- } while (info->prev_ip < (unsigned long)_stext ||
- info->prev_ip > (unsigned long)_etext);
+ } while (!kernel_text_address(info->prev_ip));
info->rp = 0;
@@ -372,7 +368,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
- r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+ r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
if (!r2)
return;
*r2 = *r;
@@ -418,4 +414,28 @@ int unwind_to_user(struct unwind_frame_info *info)
return ret;
}
-module_init(unwind_init);
+unsigned long return_address(unsigned int level)
+{
+ struct unwind_frame_info info;
+ struct pt_regs r;
+ unsigned long sp;
+
+ /* initialize unwind info */
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ memset(&r, 0, sizeof(struct pt_regs));
+ r.iaoq[0] = (unsigned long) current_text_addr();
+ r.gr[2] = (unsigned long) __builtin_return_address(0);
+ r.gr[30] = sp;
+ unwind_frame_init(&info, current, &r);
+
+ /* unwind stack */
+ ++level;
+ do {
+ if (unwind_once(&info) < 0 || info.ip == 0)
+ return 0;
+ if (!kernel_text_address(info.ip))
+ return 0;
+ } while (info.ip && level--);
+
+ return info.ip;
+}
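return_address() above walks the unwinder instead of relying on frame pointers, which is what lets callers on parisc ask "who called my caller". A usage sketch (example_report_caller is illustrative and not part of the patch):

    void example_report_caller(void)
    {
            /* level 1 = the caller of whoever called us */
            unsigned long ip = return_address(1);

            printk(KERN_DEBUG "called via %pS\n", (void *)ip);
    }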
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 50b4a3a25d0..0dacc5ca555 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -6,28 +6,24 @@
* Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
- * Copyright (C) 2006 Helge Deller <deller@gmx.de>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
*/
+
+/*
+ * Put the page table entries (swapper_pg_dir) first in .bss so that
+ * they pick up the .bss alignment (PAGE_SIZE).
+ */
+#define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \
+ *(.data..vm0.pgd) \
+ *(.data..vm0.pte)
+
#include <asm-generic/vmlinux.lds.h>
+
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
@@ -38,7 +34,7 @@ OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
-ENTRY(_stext)
+ENTRY(parisc_kernel_start)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
@@ -48,11 +44,34 @@ SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(8)
+
+ . = ALIGN(PAGE_SIZE);
+ INIT_DATA_SECTION(PAGE_SIZE)
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+ PERCPU_SECTION(8)
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ /* freed after init ends here */
+
_text = .; /* Text and read-only data */
- .text ALIGN(16) : {
+ _stext = .;
+ .text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
*(.text.do_softirq)
*(.text.sys_exit)
*(.text.do_sigaltstack)
@@ -61,29 +80,29 @@ SECTIONS
*(.fixup)
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
- } = 0
- /* End of text section */
+ }
+ . = ALIGN(PAGE_SIZE);
_etext = .;
+ /* End of text section */
- RODATA
- BUG_TABLE
+ /* Start of data section */
+ _sdata = .;
- /* writeable */
- /* Make sure this is page aligned so
- * that we can properly leave these
- * as writable
- */
- . = ALIGN(PAGE_SIZE);
- data_start = .;
+ RO_DATA_SECTION(8)
+
+#ifdef CONFIG_64BIT
. = ALIGN(16);
- /* Exception table */
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
+ /* Linkage tables */
+ .opd : {
+ *(.opd)
+ } PROVIDE (__gp = .);
+ .plt : {
+ *(.plt)
}
-
- NOTES
+ .dlt : {
+ *(.dlt)
+ }
+#endif
/* unwind info */
.PARISC.unwind : {
@@ -92,152 +111,40 @@ SECTIONS
__stop___unwind = .;
}
- /* rarely changed data like cpu maps */
- . = ALIGN(16);
- .data.read_mostly : {
- *(.data.read_mostly)
- }
+ /* writeable */
+ /* Make sure this is page aligned so
+ * that we can properly leave these
+ * as writable
+ */
+ . = ALIGN(PAGE_SIZE);
+ data_start = .;
- . = ALIGN(L1_CACHE_BYTES);
- /* Data */
- .data : {
- DATA_DATA
- CONSTRUCTORS
- }
+ EXCEPTION_TABLE(8)
+ NOTES
- . = ALIGN(L1_CACHE_BYTES);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
+ /* Data */
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
/* PA-RISC locks requires 16-byte alignment */
. = ALIGN(16);
- .data.lock_aligned : {
- *(.data.lock_aligned)
- }
-
- /* nosave data is really only used for software suspend...it's here
- * just in case we ever implement it
- */
- . = ALIGN(PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : {
- *(.data.nosave)
+ .data..lock_aligned : {
+ *(.data..lock_aligned)
}
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
/* End of data section */
_edata = .;
/* BSS */
- __bss_start = .;
- /* page table entries need to be PAGE_SIZE aligned */
- . = ALIGN(PAGE_SIZE);
- .data.vmpages : {
- *(.data.vm0.pmd)
- *(.data.vm0.pgd)
- *(.data.vm0.pte)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
-
-
- /* assembler code expects init_task to be 16k aligned */
- . = ALIGN(16384);
- /* init_task */
- .data.init_task : {
- *(.data.init_task)
- }
-
-#ifdef CONFIG_64BIT
- . = ALIGN(16);
- /* Linkage tables */
- .opd : {
- *(.opd)
- } PROVIDE (__gp = .);
- .plt : {
- *(.plt)
- }
- .dlt : {
- *(.dlt)
- }
-#endif
+ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
- /* reserve space for interrupt stack by aligning __init* to 16k */
- . = ALIGN(16384);
- __init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(16);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
-
- /* alternate instruction replacement. This is a mechanism x86 uses
- * to detect the CPU type and replace generic instruction sequences
- * with CPU specific ones. We don't currently do this in PA, but
- * it seems like a good idea...
- */
- . = ALIGN(4);
- .altinstructions : {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
-
- /* .exit.text is discard at runtime, not link time, to deal with references
- * from .altinstructions and .eh_frame
- */
- .exit.text : {
- EXIT_TEXT
- }
- .exit.data : {
- EXIT_DATA
- }
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
-
- PERCPU(PAGE_SIZE)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
- /* freed after init ends here */
_end = . ;
+ STABS_DEBUG
+ .note 0 : { *(.note) }
+
/* Sections to be discarded */
+ DISCARDS
/DISCARD/ : {
- *(.exitcall.exit)
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
* for static binaries
@@ -250,7 +157,4 @@ SECTIONS
*(.gnu.hash)
#endif
}
-
- STABS_DEBUG
- .note 0 : { *(.note) }
}
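For reference, BSS_FIRST_SECTIONS at the top of this script only works together with C-side placement of the page tables into the named .data..vm0.* sections. A hedged sketch of that declaration style, assuming the usual section-attribute pattern (the exact attributes in arch/parisc/mm/init.c may differ):

    /* First object in .bss: the linker script guarantees the section
     * starts PAGE_SIZE-aligned, so no runtime fixup is needed. */
    pgd_t swapper_pg_dir[PTRS_PER_PGD]
            __attribute__((__section__(".data..vm0.pgd"), aligned(PAGE_SIZE)));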