Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/.gitignore | 1
-rw-r--r--  arch/mips/kernel/8250-platform.c | 1
-rw-r--r--  arch/mips/kernel/Makefile | 108
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 685
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c | 26
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 64
-rw-r--r--  arch/mips/kernel/bmips_vec.S | 285
-rw-r--r--  arch/mips/kernel/branch.c | 537
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c | 15
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c | 130
-rw-r--r--  arch/mips/kernel/cevt-gic.c | 105
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c | 21
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 202
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c | 15
-rw-r--r--  arch/mips/kernel/cevt-txx9.c | 80
-rw-r--r--  arch/mips/kernel/cps-vec.S | 487
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c | 23
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 1330
-rw-r--r--  arch/mips/kernel/crash.c | 70
-rw-r--r--  arch/mips/kernel/crash_dump.c | 66
-rw-r--r--  arch/mips/kernel/csrc-bcm1480.c | 7
-rw-r--r--  arch/mips/kernel/csrc-gic.c | 40
-rw-r--r--  arch/mips/kernel/csrc-ioasic.c | 69
-rw-r--r--  arch/mips/kernel/csrc-r4k.c | 10
-rw-r--r--  arch/mips/kernel/csrc-sb1250.c | 7
-rw-r--r--  arch/mips/kernel/early_printk.c | 17
-rw-r--r--  arch/mips/kernel/early_printk_8250.c | 66
-rw-r--r--  arch/mips/kernel/entry.S | 73
-rw-r--r--  arch/mips/kernel/ftrace.c | 399
-rw-r--r--  arch/mips/kernel/gdb-low.S | 394
-rw-r--r--  arch/mips/kernel/gdb-stub.c | 1155
-rw-r--r--  arch/mips/kernel/genex.S | 200
-rw-r--r--  arch/mips/kernel/gpio_txx9.c | 89
-rw-r--r--  arch/mips/kernel/head.S | 111
-rw-r--r--  arch/mips/kernel/i8253.c | 188
-rw-r--r--  arch/mips/kernel/i8259.c | 96
-rw-r--r--  arch/mips/kernel/idle.c | 249
-rw-r--r--  arch/mips/kernel/init_task.c | 42
-rw-r--r--  arch/mips/kernel/irix5sys.S | 1041
-rw-r--r--  arch/mips/kernel/irixelf.c | 1358
-rw-r--r--  arch/mips/kernel/irixinv.c | 78
-rw-r--r--  arch/mips/kernel/irixioctl.c | 250
-rw-r--r--  arch/mips/kernel/irixsig.c | 888
-rw-r--r--  arch/mips/kernel/irq-gic.c | 380
-rw-r--r--  arch/mips/kernel/irq-gt641xx.c | 52
-rw-r--r--  arch/mips/kernel/irq-msc01.c | 81
-rw-r--r--  arch/mips/kernel/irq-rm7000.c | 26
-rw-r--r--  arch/mips/kernel/irq-rm9000.c | 109
-rw-r--r--  arch/mips/kernel/irq.c | 127
-rw-r--r--  arch/mips/kernel/irq_cpu.c | 113
-rw-r--r--  arch/mips/kernel/irq_txx9.c | 45
-rw-r--r--  arch/mips/kernel/jump_label.c | 54
-rw-r--r--  arch/mips/kernel/kgdb.c | 409
-rw-r--r--  arch/mips/kernel/kprobes.c | 679
-rw-r--r--  arch/mips/kernel/kspd.c | 405
-rw-r--r--  arch/mips/kernel/linux32.c | 464
-rw-r--r--  arch/mips/kernel/machine_kexec.c | 33
-rw-r--r--  arch/mips/kernel/mcount.S | 202
-rw-r--r--  arch/mips/kernel/mips-cm.c | 121
-rw-r--r--  arch/mips/kernel/mips-cpc.c | 80
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c | 99
-rw-r--r--  arch/mips/kernel/mips-mt.c | 46
-rw-r--r--  arch/mips/kernel/mips_ksyms.c | 45
-rw-r--r--  arch/mips/kernel/mips_machine.c | 66
-rw-r--r--  arch/mips/kernel/module-rela.c | 145
-rw-r--r--  arch/mips/kernel/module.c | 208
-rw-r--r--  arch/mips/kernel/octeon_switch.S | 519
-rw-r--r--  arch/mips/kernel/perf_event.c | 69
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 1710
-rw-r--r--  arch/mips/kernel/pm-cps.c | 716
-rw-r--r--  arch/mips/kernel/pm.c | 99
-rw-r--r--  arch/mips/kernel/proc.c | 120
-rw-r--r--  arch/mips/kernel/process.c | 324
-rw-r--r--  arch/mips/kernel/prom.c | 57
-rw-r--r--  arch/mips/kernel/ptrace.c | 520
-rw-r--r--  arch/mips/kernel/ptrace32.c | 211
-rw-r--r--  arch/mips/kernel/r2300_fpu.S | 130
-rw-r--r--  arch/mips/kernel/r2300_switch.S | 28
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 86
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 159
-rw-r--r--  arch/mips/kernel/r6000_fpu.S | 2
-rw-r--r--  arch/mips/kernel/relocate_kernel.S | 118
-rw-r--r--  arch/mips/kernel/reset.c | 2
-rw-r--r--  arch/mips/kernel/rtlx-cmp.c | 119
-rw-r--r--  arch/mips/kernel/rtlx-mt.c | 150
-rw-r--r--  arch/mips/kernel/rtlx.c | 306
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 967
-rw-r--r--  arch/mips/kernel/scall64-64.S | 154
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 139
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 186
-rw-r--r--  arch/mips/kernel/segment.c | 110
-rw-r--r--  arch/mips/kernel/semaphore.c | 168
-rw-r--r--  arch/mips/kernel/setup.c | 358
-rw-r--r--  arch/mips/kernel/signal-common.h | 9
-rw-r--r--  arch/mips/kernel/signal.c | 419
-rw-r--r--  arch/mips/kernel/signal32.c | 415
-rw-r--r--  arch/mips/kernel/signal_n32.c | 108
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 535
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 159
-rw-r--r--  arch/mips/kernel/smp-cps.c | 466
-rw-r--r--  arch/mips/kernel/smp-gic.c | 64
-rw-r--r--  arch/mips/kernel/smp-mt.c | 203
-rw-r--r--  arch/mips/kernel/smp-up.c | 37
-rw-r--r--  arch/mips/kernel/smp.c | 329
-rw-r--r--  arch/mips/kernel/smtc-asm.S | 130
-rw-r--r--  arch/mips/kernel/smtc-proc.c | 92
-rw-r--r--  arch/mips/kernel/smtc.c | 1424
-rw-r--r--  arch/mips/kernel/spinlock_test.c | 141
-rw-r--r--  arch/mips/kernel/spram.c | 221
-rw-r--r--  arch/mips/kernel/stacktrace.c | 26
-rw-r--r--  arch/mips/kernel/sync-r4k.c | 132
-rw-r--r--  arch/mips/kernel/syscall.c | 459
-rw-r--r--  arch/mips/kernel/sysirix.c | 2140
-rw-r--r--  arch/mips/kernel/time.c | 66
-rw-r--r--  arch/mips/kernel/topology.c | 5
-rw-r--r--  arch/mips/kernel/traps.c | 1432
-rw-r--r--  arch/mips/kernel/unaligned.c | 1669
-rw-r--r--  arch/mips/kernel/vdso.c | 109
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 154
-rw-r--r--  arch/mips/kernel/vpe-cmp.c | 180
-rw-r--r--  arch/mips/kernel/vpe-mt.c | 521
-rw-r--r--  arch/mips/kernel/vpe.c | 1101
-rw-r--r--  arch/mips/kernel/watch.c | 196
123 files changed, 18627 insertions(+), 16909 deletions(-)
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/mips/kernel/8250-platform.c b/arch/mips/kernel/8250-platform.c
index cbf3fe20ad1..5c6b2ab1f56 100644
--- a/arch/mips/kernel/8250-platform.c
+++ b/arch/mips/kernel/8250-platform.c
@@ -5,7 +5,6 @@
*
* Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/serial_8250.h>
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9e78e1a4ca1..008a2fed058 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,86 +2,128 @@
# Makefile for the Linux/MIPS kernel.
#
-extra-y := head.o init_task.o vmlinux.lds
+extra-y := head.o vmlinux.lds
-obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
- ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
- time.o topology.o traps.o unaligned.o
+obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
+ prom.o ptrace.o reset.o setup.o signal.o syscall.o \
+ time.o topology.o traps.o unaligned.o watch.o vdso.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_perf_event.o = -pg
+CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
+endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
+obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
-binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
- irix5sys.o sysirix.o
-
+obj-$(CONFIG_DEBUG_FS) += segment.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
+obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
+
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
-obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R5432) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R8000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_RM7000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_RM9000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_NEVADA) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_R10000) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
-obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
+obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
-obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
+obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
+obj-$(CONFIG_CPU_MIPSR2) += spram.o
-obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
-obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
-obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
-obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
+obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC) += irq-gic.o
+obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
-obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
-obj-$(CONFIG_KGDB) += gdb-low.o gdb-stub.o
+obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
+obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
-EXTRA_CFLAGS += -Werror
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
+
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+
+obj-$(CONFIG_MIPS_CM) += mips-cm.o
+obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+
+obj-$(CONFIG_CPU_PM) += pm.o
+obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
+
+#
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
+# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
+# here because the compiler may use DSP ASE instructions (such as lwx) in
+# code paths where we cannot check that the CPU we are running on supports it.
+# Proper abstraction using HAVE_AS_DSP and macros is done in
+# arch/mips/include/asm/mipsregs.h.
+#
+ifeq ($(CONFIG_CPU_MIPSR2), y)
+CFLAGS_DSP = -DHAVE_AS_DSP
+
+CFLAGS_signal.o = $(CFLAGS_DSP)
+CFLAGS_signal32.o = $(CFLAGS_DSP)
+CFLAGS_process.o = $(CFLAGS_DSP)
+CFLAGS_branch.o = $(CFLAGS_DSP)
+CFLAGS_ptrace.o = $(CFLAGS_DSP)
+endif
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
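[Editor's note on the kbuild idioms in the Makefile hunk above: CFLAGS_<obj>.o appends flags when compiling that single object, and CFLAGS_REMOVE_<obj>.o strips flags (here ftrace's -pg) from one object. A minimal hypothetical fragment showing the same idioms — names invented for illustration, not part of the patch:

	# foo.o is built only when CONFIG_FOO=y
	obj-$(CONFIG_FOO)	+= foo.o

	# per-object extra flag, as with CFLAGS_signal.o above
	CFLAGS_foo.o		= -DHAVE_AS_DSP

	# strip the profiling flag from this one object,
	# as with CFLAGS_REMOVE_ftrace.o above
	CFLAGS_REMOVE_foo.o	= -pg
]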
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca136298acd..4bb5107511e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -12,328 +12,483 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/interrupt.h>
-
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
+#include <asm/smp-cps.h>
-#define text(t) __asm__("\n@@@" t)
-#define _offset(type, member) (&(((type *)NULL)->member))
-#define offset(string, ptr, member) \
- __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
-#define constant(string, member) \
- __asm__("\n@@@" string "%X0" : : "ri" (member))
-#define size(string, size) \
- __asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
-#define linefeed text("")
+#include <linux/kvm_host.h>
void output_ptreg_defines(void)
{
- text("/* MIPS pt_regs offsets. */");
- offset("#define PT_R0 ", struct pt_regs, regs[0]);
- offset("#define PT_R1 ", struct pt_regs, regs[1]);
- offset("#define PT_R2 ", struct pt_regs, regs[2]);
- offset("#define PT_R3 ", struct pt_regs, regs[3]);
- offset("#define PT_R4 ", struct pt_regs, regs[4]);
- offset("#define PT_R5 ", struct pt_regs, regs[5]);
- offset("#define PT_R6 ", struct pt_regs, regs[6]);
- offset("#define PT_R7 ", struct pt_regs, regs[7]);
- offset("#define PT_R8 ", struct pt_regs, regs[8]);
- offset("#define PT_R9 ", struct pt_regs, regs[9]);
- offset("#define PT_R10 ", struct pt_regs, regs[10]);
- offset("#define PT_R11 ", struct pt_regs, regs[11]);
- offset("#define PT_R12 ", struct pt_regs, regs[12]);
- offset("#define PT_R13 ", struct pt_regs, regs[13]);
- offset("#define PT_R14 ", struct pt_regs, regs[14]);
- offset("#define PT_R15 ", struct pt_regs, regs[15]);
- offset("#define PT_R16 ", struct pt_regs, regs[16]);
- offset("#define PT_R17 ", struct pt_regs, regs[17]);
- offset("#define PT_R18 ", struct pt_regs, regs[18]);
- offset("#define PT_R19 ", struct pt_regs, regs[19]);
- offset("#define PT_R20 ", struct pt_regs, regs[20]);
- offset("#define PT_R21 ", struct pt_regs, regs[21]);
- offset("#define PT_R22 ", struct pt_regs, regs[22]);
- offset("#define PT_R23 ", struct pt_regs, regs[23]);
- offset("#define PT_R24 ", struct pt_regs, regs[24]);
- offset("#define PT_R25 ", struct pt_regs, regs[25]);
- offset("#define PT_R26 ", struct pt_regs, regs[26]);
- offset("#define PT_R27 ", struct pt_regs, regs[27]);
- offset("#define PT_R28 ", struct pt_regs, regs[28]);
- offset("#define PT_R29 ", struct pt_regs, regs[29]);
- offset("#define PT_R30 ", struct pt_regs, regs[30]);
- offset("#define PT_R31 ", struct pt_regs, regs[31]);
- offset("#define PT_LO ", struct pt_regs, lo);
- offset("#define PT_HI ", struct pt_regs, hi);
+ COMMENT("MIPS pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_LO, pt_regs, lo);
+ OFFSET(PT_HI, pt_regs, hi);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
- offset("#define PT_ACX ", struct pt_regs, acx);
+ OFFSET(PT_ACX, pt_regs, acx);
#endif
- offset("#define PT_EPC ", struct pt_regs, cp0_epc);
- offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
- offset("#define PT_STATUS ", struct pt_regs, cp0_status);
- offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
-#ifdef CONFIG_MIPS_MT_SMTC
- offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
-#endif /* CONFIG_MIPS_MT_SMTC */
- size("#define PT_SIZE ", struct pt_regs);
- linefeed;
+ OFFSET(PT_EPC, pt_regs, cp0_epc);
+ OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+ OFFSET(PT_STATUS, pt_regs, cp0_status);
+ OFFSET(PT_CAUSE, pt_regs, cp0_cause);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ OFFSET(PT_MPL, pt_regs, mpl);
+ OFFSET(PT_MTP, pt_regs, mtp);
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
}
void output_task_defines(void)
{
- text("/* MIPS task_struct offsets. */");
- offset("#define TASK_STATE ", struct task_struct, state);
- offset("#define TASK_THREAD_INFO ", struct task_struct, stack);
- offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_MM ", struct task_struct, mm);
- offset("#define TASK_PID ", struct task_struct, pid);
- size( "#define TASK_STRUCT_SIZE ", struct task_struct);
- linefeed;
+ COMMENT("MIPS task_struct offsets.");
+ OFFSET(TASK_STATE, task_struct, state);
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_CC_STACKPROTECTOR)
+ OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
}
void output_thread_info_defines(void)
{
- text("/* MIPS thread_info offsets. */");
- offset("#define TI_TASK ", struct thread_info, task);
- offset("#define TI_EXEC_DOMAIN ", struct thread_info, exec_domain);
- offset("#define TI_FLAGS ", struct thread_info, flags);
- offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
- offset("#define TI_CPU ", struct thread_info, cpu);
- offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
- offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
- offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
- offset("#define TI_REGS ", struct thread_info, regs);
- constant("#define _THREAD_SIZE ", THREAD_SIZE);
- constant("#define _THREAD_MASK ", THREAD_MASK);
- linefeed;
+ COMMENT("MIPS thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+ OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ BLANK();
}
void output_thread_defines(void)
{
- text("/* MIPS specific thread_struct offsets. */");
- offset("#define THREAD_REG16 ", struct task_struct, thread.reg16);
- offset("#define THREAD_REG17 ", struct task_struct, thread.reg17);
- offset("#define THREAD_REG18 ", struct task_struct, thread.reg18);
- offset("#define THREAD_REG19 ", struct task_struct, thread.reg19);
- offset("#define THREAD_REG20 ", struct task_struct, thread.reg20);
- offset("#define THREAD_REG21 ", struct task_struct, thread.reg21);
- offset("#define THREAD_REG22 ", struct task_struct, thread.reg22);
- offset("#define THREAD_REG23 ", struct task_struct, thread.reg23);
- offset("#define THREAD_REG29 ", struct task_struct, thread.reg29);
- offset("#define THREAD_REG30 ", struct task_struct, thread.reg30);
- offset("#define THREAD_REG31 ", struct task_struct, thread.reg31);
- offset("#define THREAD_STATUS ", struct task_struct,
+ COMMENT("MIPS specific thread_struct offsets.");
+ OFFSET(THREAD_REG16, task_struct, thread.reg16);
+ OFFSET(THREAD_REG17, task_struct, thread.reg17);
+ OFFSET(THREAD_REG18, task_struct, thread.reg18);
+ OFFSET(THREAD_REG19, task_struct, thread.reg19);
+ OFFSET(THREAD_REG20, task_struct, thread.reg20);
+ OFFSET(THREAD_REG21, task_struct, thread.reg21);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_STATUS, task_struct,
thread.cp0_status);
- offset("#define THREAD_FPU ", struct task_struct, thread.fpu);
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
- offset("#define THREAD_BVADDR ", struct task_struct, \
+ OFFSET(THREAD_BVADDR, task_struct, \
thread.cp0_badvaddr);
- offset("#define THREAD_BUADDR ", struct task_struct, \
+ OFFSET(THREAD_BUADDR, task_struct, \
thread.cp0_baduaddr);
- offset("#define THREAD_ECODE ", struct task_struct, \
+ OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
- offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
- offset("#define THREAD_TRAMP ", struct task_struct, \
- thread.irix_trampoline);
- offset("#define THREAD_OLDCTX ", struct task_struct, \
- thread.irix_oldctx);
- linefeed;
+ BLANK();
}
void output_thread_fpu_defines(void)
{
- offset("#define THREAD_FPR0 ",
- struct task_struct, thread.fpu.fpr[0]);
- offset("#define THREAD_FPR1 ",
- struct task_struct, thread.fpu.fpr[1]);
- offset("#define THREAD_FPR2 ",
- struct task_struct, thread.fpu.fpr[2]);
- offset("#define THREAD_FPR3 ",
- struct task_struct, thread.fpu.fpr[3]);
- offset("#define THREAD_FPR4 ",
- struct task_struct, thread.fpu.fpr[4]);
- offset("#define THREAD_FPR5 ",
- struct task_struct, thread.fpu.fpr[5]);
- offset("#define THREAD_FPR6 ",
- struct task_struct, thread.fpu.fpr[6]);
- offset("#define THREAD_FPR7 ",
- struct task_struct, thread.fpu.fpr[7]);
- offset("#define THREAD_FPR8 ",
- struct task_struct, thread.fpu.fpr[8]);
- offset("#define THREAD_FPR9 ",
- struct task_struct, thread.fpu.fpr[9]);
- offset("#define THREAD_FPR10 ",
- struct task_struct, thread.fpu.fpr[10]);
- offset("#define THREAD_FPR11 ",
- struct task_struct, thread.fpu.fpr[11]);
- offset("#define THREAD_FPR12 ",
- struct task_struct, thread.fpu.fpr[12]);
- offset("#define THREAD_FPR13 ",
- struct task_struct, thread.fpu.fpr[13]);
- offset("#define THREAD_FPR14 ",
- struct task_struct, thread.fpu.fpr[14]);
- offset("#define THREAD_FPR15 ",
- struct task_struct, thread.fpu.fpr[15]);
- offset("#define THREAD_FPR16 ",
- struct task_struct, thread.fpu.fpr[16]);
- offset("#define THREAD_FPR17 ",
- struct task_struct, thread.fpu.fpr[17]);
- offset("#define THREAD_FPR18 ",
- struct task_struct, thread.fpu.fpr[18]);
- offset("#define THREAD_FPR19 ",
- struct task_struct, thread.fpu.fpr[19]);
- offset("#define THREAD_FPR20 ",
- struct task_struct, thread.fpu.fpr[20]);
- offset("#define THREAD_FPR21 ",
- struct task_struct, thread.fpu.fpr[21]);
- offset("#define THREAD_FPR22 ",
- struct task_struct, thread.fpu.fpr[22]);
- offset("#define THREAD_FPR23 ",
- struct task_struct, thread.fpu.fpr[23]);
- offset("#define THREAD_FPR24 ",
- struct task_struct, thread.fpu.fpr[24]);
- offset("#define THREAD_FPR25 ",
- struct task_struct, thread.fpu.fpr[25]);
- offset("#define THREAD_FPR26 ",
- struct task_struct, thread.fpu.fpr[26]);
- offset("#define THREAD_FPR27 ",
- struct task_struct, thread.fpu.fpr[27]);
- offset("#define THREAD_FPR28 ",
- struct task_struct, thread.fpu.fpr[28]);
- offset("#define THREAD_FPR29 ",
- struct task_struct, thread.fpu.fpr[29]);
- offset("#define THREAD_FPR30 ",
- struct task_struct, thread.fpu.fpr[30]);
- offset("#define THREAD_FPR31 ",
- struct task_struct, thread.fpu.fpr[31]);
+ OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
+ OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
+ OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
+ OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
+ OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
+ OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
+ OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
+ OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
+ OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
+ OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
+ OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
+ OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
+ OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
+ OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
+ OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
+ OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
+ OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
+ OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
+ OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
+ OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
+ OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
+ OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
+ OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
+ OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
+ OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
+ OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
+ OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
+ OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
+ OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
+ OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
+ OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
+ OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
- offset("#define THREAD_FCR31 ",
- struct task_struct, thread.fpu.fcr31);
- linefeed;
+ /* the least significant 64 bits of each FP register */
+ OFFSET(THREAD_FPR0_LS64, task_struct,
+ thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR1_LS64, task_struct,
+ thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR2_LS64, task_struct,
+ thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR3_LS64, task_struct,
+ thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR4_LS64, task_struct,
+ thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR5_LS64, task_struct,
+ thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR6_LS64, task_struct,
+ thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR7_LS64, task_struct,
+ thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR8_LS64, task_struct,
+ thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR9_LS64, task_struct,
+ thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR10_LS64, task_struct,
+ thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR11_LS64, task_struct,
+ thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR12_LS64, task_struct,
+ thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR13_LS64, task_struct,
+ thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR14_LS64, task_struct,
+ thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR15_LS64, task_struct,
+ thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR16_LS64, task_struct,
+ thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR17_LS64, task_struct,
+ thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR18_LS64, task_struct,
+ thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR19_LS64, task_struct,
+ thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR20_LS64, task_struct,
+ thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR21_LS64, task_struct,
+ thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR22_LS64, task_struct,
+ thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR23_LS64, task_struct,
+ thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR24_LS64, task_struct,
+ thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR25_LS64, task_struct,
+ thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR26_LS64, task_struct,
+ thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR27_LS64, task_struct,
+ thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR28_LS64, task_struct,
+ thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR29_LS64, task_struct,
+ thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR30_LS64, task_struct,
+ thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR31_LS64, task_struct,
+ thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
+
+ OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+ BLANK();
}
void output_mm_defines(void)
{
- text("/* Size of struct page */");
- size("#define STRUCT_PAGE_SIZE ", struct page);
- linefeed;
- text("/* Linux mm_struct offsets. */");
- offset("#define MM_USERS ", struct mm_struct, mm_users);
- offset("#define MM_PGD ", struct mm_struct, pgd);
- offset("#define MM_CONTEXT ", struct mm_struct, context);
- linefeed;
- constant("#define _PAGE_SIZE ", PAGE_SIZE);
- constant("#define _PAGE_SHIFT ", PAGE_SHIFT);
- linefeed;
- constant("#define _PGD_T_SIZE ", sizeof(pgd_t));
- constant("#define _PMD_T_SIZE ", sizeof(pmd_t));
- constant("#define _PTE_T_SIZE ", sizeof(pte_t));
- linefeed;
- constant("#define _PGD_T_LOG2 ", PGD_T_LOG2);
- constant("#define _PMD_T_LOG2 ", PMD_T_LOG2);
- constant("#define _PTE_T_LOG2 ", PTE_T_LOG2);
- linefeed;
- constant("#define _PGD_ORDER ", PGD_ORDER);
- constant("#define _PMD_ORDER ", PMD_ORDER);
- constant("#define _PTE_ORDER ", PTE_ORDER);
- linefeed;
- constant("#define _PMD_SHIFT ", PMD_SHIFT);
- constant("#define _PGDIR_SHIFT ", PGDIR_SHIFT);
- linefeed;
- constant("#define _PTRS_PER_PGD ", PTRS_PER_PGD);
- constant("#define _PTRS_PER_PMD ", PTRS_PER_PMD);
- constant("#define _PTRS_PER_PTE ", PTRS_PER_PTE);
- linefeed;
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
+ DEFINE(_PTE_ORDER, PTE_ORDER);
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
}
#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
- text("/* Linux sigcontext offsets. */");
- offset("#define SC_REGS ", struct sigcontext, sc_regs);
- offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_ACX ", struct sigcontext, sc_acx);
- offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
- offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
- offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
- offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
- offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
- offset("#define SC_LO1 ", struct sigcontext, sc_lo1);
- offset("#define SC_HI2 ", struct sigcontext, sc_hi2);
- offset("#define SC_LO2 ", struct sigcontext, sc_lo2);
- offset("#define SC_HI3 ", struct sigcontext, sc_hi3);
- offset("#define SC_LO3 ", struct sigcontext, sc_lo3);
- linefeed;
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_ACX, sigcontext, sc_acx);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
+ OFFSET(SC_HI1, sigcontext, sc_hi1);
+ OFFSET(SC_LO1, sigcontext, sc_lo1);
+ OFFSET(SC_HI2, sigcontext, sc_hi2);
+ OFFSET(SC_LO2, sigcontext, sc_lo2);
+ OFFSET(SC_HI3, sigcontext, sc_hi3);
+ OFFSET(SC_LO3, sigcontext, sc_lo3);
+ BLANK();
}
#endif
#ifdef CONFIG_64BIT
void output_sc_defines(void)
{
- text("/* Linux sigcontext offsets. */");
- offset("#define SC_REGS ", struct sigcontext, sc_regs);
- offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
- offset("#define SC_MDHI ", struct sigcontext, sc_mdhi);
- offset("#define SC_MDLO ", struct sigcontext, sc_mdlo);
- offset("#define SC_PC ", struct sigcontext, sc_pc);
- offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
- linefeed;
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ BLANK();
}
#endif
#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
{
- text("/* Linux 32-bit sigcontext offsets. */");
- offset("#define SC32_FPREGS ", struct sigcontext32, sc_fpregs);
- offset("#define SC32_FPC_CSR ", struct sigcontext32, sc_fpc_csr);
- offset("#define SC32_FPC_EIR ", struct sigcontext32, sc_fpc_eir);
- linefeed;
+ COMMENT("Linux 32-bit sigcontext offsets.");
+ OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
+ OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
+ OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+ BLANK();
}
#endif
void output_signal_defined(void)
{
- text("/* Linux signal numbers. */");
- constant("#define _SIGHUP ", SIGHUP);
- constant("#define _SIGINT ", SIGINT);
- constant("#define _SIGQUIT ", SIGQUIT);
- constant("#define _SIGILL ", SIGILL);
- constant("#define _SIGTRAP ", SIGTRAP);
- constant("#define _SIGIOT ", SIGIOT);
- constant("#define _SIGABRT ", SIGABRT);
- constant("#define _SIGEMT ", SIGEMT);
- constant("#define _SIGFPE ", SIGFPE);
- constant("#define _SIGKILL ", SIGKILL);
- constant("#define _SIGBUS ", SIGBUS);
- constant("#define _SIGSEGV ", SIGSEGV);
- constant("#define _SIGSYS ", SIGSYS);
- constant("#define _SIGPIPE ", SIGPIPE);
- constant("#define _SIGALRM ", SIGALRM);
- constant("#define _SIGTERM ", SIGTERM);
- constant("#define _SIGUSR1 ", SIGUSR1);
- constant("#define _SIGUSR2 ", SIGUSR2);
- constant("#define _SIGCHLD ", SIGCHLD);
- constant("#define _SIGPWR ", SIGPWR);
- constant("#define _SIGWINCH ", SIGWINCH);
- constant("#define _SIGURG ", SIGURG);
- constant("#define _SIGIO ", SIGIO);
- constant("#define _SIGSTOP ", SIGSTOP);
- constant("#define _SIGTSTP ", SIGTSTP);
- constant("#define _SIGCONT ", SIGCONT);
- constant("#define _SIGTTIN ", SIGTTIN);
- constant("#define _SIGTTOU ", SIGTTOU);
- constant("#define _SIGVTALRM ", SIGVTALRM);
- constant("#define _SIGPROF ", SIGPROF);
- constant("#define _SIGXCPU ", SIGXCPU);
- constant("#define _SIGXFSZ ", SIGXFSZ);
- linefeed;
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGEMT, SIGEMT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+void output_octeon_cop2_state_defines(void)
+{
+ COMMENT("Octeon specific octeon_cop2_state offsets.");
+ OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
+ OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
+ OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
+ OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
+ OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
+ OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
+ OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
+ OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
+ OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
+ OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
+ OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
+ OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
+ OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
+ OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
+ OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
+ OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
+ OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
+ OFFSET(THREAD_CP2, task_struct, thread.cp2);
+ OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_HIBERNATION
+void output_pbe_defines(void)
+{
+ COMMENT(" Linux struct pbe offsets. ");
+ OFFSET(PBE_ADDRESS, pbe, address);
+ OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
+ OFFSET(PBE_NEXT, pbe, next);
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
+{
+ COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+ OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+ OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+ OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+ OFFSET(SSS_SP, mips_static_suspend_state, sp);
+ BLANK();
}
+#endif
-void output_irq_cpustat_t_defines(void)
+void output_kvm_defines(void)
{
- text("/* Linux irq_cpustat_t offsets. */");
- offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending);
- size("#define IC_IRQ_CPUSTAT_T ", irq_cpustat_t);
- linefeed;
+ COMMENT(" KVM/MIPS Specfic offsets. ");
+ DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+ OFFSET(VCPU_RUN, kvm_vcpu, run);
+ OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+ OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+ OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+ OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+ OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+ OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+ OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+ OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+ OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+ OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+ OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+ OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+ OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+ OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+ OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+ OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+ OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+ OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+ OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+ OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+ OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+ OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+ OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+ OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+ OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+ OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+ OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+ OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+ OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+ OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+ OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+ OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+ OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+ OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+ OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+ OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+ OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+ OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+ OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+ OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+ OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+ OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+ OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+ OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+ OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+ OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+ OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+ OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+ OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+ OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+ BLANK();
}
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+ COMMENT(" MIPS CPS offsets. ");
+
+ OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+ OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+ DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+ OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+ OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+ OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+ DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
+}
+#endif
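[Editor's note: the asm-offsets.c conversion above replaces the MIPS-private text()/offset()/constant() string hack with the generic helpers from <linux/kbuild.h>. Those helpers work the same way underneath — each emits a "->NAME value" marker into the compiler's assembly output, which a sed rule in the kbuild machinery rewrites into #define lines in asm-offsets.h. A paraphrased sketch of the helpers (see include/linux/kbuild.h for the authoritative version):

	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define BLANK() \
		asm volatile("\n.ascii \"->\"" : : )

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

	#define COMMENT(x) \
		asm volatile("\n.ascii \"->#" x "\"")
]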
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 77db3473dea..1188e00bb12 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -48,12 +48,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
+#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
@@ -66,8 +67,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -87,7 +88,7 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
@@ -102,8 +103,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- long rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
@@ -118,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 08f4cd781ee..7faf5f2bee2 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -28,6 +28,18 @@ typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64-bit FP (FR=1) on a system which does not support it, we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO 0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
+#endif
+
+/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(hdr) \
@@ -44,18 +56,42 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
__res = 0; \
+ if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
+ __res = 0; \
\
__res; \
})
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE 0x3fff8000UL
+#else
#define TASK32_SIZE 0x7fff8000UL
+#endif
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
+
+/*
+ * When this file is selected, we are definitely running a 64-bit kernel,
+ * so we use the compat register definitions from asm/reg.h.
+ */
+#define WANT_COMPAT_REG_H
+
+/* These MUST be defined before elf.h gets included */
+extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs);
+#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
+#define ELF_CORE_COPY_TASK_REGS(_tsk, _dest) \
+({ \
+ int __res = 1; \
+ elf32_core_copy_regs(*(_dest), task_pt_regs(_tsk)); \
+ __res; \
+})
+
#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
+#include <linux/math64.h>
#define elf_prstatus elf_prstatus32
struct elf_prstatus32
@@ -68,8 +104,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -89,7 +125,7 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
@@ -104,14 +140,11 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- long rem;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+ u32 rem;
+ value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
-#undef ELF_CORE_COPY_REGS
-#define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs);
-
void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
@@ -143,4 +176,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
new file mode 100644
index 00000000000..290c23b5167
--- /dev/null
+++ b/arch/mips/kernel/bmips_vec.S
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * Reset/NMI/re-entry vectors for BMIPS processors
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+ .macro BARRIER
+ .set mips32
+ _ssnop
+ _ssnop
+ _ssnop
+ .set mips0
+ .endm
+
+/***********************************************************************
+ * Alternate CPU1 startup vector for BMIPS4350
+ *
+ * On some systems the bootloader has already started CPU1 and configured
+ * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
+ * triggered by the SW1 interrupt. If that is the case we try to move
+ * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
+ ***********************************************************************/
+
+LEAF(bmips_smp_movevec)
+ la k0, 1f
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+
+1:
+ /* clear IV, pending IPIs */
+ mtc0 zero, CP0_CAUSE
+
+ /* re-enable IRQs to wait for SW1 */
+ li k0, ST0_IE | ST0_BEV | STATUSF_IP1
+ mtc0 k0, CP0_STATUS
+
+ /* set up CPU1 CBR; move BASE to 0xa000_0000 */
+ li k0, 0xff400000
+ mtc0 k0, $22, 6
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
+ or k0, k1
+ li k1, 0xa0080000
+ sw k1, 0(k0)
+
+ /* wait here for SW1 interrupt from bmips_boot_secondary() */
+ wait
+
+ la k0, bmips_reset_nmi_vec
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+END(bmips_smp_movevec)
+
+/***********************************************************************
+ * Reset/NMI vector
+ * For BMIPS processors that can relocate their exception vectors, this
+ * entire function gets copied to 0x8000_0000.
+ ***********************************************************************/
+
+NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
+ .set push
+ .set noat
+ .align 4
+
+#ifdef CONFIG_SMP
+ /* if the NMI bit is clear, assume this is a CPU1 reset instead */
+ li k1, (1 << 19)
+ mfc0 k0, CP0_STATUS
+ and k0, k1
+ beqz k0, bmips_smp_entry
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ li k1, PRID_IMP_BMIPS5000
+ andi k0, 0xff00
+ bne k0, k1, 1f
+
+ /* if we're not on core 0, this must be the SMP boot signal */
+ li k1, (3 << 25)
+ mfc0 k0, $22
+ and k0, k1
+ bnez k0, bmips_smp_entry
+1:
+#endif /* CONFIG_CPU_BMIPS5000 */
+#endif /* CONFIG_SMP */
+
+ /* nope, it's just a regular NMI */
+ SAVE_ALL
+ move a0, sp
+
+ /* clear EXL, ERL, BEV so that TLB refills still work */
+ mfc0 k0, CP0_STATUS
+ li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
+ or k0, k1
+ xor k0, k1
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* jump to the NMI handler function */
+ la k0, nmi_handler
+ jr k0
+
+ RESTORE_ALL
+ .set arch=r4000
+ eret
+
+/***********************************************************************
+ * CPU1 reset vector (used for the initial boot only)
+ * This is still part of bmips_reset_nmi_vec().
+ ***********************************************************************/
+
+#ifdef CONFIG_SMP
+
+bmips_smp_entry:
+
+ /* set up CP0 STATUS; enable FPU */
+ li k0, 0x30000000
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* set local CP0 CONFIG to make kseg0 cacheable, write-back */
+ mfc0 k0, CP0_CONFIG
+ ori k0, 0x07
+ xori k0, 0x04
+ mtc0 k0, CP0_CONFIG
+
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ li k1, PRID_IMP_BMIPS43XX
+ bne k0, k1, 2f
+
+ /* initialize CPU1's local I-cache */
+ li k0, 0x80000000
+ li k1, 0x80010000
+ mtc0 zero, $28
+ mtc0 zero, $28, 1
+ BARRIER
+
+1: cache Index_Store_Tag_I, 0(k0)
+ addiu k0, 16
+ bne k0, k1, 1b
+
+ b 3f
+2:
+#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ /* set exception vector base */
+ li k1, PRID_IMP_BMIPS5000
+ bne k0, k1, 3f
+
+ la k0, ebase
+ lw k0, 0(k0)
+ mtc0 k0, $15, 1
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+3:
+ /* jump back to kseg0 in case we need to remap the kseg1 area */
+ la k0, 1f
+ jr k0
+1:
+ la k0, bmips_enable_xks01
+ jalr k0
+
+ /* use temporary stack to set up upper memory TLB */
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+
+ /* switch to permanent stack and continue booting */
+
+ .global bmips_secondary_reentry
+bmips_secondary_reentry:
+ la k0, bmips_smp_boot_sp
+ lw sp, 0(k0)
+ la k0, bmips_smp_boot_gp
+ lw gp, 0(k0)
+ la k0, start_secondary
+ jr k0
+
+#endif /* CONFIG_SMP */
+
+ .align 4
+ .global bmips_reset_nmi_vec_end
+bmips_reset_nmi_vec_end:
+
+END(bmips_reset_nmi_vec)
+
+ .set pop
+ .previous
+
+/***********************************************************************
+ * CPU1 warm restart vector (used for second and subsequent boots).
+ * Also used for S2 standby recovery (PM).
+ * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
+ ***********************************************************************/
+
+LEAF(bmips_smp_int_vec)
+
+ .align 4
+ mfc0 k0, CP0_STATUS
+ ori k0, 0x01
+ xori k0, 0x01
+ mtc0 k0, CP0_STATUS
+ eret
+
+ .align 4
+ .global bmips_smp_int_vec_end
+bmips_smp_int_vec_end:
+
+END(bmips_smp_int_vec)
+
+/***********************************************************************
+ * XKS01 support
+ * Certain CPUs support extending kseg0 to 1024MB.
+ ***********************************************************************/
+
+LEAF(bmips_enable_xks01)
+
+#if defined(CONFIG_XKS01)
+ mfc0 t0, CP0_PRID
+ andi t2, t0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4380)
+ li t1, PRID_IMP_BMIPS43XX
+ bne t2, t1, 1f
+
+ andi t0, 0xff
+ addiu t1, t0, -PRID_REV_BMIPS4380_HI
+ bgtz t1, 2f
+ addiu t0, -PRID_REV_BMIPS4380_LO
+ bltz t0, 2f
+
+ mfc0 t0, $22, 3
+ li t1, 0x1ff0
+ li t2, (1 << 12) | (1 << 9)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 3
+ BARRIER
+ b 2f
+1:
+#endif /* CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ li t1, PRID_IMP_BMIPS5000
+ bne t2, t1, 2f
+
+ mfc0 t0, $22, 5
+ li t1, 0x01ff
+ li t2, (1 << 8) | (1 << 5)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 5
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+2:
+#endif /* defined(CONFIG_XKS01) */
+
+ jr ra
+
+END(bmips_enable_xks01)
+
+ .previous
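[Editor's note on an idiom that recurs in bmips_vec.S above: the paired or/xor with the same mask (used on CP0_STATUS and in bmips_enable_xks01) clears the mask bits without needing a register holding the complement, because (x | m) ^ m == x & ~m — the or sets every mask bit, the xor then clears exactly those bits and leaves the rest untouched. A quick standalone check of the identity:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t x = 0xdeadbeef, m = 0x000001ff;

		/* or-then-xor with one mask clears exactly the mask bits */
		assert(((x | m) ^ m) == (x & ~m));
		return 0;
	}
]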
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 6b5df8bfab8..7b2df224f04 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,38 +9,404 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/module.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
/*
- * Compute the return address and do emulate branch simulation, if required.
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
*/
-int __compute_return_epc(struct pt_regs *regs)
+int __isa_exception_epc(struct pt_regs *regs)
{
- unsigned int __user *addr;
- unsigned int bit, fcr31, dspcontrol;
+ unsigned short inst;
+ long epc = regs->cp0_epc;
+
+ /* Calculate exception PC in branch delay slot. */
+ if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+ /* This should never happen because delay slot was checked. */
+ force_sig(SIGSEGV, current);
+ return epc;
+ }
+ if (cpu_has_mips16) {
+ if (((union mips16e_instruction)inst).ri.opcode
+ == MIPS16e_jal_op)
+ epc += 4;
+ else
+ epc += 2;
+ } else if (mm_insn_16bit(inst))
+ epc += 2;
+ else
+ epc += 4;
+
+ return epc;
+}
+
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ int bc_false = 0;
+ unsigned int fcr31;
+ unsigned int bit;
+
+ if (!cpu_has_mmips)
+ return 0;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_pool32a_op:
+ if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+ mm_pool32axf_op) {
+ switch (insn.mm_i_format.simmediate >>
+ MM_POOL32A_MINOR_SHIFT) {
+ case mm_jalr_op:
+ case mm_jalrhb_op:
+ case mm_jalrs_op:
+ case mm_jalrshb_op:
+ if (insn.mm_i_format.rt != 0) /* Not mm_jr */
+ regs->regs[insn.mm_i_format.rt] =
+ regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ }
+ break;
+ case mm_pool32i_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_bltzals_op:
+ case mm_bltzal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_bltz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgezals_op:
+ case mm_bgezal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_bgez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_blez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgtz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] > 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bc2f_op:
+ case mm_bc1f_op:
+ bc_false = 1;
+ /* Fall through */
+ case mm_bc2t_op:
+ case mm_bc1t_op:
+ preempt_disable();
+ if (is_fpu_owner())
+ asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ if (bc_false)
+ fcr31 = ~fcr31;
+
+ bit = (insn.mm_i_format.rs >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ }
+ break;
+ case mm_pool16c_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_jalr16_op:
+ case mm_jalrs16_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_jr16_op:
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ break;
+ case mm_beqz16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_bnez16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_b16_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ (insn.mm_b0_format.simmediate << 1);
+ return 1;
+ case mm_beq32_op:
+ if (regs->regs[insn.mm_i_format.rs] ==
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bne32_op:
+ if (regs->regs[insn.mm_i_format.rs] !=
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_jalx32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ return 1;
+ case mm_jals32_op:
+ case mm_jal32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ /* Fall through */
+ case mm_j32_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 27;
+ *contpc <<= 27;
+ *contpc |= (insn.j_format.target << 1);
+ set_isa16_mode(*contpc);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long contpc;
+ struct mm_decoded_insn mminsn = { 0 };
+
+ mminsn.micro_mips_mode = 1;
+
+ /* This load never faults. */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ mm_isBranchInstr(regs, mminsn, &contpc);
+
+ regs->cp0_epc = contpc;
+
+ return 0;
+
+sigsegv:
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *addr;
+ union mips16e_instruction inst;
+ u16 inst2;
+ u32 fullinst;
long epc;
- union mips_instruction insn;
epc = regs->cp0_epc;
- if (epc & 3)
- goto unaligned;
- /*
- * Read the instruction
- */
- addr = (unsigned int __user *) epc;
- if (__get_user(insn.word, addr)) {
+ /* Read the instruction. */
+ addr = (u16 __user *)msk_isa16_mode(epc);
+ if (__get_user(inst.full, addr)) {
force_sig(SIGSEGV, current);
return -EFAULT;
}
- regs->regs[0] = 0;
+ switch (inst.ri.opcode) {
+ case MIPS16e_extend_op:
+ regs->cp0_epc += 4;
+ return 0;
+
+ /*
+ * JAL and JALX in MIPS16e mode
+ */
+ case MIPS16e_jal_op:
+ addr += 1;
+ if (__get_user(inst2, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+ fullinst = ((unsigned)inst.full << 16) | inst2;
+ regs->regs[31] = epc + 6;
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ /*
+ * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+ *
+ * The jump target is reassembled into the byte address as:
+ * addr[17:2] = TARGET[15:0]
+ * addr[22:18] = TARGET[20:16]
+ * addr[27:23] = TARGET[25:21]
+ */
+ epc |=
+ ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+ ((fullinst & 0x1f0000) << 7);
+ if (!inst.jal.x)
+ set_isa16_mode(epc); /* Set ISA mode bit. */
+ regs->cp0_epc = epc;
+ return 0;
+
+ /*
+ * J(AL)R(C)
+ */
+ case MIPS16e_rr_op:
+ if (inst.rr.func == MIPS16e_jr_func) {
+
+ if (inst.rr.ra)
+ regs->cp0_epc = regs->regs[31];
+ else
+ regs->cp0_epc =
+ regs->regs[reg16to32[inst.rr.rx]];
+
+ if (inst.rr.l) {
+ if (inst.rr.nd)
+ regs->regs[31] = epc + 2;
+ else
+ regs->regs[31] = epc + 4;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ /*
+ * All other cases have no branch delay slot and are 16-bit instructions.
+ * Branches do not cause an exception.
+ */
+ regs->cp0_epc += 2;
+
+ return 0;
+}
+
+/**
+ * __compute_return_epc_for_insn - Compute the return address and emulate
+ * the branch, if required.
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+ * @returns: -EFAULT on error, after forcing SIGBUS; on success, 0 or
+ * BRANCH_LIKELY_TAKEN as appropriate after evaluating the
+ * branch.
+ */
+int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn)
+{
+ unsigned int bit, fcr31, dspcontrol;
+ long epc = regs->cp0_epc;
+ int ret = 0;
+
switch (insn.i_format.opcode) {
/*
* jr and jalr are in r_format format.
@@ -63,20 +429,24 @@ int __compute_return_epc(struct pt_regs *regs)
*/
case bcond_op:
switch (insn.i_format.rt) {
- case bltz_op:
+ case bltz_op:
case bltzl_op:
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
case bgez_op:
case bgezl_op:
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -84,9 +454,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bltzal_op:
case bltzall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] < 0)
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bltzall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -94,12 +466,15 @@ int __compute_return_epc(struct pt_regs *regs)
case bgezal_op:
case bgezall_op:
regs->regs[31] = epc + 8;
- if ((long)regs->regs[insn.i_format.rs] >= 0)
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == bgezall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
+
case bposge32_op:
if (!cpu_has_dsp)
goto sigill;
@@ -126,6 +501,8 @@ int __compute_return_epc(struct pt_regs *regs)
epc <<= 28;
epc |= (insn.j_format.target << 2);
regs->cp0_epc = epc;
+ if (insn.i_format.opcode == jalx_op)
+ set_isa16_mode(regs->cp0_epc);
break;
/*
@@ -134,9 +511,11 @@ int __compute_return_epc(struct pt_regs *regs)
case beq_op:
case beql_op:
if (regs->regs[insn.i_format.rs] ==
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == beql_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -144,9 +523,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bne_op:
case bnel_op:
if (regs->regs[insn.i_format.rs] !=
- regs->regs[insn.i_format.rt])
+ regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -154,9 +535,11 @@ int __compute_return_epc(struct pt_regs *regs)
case blez_op: /* not really i_format */
case blezl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] <= 0)
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == blezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -164,9 +547,11 @@ int __compute_return_epc(struct pt_regs *regs)
case bgtz_op:
case bgtzl_op:
/* rt field assumed to be zero */
- if ((long)regs->regs[insn.i_format.rs] > 0)
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.opcode == bgtzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
@@ -177,7 +562,11 @@ int __compute_return_epc(struct pt_regs *regs)
case cop1_op:
preempt_disable();
if (is_fpu_owner())
- asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ asm volatile(
+ ".set push\n"
+ "\t.set mips1\n"
+ "\tcfc1\t%0,$31\n"
+ "\t.set pop" : "=r" (fcr31));
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
@@ -186,36 +575,96 @@ int __compute_return_epc(struct pt_regs *regs)
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
- case 0: /* bc1f */
- case 2: /* bc1fl */
- if (~fcr31 & (1 << bit))
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == 2)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
- case 1: /* bc1t */
- case 3: /* bc1tl */
- if (fcr31 & (1 << bit))
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
+ if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- else
+ if (insn.i_format.rt == 3)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
epc += 8;
regs->cp0_epc = epc;
break;
}
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32))) == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32)))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#endif
}
- return 0;
+ return ret;
-unaligned:
- printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+sigill:
+ printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
-sigill:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+int __compute_return_epc(struct pt_regs *regs)
+{
+ unsigned int __user *addr;
+ long epc;
+ union mips_instruction insn;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ addr = (unsigned int __user *) epc;
+ if (__get_user(insn.word, addr)) {
+ force_sig(SIGSEGV, current);
+ return -EFAULT;
+ }
+
+ return __compute_return_epc_for_insn(regs, insn);
+
+unaligned:
+ printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
}
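
Annotation: in the cop1_op case above, the sequence bit = (rt >> 2); bit += (bit != 0); bit += 23; selects which FCSR condition-code bit to test. A hedged C sketch of that mapping (helper name hypothetical; FCC0 sits at FCSR bit 23, FCC1..FCC7 at bits 25..31, with bit 24 being the FS flag):

    /* Map an FP condition-code number (0..7) to its FCSR bit index. */
    static inline unsigned int fcc_to_fcsr_bit(unsigned int cc)
    {
            unsigned int bit = cc;

            bit += (bit != 0);      /* cc > 0 skips bit 24 (FS) */
            return bit + 23;        /* 0 -> 23, 1 -> 25, ..., 7 -> 31 */
    }
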
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 0a57f86945f..7976457184b 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -18,6 +18,8 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
@@ -39,7 +41,7 @@
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -107,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
-void __cpuinit sb1480_clockevent_init(void)
+void sb1480_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
@@ -126,7 +128,7 @@ void __cpuinit sb1480_clockevent_init(void)
cd->min_delta_ns = clockevent_delta2ns(2, cd);
cd->rating = 200;
cd->irq = irq;
- cd->cpumask = cpumask_of_cpu(cpu);
+ cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
cd->set_mode = sibyte_set_mode;
clockevents_register_device(cd);
@@ -142,12 +144,11 @@ void __cpuinit sb1480_clockevent_init(void)
bcm1480_unmask_irq(cpu, irq);
- action->handler = sibyte_counter_handler;
- action->flags = IRQF_DISABLED | IRQF_PERCPU;
- action->mask = cpumask_of_cpu(cpu);
+ action->handler = sibyte_counter_handler;
+ action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
- irq_set_affinity(irq, cpumask_of_cpu(cpu));
+ irq_set_affinity(irq, cpumask_of(cpu));
setup_irq(irq, action);
}
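
Annotation: this driver, like cevt-sb1250.c below, drops IRQF_DISABLED (now meaningless; handlers always run with interrupts off), drops the removed irqaction ->mask field, adds IRQF_TIMER, and switches from the by-value cpumask_of_cpu() to the pointer-returning cpumask_of(). A hedged sketch of the resulting setup pattern (function and names hypothetical, not part of the patch):

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static void setup_percpu_timer_irq(struct irqaction *action,
                                       unsigned int irq, unsigned int cpu,
                                       irq_handler_t handler, void *clockevent)
    {
            action->handler = handler;
            action->flags = IRQF_PERCPU | IRQF_TIMER;   /* no ->mask field anymore */
            action->name = "timer";
            action->dev_id = clockevent;

            irq_set_affinity(irq, cpumask_of(cpu));     /* const struct cpumask * */
            setup_irq(irq, action);
    }
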
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644
index 00000000000..ff1f01b7227
--- /dev/null
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -0,0 +1,130 @@
+/*
+ * DS1287 clockevent driver
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+
+int ds1287_timer_state(void)
+{
+ return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
+}
+
+int ds1287_set_base_clock(unsigned int hz)
+{
+ u8 rate;
+
+ switch (hz) {
+ case 128:
+ rate = 0x9;
+ break;
+ case 256:
+ rate = 0x8;
+ break;
+ case 1024:
+ rate = 0x6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
+
+ return 0;
+}
+
+static int ds1287_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return -EINVAL;
+}
+
+static void ds1287_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ val |= RTC_PIE;
+ break;
+ default:
+ val &= ~RTC_PIE;
+ break;
+ }
+
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+}
+
+static void ds1287_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device ds1287_clockevent = {
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = ds1287_set_next_event,
+ .set_mode = ds1287_set_mode,
+ .event_handler = ds1287_event_handler,
+};
+
+static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &ds1287_clockevent;
+
+ /* Ack the RTC interrupt. */
+ CMOS_READ(RTC_REG_C);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction ds1287_irqaction = {
+ .handler = ds1287_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "ds1287",
+};
+
+int __init ds1287_clockevent_init(int irq)
+{
+ struct clock_event_device *cd;
+
+ cd = &ds1287_clockevent;
+ cd->rating = 100;
+ cd->irq = irq;
+ clockevent_set_clock(cd, 32768);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&ds1287_clockevent);
+
+ return setup_irq(irq, &ds1287_irqaction);
+}
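
Annotation: the rate codes written by ds1287_set_base_clock() follow the MC146818 periodic-interrupt formula: with the 32.768 kHz time base, the output frequency is 32768 >> (rate - 1) for rate selects 3..15. A small illustrative helper (hypothetical, not in the driver):

    /* 0x6 -> 1024 Hz, 0x8 -> 256 Hz, 0x9 -> 128 Hz */
    static unsigned int ds1287_rate_to_hz(unsigned int rate)
    {
            if (rate < 3 || rate > 15)
                    return 0;               /* reserved/invalid rate select */
            return 32768u >> (rate - 1);
    }
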
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644
index 00000000000..6093716980b
--- /dev/null
+++ b/arch/mips/kernel/cevt-gic.c
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ u64 cnt;
+ int res;
+
+ cnt = gic_read_count();
+ cnt += (u64)delta;
+ gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
+ res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ gic_write_compare(gic_read_compare());
+ cd = &per_cpu(gic_clockevent_device, cpu);
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+ .handler = gic_compare_interrupt,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .name = "timer",
+};
+
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int gic_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+
+ if (!cpu_has_counter || !gic_frequency)
+ return -ENXIO;
+
+ irq = MIPS_GIC_IRQ_BASE;
+
+ cd = &per_cpu(gic_clockevent_device, cpu);
+
+ cd->name = "MIPS GIC";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP;
+
+ clockevent_set_clock(cd, gic_frequency);
+
+ /* Calculate the min / max delta */
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = gic_next_event;
+ cd->set_mode = gic_set_clock_mode;
+ cd->event_handler = gic_event_handler;
+
+ clockevents_register_device(cd);
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+ if (gic_timer_irq_installed)
+ return 0;
+
+ gic_timer_irq_installed = 1;
+
+ setup_irq(irq, &gic_compare_irqaction);
+ irq_set_handler(irq, handle_percpu_irq);
+ return 0;
+}
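
Annotation: gic_next_event() decides whether the freshly programmed deadline has already passed by computing (int)(gic_read_count() - cnt) >= 0. Subtracting two free-running counter samples in unsigned arithmetic and casting to a signed type yields a wrap-safe distance; the same pattern appears in cevt-r4k.c below. A minimal sketch (types from <linux/types.h>, helper name hypothetical):

    /* True iff 'deadline' is at or behind 'now', modulo counter wrap. */
    static inline int counter_expired(u32 now, u32 deadline)
    {
            return (s32)(now - deadline) >= 0;
    }
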
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index c36772631fe..f069460751a 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx clockevent routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,13 +21,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/irq.h>
#include <asm/gt64120.h>
#include <asm/time.h>
-#include <irq.h>
-
-static DEFINE_SPINLOCK(gt641xx_timer_lock);
+static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock);
static unsigned int gt641xx_base_clock;
void gt641xx_set_base_clock(unsigned int clock)
@@ -51,7 +50,7 @@ static int gt641xx_timer0_set_next_event(unsigned long delta,
{
u32 ctrl;
- spin_lock(&gt641xx_timer_lock);
+ raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
@@ -60,7 +59,7 @@ static int gt641xx_timer0_set_next_event(unsigned long delta,
GT_WRITE(GT_TC0_OFS, delta);
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
- spin_unlock(&gt641xx_timer_lock);
+ raw_spin_unlock(&gt641xx_timer_lock);
return 0;
}
@@ -70,7 +69,7 @@ static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
{
u32 ctrl;
- spin_lock(&gt641xx_timer_lock);
+ raw_spin_lock(&gt641xx_timer_lock);
ctrl = GT_READ(GT_TC_CONTROL_OFS);
ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
@@ -88,7 +87,7 @@ static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
- spin_unlock(&gt641xx_timer_lock);
+ raw_spin_unlock(&gt641xx_timer_lock);
}
static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
@@ -98,9 +97,8 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
static struct clock_event_device gt641xx_timer0_clockevent = {
.name = "gt641xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .cpumask = CPU_MASK_CPU0,
.irq = GT641XX_TIMER0_IRQ,
- .set_next_event = gt641xx_timer0_set_next_event,
+ .set_next_event = gt641xx_timer0_set_next_event,
.set_mode = gt641xx_timer0_set_mode,
.event_handler = gt641xx_timer0_event_handler,
};
@@ -116,7 +114,7 @@ static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
static struct irqaction gt641xx_timer0_irqaction = {
.handler = gt641xx_timer0_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "gt641xx_timer0",
};
@@ -134,6 +132,7 @@ static int __init gt641xx_timer0_clockevent_init(void)
clockevent_set_clock(cd, gt641xx_base_clock);
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->cpumask = cpumask_of(0);
clockevents_register_device(&gt641xx_timer0_clockevent);
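
Annotation: the lock becomes a raw spinlock because the set_next_event/set_mode callbacks are entered from hard-IRQ context with interrupts disabled; on PREEMPT_RT a plain spinlock_t can become a sleeping lock, which would be invalid there. A hedged sketch of the pattern, reusing this file's GT_READ/GT_WRITE accessors (helper name hypothetical):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_RAW_SPINLOCK(timer_reg_lock); /* stays a spinning lock on RT */

    static void timer_ctrl_update(u32 set, u32 clear)
    {
            u32 ctrl;

            raw_spin_lock(&timer_reg_lock);
            ctrl = GT_READ(GT_TC_CONTROL_OFS);  /* read-modify-write control reg */
            ctrl = (ctrl & ~clear) | set;
            GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
            raw_spin_unlock(&timer_reg_lock);
    }
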
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0..bc127e22fda 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -9,70 +9,36 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
-#include <asm/smtc_ipi.h>
#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+#include <asm/gic.h>
static int mips_next_event(unsigned long delta,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cnt;
int res;
-#ifdef CONFIG_MIPS_MT_SMTC
- {
- unsigned long flags, vpflags;
- local_irq_save(flags);
- vpflags = dvpe();
-#endif
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
- res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
- evpe(vpflags);
- local_irq_restore(flags);
- }
-#endif
+ res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
-static void mips_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+void mips_set_clock_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
{
/* Nothing to do ... */
}
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
-
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
- write_c0_compare(read_c0_compare());
-}
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
struct clock_event_device *cd;
@@ -88,17 +54,13 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
goto out;
/*
- * The same applies to performance counter interrupts. But with the
+ * The same applies to performance counter interrupts. But with the
* above we now know that the reason we got here must be a timer
* interrupt. Being the paranoiacs we are we check anyway.
*/
if (!r2 || (read_c0_cause() & (1 << 30))) {
- c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_data[cpu].vpe_id)
- goto out;
- cpu = 0;
-#endif
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
@@ -107,65 +69,14 @@ out:
return IRQ_HANDLED;
}
-static struct irqaction c0_compare_irqaction = {
+struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
- .flags = IRQF_DISABLED,
-#else
- .flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "timer",
};
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-static void mips_broadcast(cpumask_t mask)
-{
- unsigned int cpu;
-
- for_each_cpu_mask(cpu, mask)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
- //uint64_t mips_freq = mips_hpt_frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
-
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
- cd->name = "SMTC";
- cd->features = CLOCK_EVT_FEAT_DUMMY;
-
- /* Calculate the min / max delta */
- cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 0; //32;
- cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
- cd->rating = 200;
- cd->irq = 17; //-1;
-// if (cpu)
-// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-// else
- cd->cpumask = cpumask_of_cpu(cpu);
-
- cd->set_mode = smtc_set_mode;
-
- cd->broadcast = mips_broadcast;
-
- clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
{
}
@@ -174,20 +85,39 @@ static void mips_event_handler(struct clock_event_device *dev)
*/
static int c0_compare_int_pending(void)
{
- return (read_c0_cause() >> cp0_compare_irq) & 0x100;
+#ifdef CONFIG_IRQ_GIC
+ if (cpu_has_veic)
+ return gic_get_timer_pending();
+#endif
+ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}
-static int c0_compare_int_usable(void)
+/*
+ * The compare interrupt can be routed and latched outside the core, so
+ * wait up to a worst-case number of cycle-counter ticks for a timer
+ * interrupt change to propagate to the Cause register.
+ */
+#define COMPARE_INT_SEEN_TICKS 50
+
+int c0_compare_int_usable(void)
{
unsigned int delta;
unsigned int cnt;
+#ifdef CONFIG_KVM_GUEST
+ return 1;
+#endif
+
/*
- * IP7 already pending? Try to clear it by acking the timer.
+ * IP7 already pending? Try to clear it by acking the timer.
*/
if (c0_compare_int_pending()) {
- write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
if (c0_compare_int_pending())
return 0;
}
@@ -196,7 +126,7 @@ static int c0_compare_int_usable(void)
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
- irq_disable_hazard();
+ back_to_back_c0_hazard();
if ((int)(read_c0_count() - cnt) < 0)
break;
/* increase delta if the timer was already expired */
@@ -205,11 +135,17 @@ static int c0_compare_int_usable(void)
while ((int)(read_c0_count() - cnt) <= 0)
; /* Wait for expiry */
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (c0_compare_int_pending())
+ break;
if (!c0_compare_int_pending())
return 0;
-
- write_c0_compare(read_c0_count());
- irq_disable_hazard();
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
if (c0_compare_int_pending())
return 0;
@@ -219,9 +155,8 @@ static int c0_compare_int_usable(void)
return 1;
}
-int __cpuinit mips_clockevent_init(void)
+int r4k_clockevent_init(void)
{
- uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
@@ -229,17 +164,6 @@ int __cpuinit mips_clockevent_init(void)
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
-#ifdef CONFIG_MIPS_MT_SMTC
- setup_smtc_dummy_clockevent_device();
-
- /*
- * On SMTC we only register VPE0's compare interrupt as clockevent
- * device.
- */
- if (cpu)
- return 0;
-#endif
-
if (!c0_compare_int_usable())
return -ENXIO;
@@ -255,23 +179,21 @@ int __cpuinit mips_clockevent_init(void)
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
- cd->features = CLOCK_EVT_FEAT_ONESHOT;
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ clockevent_set_clock(cd, mips_hpt_frequency);
/* Calculate the min / max delta */
- cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
- cd->cpumask = CPU_MASK_ALL;
-#else
- cd->cpumask = cpumask_of_cpu(cpu);
-#endif
+ cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_mode;
+ cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
@@ -281,12 +203,8 @@ int __cpuinit mips_clockevent_init(void)
cp0_timer_irq_installed = 1;
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
- setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
setup_irq(irq, &c0_compare_irqaction);
-#endif
return 0;
}
+
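
Annotation: c0_compare_int_usable() now polls for up to COMPARE_INT_SEEN_TICKS after each ack or rearm, because the pending bit can take several cycle-counter ticks to change when the compare interrupt is routed through external logic. The recurring loop, factored out as a hedged sketch (helper name hypothetical; read_c0_count() and c0_compare_int_pending() are the functions used in this file):

    /* Wait up to 'ticks' counter ticks for the pending bit to clear. */
    static int compare_pending_clears(unsigned int start, unsigned int ticks)
    {
            while (read_c0_count() < (start + ticks))
                    if (!c0_compare_int_pending())
                            return 1;           /* propagated in time */
            return !c0_compare_int_pending();   /* last look after timeout */
    }
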
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 63ac3ad462b..5ea6d6b1de1 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -17,7 +17,9 @@
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/percpu.h>
+#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/io.h>
@@ -37,7 +39,7 @@
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -105,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
-void __cpuinit sb1250_clockevent_init(void)
+void sb1250_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int irq = K_INT_TIMER_0 + cpu;
@@ -125,7 +127,7 @@ void __cpuinit sb1250_clockevent_init(void)
cd->min_delta_ns = clockevent_delta2ns(2, cd);
cd->rating = 200;
cd->irq = irq;
- cd->cpumask = cpumask_of_cpu(cpu);
+ cd->cpumask = cpumask_of(cpu);
cd->set_next_event = sibyte_next_event;
cd->set_mode = sibyte_set_mode;
clockevents_register_device(cd);
@@ -141,12 +143,11 @@ void __cpuinit sb1250_clockevent_init(void)
sb1250_unmask_irq(cpu, irq);
- action->handler = sibyte_counter_handler;
- action->flags = IRQF_DISABLED | IRQF_PERCPU;
- action->mask = cpumask_of_cpu(cpu);
+ action->handler = sibyte_counter_handler;
+ action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
- irq_set_affinity(irq, cpumask_of_cpu(cpu));
+ irq_set_affinity(irq, cpumask_of(cpu));
setup_irq(irq, action);
}
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 795cb8fb0d7..2ae08462e46 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -4,7 +4,7 @@
* for more details.
*
* Based on linux/arch/mips/kernel/cevt-r4k.c,
- * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
*
* Copyright 2001 MontaVista Software Inc.
* Copyright (C) 2000-2001 Toshiba Corporation
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <asm/time.h>
#include <asm/txx9tmr.h>
@@ -20,22 +21,29 @@
#define TIMER_CCD 0 /* 1/2 */
#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
-static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr;
+struct txx9_clocksource {
+ struct clocksource cs;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
-static cycle_t txx9_cs_read(void)
+static cycle_t txx9_cs_read(struct clocksource *cs)
{
- return __raw_readl(&txx9_cs_tmrptr->trr);
+ struct txx9_clocksource *txx9_cs =
+ container_of(cs, struct txx9_clocksource, cs);
+ return __raw_readl(&txx9_cs->tmrptr->trr);
}
/* Use 1 bit smaller width to use full bits in that width */
#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
-static struct clocksource txx9_clocksource = {
- .name = "TXx9",
- .rating = 200,
- .read = txx9_cs_read,
- .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+static struct txx9_clocksource txx9_clocksource = {
+ .cs = {
+ .name = "TXx9",
+ .rating = 200,
+ .read = txx9_cs_read,
+ .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
};
void __init txx9_clocksource_init(unsigned long baseaddr,
@@ -43,8 +51,7 @@ void __init txx9_clocksource_init(unsigned long baseaddr,
{
struct txx9_tmr_reg __iomem *tmrptr;
- clocksource_set_clock(&txx9_clocksource, TIMER_CLK(imbusclk));
- clocksource_register(&txx9_clocksource);
+ clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
__raw_writel(TCR_BASE, &tmrptr->tcr);
@@ -53,10 +60,13 @@ void __init txx9_clocksource_init(unsigned long baseaddr,
__raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
__raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
- txx9_cs_tmrptr = tmrptr;
+ txx9_clocksource.tmrptr = tmrptr;
}
-static struct txx9_tmr_reg __iomem *txx9_tmrptr;
+struct txx9_clock_event_device {
+ struct clock_event_device cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
{
@@ -69,7 +79,9 @@ static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
static void txx9tmr_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr;
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
switch (mode) {
@@ -99,7 +111,9 @@ static void txx9tmr_set_mode(enum clock_event_mode mode,
static int txx9tmr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr;
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
txx9tmr_stop_and_clear(tmrptr);
/* start timer */
@@ -108,48 +122,53 @@ static int txx9tmr_set_next_event(unsigned long delta,
return 0;
}
-static struct clock_event_device txx9tmr_clock_event_device = {
- .name = "TXx9",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .cpumask = CPU_MASK_CPU0,
- .set_mode = txx9tmr_set_mode,
- .set_next_event = txx9tmr_set_next_event,
+static struct txx9_clock_event_device txx9_clock_event_device = {
+ .cd = {
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_mode = txx9tmr_set_mode,
+ .set_next_event = txx9tmr_set_next_event,
+ },
};
static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *cd = &txx9tmr_clock_event_device;
- struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr;
+ struct txx9_clock_event_device *txx9_cd = dev_id;
+ struct clock_event_device *cd = &txx9_cd->cd;
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
- __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct irqaction txx9tmr_irq = {
.handler = txx9tmr_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .flags = IRQF_PERCPU | IRQF_TIMER,
.name = "txx9tmr",
+ .dev_id = &txx9_clock_event_device,
};
void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
unsigned int imbusclk)
{
- struct clock_event_device *cd = &txx9tmr_clock_event_device;
+ struct clock_event_device *cd = &txx9_clock_event_device.cd;
struct txx9_tmr_reg __iomem *tmrptr;
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
txx9tmr_stop_and_clear(tmrptr);
__raw_writel(TIMER_CCD, &tmrptr->ccdr);
__raw_writel(0, &tmrptr->itmr);
- txx9_tmrptr = tmrptr;
+ txx9_clock_event_device.tmrptr = tmrptr;
clockevent_set_clock(cd, TIMER_CLK(imbusclk));
cd->max_delta_ns =
clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
cd->irq = irq;
+ cd->cpumask = cpumask_of(0);
clockevents_register_device(cd);
setup_irq(irq, &txx9tmr_irq);
printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
@@ -161,6 +180,9 @@ void __init txx9_tmr_init(unsigned long baseaddr)
struct txx9_tmr_reg __iomem *tmrptr;
tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ /* Start once to make CounterResetEnable effective */
+ __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ /* Stop and reset the counter */
__raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
__raw_writel(0, &tmrptr->tisr);
__raw_writel(0xffffffff, &tmrptr->cpra);
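
Annotation: the main refactor in cevt-txx9.c embeds the generic clocksource/clock_event_device in a driver-private wrapper and recovers the wrapper with container_of(), instead of keeping the register pointer in a file-scope static. A minimal sketch of the idiom under the same assumptions (struct and function names hypothetical; __raw_readl from <asm/io.h>):

    #include <linux/clocksource.h>
    #include <linux/kernel.h>       /* container_of() */

    struct wrapped_cs {
            struct clocksource cs;          /* generic part handed to the core */
            void __iomem *regbase;          /* driver-private state */
    };

    static cycle_t wrapped_cs_read(struct clocksource *cs)
    {
            struct wrapped_cs *p = container_of(cs, struct wrapped_cs, cs);

            return __raw_readl(p->regbase); /* illustrative counter read */
    }
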
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 00000000000..6f4f739dad9
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+
+#define GCR_CL_COHERENCE_OFS 0x2008
+#define GCR_CL_ID_OFS 0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+ /*
+ * Set dest to non-zero if the core supports the MT ASE, else zero. If
+ * MT is not supported then branch to nomt.
+ */
+ .macro has_mt dest, nomt
+ mfc0 \dest, CP0_CONFIG
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ andi \dest, \dest, MIPS_CONF3_MT
+ beqz \dest, \nomt
+ .endm
+
+.section .text.cps-vec
+.balign 0x1000
+
+LEAF(mips_cps_core_entry)
+ /*
+ * These first 12 bytes will be patched by cps_smp_setup to load the
+ * base address of the CM GCRs into register v1 and the CCA to use into
+ * register s0.
+ */
+ .quad 0
+ .word 0
+
+ /* Check whether we're here due to an NMI */
+ mfc0 k0, CP0_STATUS
+ and k0, k0, ST0_NMI
+ beqz k0, not_nmi
+ nop
+
+ /* This is an NMI */
+ la k0, nmi_handler
+ jr k0
+ nop
+
+not_nmi:
+ /* Setup Cause */
+ li t0, CAUSEF_IV
+ mtc0 t0, CP0_CAUSE
+
+ /* Setup Status */
+ li t0, ST0_CU1 | ST0_CU0
+ mtc0 t0, CP0_STATUS
+
+ /*
+ * Clear the bits used to index the caches. Note that the architecture
+ * dictates that writes to any of the TagLo or TagHi registers with
+ * select 0 or 2 should be valid for all MIPS32 CPUs, even those for
+ * which said writes are unnecessary.
+ */
+ mtc0 zero, CP0_TAGLO, 0
+ mtc0 zero, CP0_TAGHI, 0
+ mtc0 zero, CP0_TAGLO, 2
+ mtc0 zero, CP0_TAGHI, 2
+ ehb
+
+ /* Primary cache configuration is indicated by Config1 */
+ mfc0 v0, CP0_CONFIG, 1
+
+ /* Detect I-cache line size */
+ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+ beqz t0, icache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect I-cache size */
+ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addi t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+ addi t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, KSEG0
+ add a1, a0, t1
+1: cache Index_Store_Tag_I, 0(a0)
+ add a0, a0, t0
+ bne a0, a1, 1b
+ nop
+icache_done:
+
+ /* Detect D-cache line size */
+ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+ beqz t0, dcache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect D-cache size */
+ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addi t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+ addi t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, KSEG0
+ addu a1, a0, t1
+ subu a1, a1, t0
+1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+ add a0, a0, t0
+dcache_done:
+
+ /* Set Kseg0 CCA to that in s0 */
+ mfc0 t0, CP0_CONFIG
+ ori t0, 0x7
+ xori t0, 0x7
+ or t0, t0, s0
+ mtc0 t0, CP0_CONFIG
+ ehb
+
+ /* Enter the coherent domain */
+ li t0, 0xff
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
+
+ /* Jump to kseg0 */
+ la t0, 1f
+ jr t0
+ nop
+
+ /*
+ * We're up, cached & coherent. Perform any further required core-level
+ * initialisation.
+ */
+1: jal mips_cps_core_init
+ nop
+
+ /*
+ * Boot any other VPEs within this core that should be online, and
+ * deactivate this VPE if it should be offline.
+ */
+ jal mips_cps_boot_vpes
+ nop
+
+ /* Off we go! */
+ lw t1, VPEBOOTCFG_PC(v0)
+ lw gp, VPEBOOTCFG_GP(v0)
+ lw sp, VPEBOOTCFG_SP(v0)
+ jr t1
+ nop
+ END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+ b .
+ nop
+ END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+ b .
+ nop
+ END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+ b .
+ nop
+ END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+ b .
+ nop
+ END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+ b .
+ nop
+ END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+ la k0, ejtag_debug_handler
+ jr k0
+ nop
+ END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT
+ /* Check that the core implements the MT ASE */
+ has_mt t0, 3f
+ nop
+
+ .set push
+ .set mt
+
+ /* Only allow 1 TC per VPE to execute... */
+ dmt
+
+ /* ...and for the moment only 1 VPE */
+ dvpe
+ la t1, 1f
+ jr.hb t1
+ nop
+
+ /* Enter VPE configuration state */
+1: mfc0 t0, CP0_MVPCONTROL
+ ori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+ /* Retrieve the number of VPEs within the core */
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+ addi t7, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+ nop
+
+ /* Loop through each VPE within this core */
+ li t5, 1
+
+1: /* Operate on the appropriate TC */
+ mtc0 t5, CP0_VPECONTROL
+ ehb
+
+ /* Bind TC to VPE (1:1 TC:VPE mapping) */
+ mttc0 t5, CP0_TCBIND
+
+ /* Set exclusive TC, non-active, master */
+ li t0, VPECONF0_MVP
+ sll t1, t5, VPECONF0_XTC_SHIFT
+ or t0, t0, t1
+ mttc0 t0, CP0_VPECONF0
+
+ /* Set TC non-active, non-allocatable */
+ mttc0 zero, CP0_TCSTATUS
+
+ /* Set TC halted */
+ li t0, TCHALT_H
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+ addi t5, t5, 1
+ slt t0, t5, t7
+ bnez t0, 1b
+ nop
+
+ /* Leave VPE configuration state */
+2: mfc0 t0, CP0_MVPCONTROL
+ xori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+3: .set pop
+#endif
+ jr ra
+ nop
+ END(mips_cps_core_init)
+
+LEAF(mips_cps_boot_vpes)
+ /* Retrieve CM base address */
+ la t0, mips_cm_base
+ lw t0, 0(t0)
+
+ /* Calculate a pointer to this core's struct core_boot_config */
+ lw t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+ la t1, mips_cps_core_bootcfg
+ lw t1, 0(t1)
+ addu t0, t0, t1
+
+ /* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
+ has_mt t6, 1f
+ li t9, 0
+
+ /* Find the number of VPEs present in the core */
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+ addi t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+ li t2, 31
+ subu t1, t2, t1
+ li t2, 1
+ sll t1, t2, t1
+ addiu t1, t1, -1
+
+ /* Retrieve the VPE ID from EBase.CPUNum */
+ mfc0 t9, $15, 1
+ and t9, t9, t1
+
+1: /* Calculate a pointer to this VPE's struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v0, t9, t1
+ lw t7, COREBOOTCFG_VPECONFIG(t0)
+ addu v0, v0, t7
+
+#ifdef CONFIG_MIPS_MT
+
+ /* If the core doesn't support MT then return */
+ bnez t6, 1f
+ nop
+ jr ra
+ nop
+
+ .set push
+ .set mt
+
+1: /* Enter VPE configuration state */
+ dvpe
+ la t1, 1f
+ jr.hb t1
+ nop
+1: mfc0 t1, CP0_MVPCONTROL
+ ori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+
+ /* Loop through each VPE */
+ lw t6, COREBOOTCFG_VPEMASK(t0)
+ move t8, t6
+ li t5, 0
+
+ /* Check whether the VPE should be running. If not, skip it */
+1: andi t0, t6, 1
+ beqz t0, 2f
+ nop
+
+ /* Operate on the appropriate TC */
+ mfc0 t0, CP0_VPECONTROL
+ ori t0, t0, VPECONTROL_TARGTC
+ xori t0, t0, VPECONTROL_TARGTC
+ or t0, t0, t5
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
+ /* Skip the VPE if its TC is not halted */
+ mftc0 t0, CP0_TCHALT
+ beqz t0, 2f
+ nop
+
+ /* Calculate a pointer to the VPE's struct vpe_boot_config */
+ li t0, VPEBOOTCFG_SIZE
+ mul t0, t0, t5
+ addu t0, t0, t7
+
+ /* Set the TC restart PC */
+ lw t1, VPEBOOTCFG_PC(t0)
+ mttc0 t1, CP0_TCRESTART
+
+ /* Set the TC stack pointer */
+ lw t1, VPEBOOTCFG_SP(t0)
+ mttgpr t1, sp
+
+ /* Set the TC global pointer */
+ lw t1, VPEBOOTCFG_GP(t0)
+ mttgpr t1, gp
+
+ /* Copy config from this VPE */
+ mfc0 t0, CP0_CONFIG
+ mttc0 t0, CP0_CONFIG
+
+ /* Ensure no software interrupts are pending */
+ mttc0 zero, CP0_CAUSE
+ mttc0 zero, CP0_STATUS
+
+ /* Set TC active, not interrupt exempt */
+ mftc0 t0, CP0_TCSTATUS
+ li t1, ~TCSTATUS_IXMT
+ and t0, t0, t1
+ ori t0, t0, TCSTATUS_A
+ mttc0 t0, CP0_TCSTATUS
+
+ /* Clear the TC halt bit */
+ mttc0 zero, CP0_TCHALT
+
+ /* Set VPE active */
+ mftc0 t0, CP0_VPECONF0
+ ori t0, t0, VPECONF0_VPA
+ mttc0 t0, CP0_VPECONF0
+
+ /* Next VPE */
+2: srl t6, t6, 1
+ addi t5, t5, 1
+ bnez t6, 1b
+ nop
+
+ /* Leave VPE configuration state */
+ mfc0 t1, CP0_MVPCONTROL
+ xori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+ evpe
+
+ /* Check whether this VPE is meant to be running */
+ li t0, 1
+ sll t0, t0, t9
+ and t0, t0, t8
+ bnez t0, 2f
+ nop
+
+ /* This VPE should be offline, halt the TC */
+ li t0, TCHALT_H
+ mtc0 t0, CP0_TCHALT
+ la t0, 1f
+1: jr.hb t0
+ nop
+
+2: .set pop
+
+#endif /* CONFIG_MIPS_MT */
+
+ /* Return */
+ jr ra
+ nop
+ END(mips_cps_boot_vpes)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+ /* Calculate a pointer to this CPU's struct mips_static_suspend_state */
+ .macro psstate dest
+ .set push
+ .set noat
+ lw $1, TI_CPU(gp)
+ sll $1, $1, LONGLOG
+ la \dest, __per_cpu_offset
+ addu $1, $1, \dest
+ lw $1, 0($1)
+ la \dest, cps_cpu_state
+ addu \dest, \dest, $1
+ .set pop
+ .endm
+
+LEAF(mips_cps_pm_save)
+ /* Save CPU state */
+ SUSPEND_SAVE_REGS
+ psstate t1
+ SUSPEND_SAVE_STATIC
+ jr v0
+ nop
+ END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+ /* Restore CPU state */
+ psstate t1
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+ END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
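
Annotation: the cache-initialisation block in mips_cps_core_entry decodes the geometry from Config1. A hedged C rendering of the same arithmetic (helper name hypothetical; field encodings per the MIPS32 PRA):

    #include <stdint.h>

    /* Total I-cache bytes = line size * sets per way * ways. */
    static uint32_t icache_bytes(uint32_t config1)
    {
            uint32_t il = (config1 >> 19) & 0x7;    /* line-size code */
            uint32_t is = (config1 >> 22) & 0x7;    /* sets-per-way code */
            uint32_t ia = (config1 >> 16) & 0x7;    /* ways - 1 */

            if (il == 0)
                    return 0;                       /* no I-cache present */
            return (2u << il) * ((is == 7) ? 32 : (64u << is)) * (ia + 1);
    }
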
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index a1b48af0992..2d80b5f1aea 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -6,6 +6,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -16,7 +17,7 @@
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
-#include <asm/system.h>
+#include <asm/setup.h>
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
@@ -38,7 +39,7 @@ static inline void align_mod(const int align, const int mod)
".endr\n\t"
".set pop"
:
- : GCC_IMM_ASM(align), GCC_IMM_ASM(mod));
+ : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
}
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
@@ -73,7 +74,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
: "0" (5), "1" (8), "2" (5));
align_mod(align, mod);
/*
- * The trailing nop is needed to fullfill the two-instruction
+ * The trailing nop is needed to fulfill the two-instruction
* requirement between reading hi/lo and starting a mult/div.
* Leaving it out may cause gas to insert a nop itself, breaking
* the desired alignment of the next chunk.
@@ -84,9 +85,9 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
".set noreorder\n\t"
".set nomacro\n\t"
"mult %2, %3\n\t"
- "dsll32 %0, %4, %5\n\t"
+ "dsll32 %0, %4, %5\n\t"
"mflo $0\n\t"
- "dsll32 %1, %4, %5\n\t"
+ "dsll32 %1, %4, %5\n\t"
"nop\n\t"
".set pop"
: "=&r" (lv1), "=r" (lw)
@@ -167,12 +168,16 @@ static inline void check_mult_sh(void)
panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}
-static volatile int daddi_ov __cpuinitdata = 0;
+static volatile int daddi_ov;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
daddi_ov = 1;
regs->cp0_epc += 4;
+ exception_exit(prev_state);
}
static inline void check_daddi(void)
@@ -239,7 +244,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug __cpuinitdata = -1;
+int daddiu_bug = -1;
static inline void check_daddiu(void)
{
@@ -273,7 +278,7 @@ static inline void check_daddiu(void)
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
@@ -292,7 +297,7 @@ static inline void check_daddiu(void)
asm volatile(
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 89c3304cb93..d74f957c561 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
- * Copyright (C) 2001, 2004 MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -14,221 +14,55 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/smp.h>
#include <linux/stddef.h>
+#include <linux/export.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
-#include <asm/system.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
+#include <asm/watch.h>
+#include <asm/elf.h>
+#include <asm/spram.h>
+#include <asm/uaccess.h>
-/*
- * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
- * the implementation of the "wait" feature differs between CPU families. This
- * points to the function that implements CPU specific wait.
- * The wait instruction stops the pipeline and reduces the power consumption of
- * the CPU very much.
- */
-void (*cpu_wait)(void) = NULL;
-
-static void r3081_wait(void)
-{
- unsigned long cfg = read_c0_conf();
- write_c0_conf(cfg | R30XX_CONF_HALT);
-}
-
-static void r39xx_wait(void)
-{
- local_irq_disable();
- if (!need_resched())
- write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
- local_irq_enable();
-}
+static int mips_fpu_disabled;
-/*
- * There is a race when WAIT instruction executed with interrupt
- * enabled.
- * But it is implementation-dependent wheter the pipelie restarts when
- * a non-enabled interrupt is requested.
- */
-static void r4k_wait(void)
+static int __init fpu_disable(char *s)
{
- __asm__(" .set mips3 \n"
- " wait \n"
- " .set mips0 \n");
-}
+ cpu_data[0].options &= ~MIPS_CPU_FPU;
+ mips_fpu_disabled = 1;
-/*
- * This variant is preferable as it allows testing need_resched and going to
- * sleep depending on the outcome atomically. Unfortunately the "It is
- * implementation-dependent whether the pipeline restarts when a non-enabled
- * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
- * using this version a gamble.
- */
-static void r4k_wait_irqoff(void)
-{
- local_irq_disable();
- if (!need_resched())
- __asm__(" .set mips3 \n"
- " wait \n"
- " .set mips0 \n");
- local_irq_enable();
+ return 1;
}
-/*
- * The RM7000 variant has to handle erratum 38. The workaround is to not
- * have any pending stores when the WAIT instruction is executed.
- */
-static void rm7k_wait_irqoff(void)
-{
- local_irq_disable();
- if (!need_resched())
- __asm__(
- " .set push \n"
- " .set mips3 \n"
- " .set noat \n"
- " mfc0 $1, $12 \n"
- " sync \n"
- " mtc0 $1, $12 # stalls until W stage \n"
- " wait \n"
- " mtc0 $1, $12 # stalls until W stage \n"
- " .set pop \n");
- local_irq_enable();
-}
+__setup("nofpu", fpu_disable);
-/* The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter. */
-int allow_au1k_wait;
+int mips_dsp_disabled;
-static void au1k_wait(void)
+static int __init dsp_disable(char *s)
{
- /* using the wait instruction makes CP0 counter unusable */
- __asm__(" .set mips3 \n"
- " cache 0x14, 0(%0) \n"
- " cache 0x14, 32(%0) \n"
- " sync \n"
- " nop \n"
- " wait \n"
- " nop \n"
- " nop \n"
- " nop \n"
- " nop \n"
- " .set mips0 \n"
- : : "r" (au1k_wait));
-}
-
-static int __initdata nowait = 0;
-
-static int __init wait_disable(char *s)
-{
- nowait = 1;
+ cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+ mips_dsp_disabled = 1;
return 1;
}
-__setup("nowait", wait_disable);
-
-static inline void check_wait(void)
-{
- struct cpuinfo_mips *c = &current_cpu_data;
-
- if (nowait) {
- printk("Wait instruction disabled.\n");
- return;
- }
-
- switch (c->cputype) {
- case CPU_R3081:
- case CPU_R3081E:
- cpu_wait = r3081_wait;
- break;
- case CPU_TX3927:
- cpu_wait = r39xx_wait;
- break;
- case CPU_R4200:
-/* case CPU_R4300: */
- case CPU_R4600:
- case CPU_R4640:
- case CPU_R4650:
- case CPU_R4700:
- case CPU_R5000:
- case CPU_NEVADA:
- case CPU_4KC:
- case CPU_4KEC:
- case CPU_4KSC:
- case CPU_5KC:
- case CPU_25KF:
- case CPU_PR4450:
- case CPU_BCM3302:
- cpu_wait = r4k_wait;
- break;
-
- case CPU_RM7000:
- cpu_wait = rm7k_wait_irqoff;
- break;
-
- case CPU_24K:
- case CPU_34K:
- cpu_wait = r4k_wait;
- if (read_c0_config7() & MIPS_CONF7_WII)
- cpu_wait = r4k_wait_irqoff;
- break;
-
- case CPU_74K:
- cpu_wait = r4k_wait;
- if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
- cpu_wait = r4k_wait_irqoff;
- break;
-
- case CPU_TX49XX:
- cpu_wait = r4k_wait_irqoff;
- break;
- case CPU_AU1000:
- case CPU_AU1100:
- case CPU_AU1500:
- case CPU_AU1550:
- case CPU_AU1200:
- case CPU_AU1210:
- case CPU_AU1250:
- if (allow_au1k_wait)
- cpu_wait = au1k_wait;
- break;
- case CPU_20KC:
- /*
- * WAIT on Rev1.0 has E1, E2, E3 and E16.
- * WAIT on Rev2.0 and Rev3.0 has E16.
- * Rev3.1 WAIT is nop, why bother
- */
- if ((c->processor_id & 0xff) <= 0x64)
- break;
-
- /*
- * Another rev is incrementing c0_count at a reduced clock
- * rate while in WAIT mode. So we basically have the choice
- * between using the cp0 timer as clocksource or avoiding
- * the WAIT instruction. Until more details are known,
- * disable the use of WAIT for 20Kc entirely.
- cpu_wait = r4k_wait;
- */
- break;
- case CPU_RM9000:
- if ((c->processor_id & 0x00ff) >= 0x40)
- cpu_wait = r4k_wait;
- break;
- default:
- break;
- }
-}
+__setup("nodsp", dsp_disable);
static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- switch (c->cputype) {
+ switch (current_cpu_type()) {
case CPU_34K:
/*
* Erratum "RPS May Cause Incorrect Instruction Execution"
- * This code only handles VPE0, any SMP/SMTC/RTOS code
+ * This code only handles VPE0, any SMP/RTOS code
 * making use of VPE1 will be responsible for that VPE.
*/
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
@@ -241,7 +75,6 @@ static inline void check_errata(void)
void __init check_bugs32(void)
{
- check_wait();
check_errata();
}
@@ -267,6 +100,12 @@ static inline int cpu_has_confreg(void)
#endif
}
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
/*
* Get the FPU Implementation/Revision.
*/
@@ -275,7 +114,7 @@ static inline unsigned long cpu_get_fpu_id(void)
unsigned long tmp, fpu_id;
tmp = read_c0_status();
- __enable_fpu();
+ __enable_fpu(FPU_AS_IS);
fpu_id = read_32bit_cp1_register(CP1_REVISION);
write_c0_status(tmp);
return fpu_id;
@@ -286,103 +125,449 @@ static inline unsigned long cpu_get_fpu_id(void)
*/
static inline int __cpu_has_fpu(void)
{
- return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
+ return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
+}
+
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, conf5, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ conf5 = read_c0_config5();
+ enable_msa();
+ msa_id = read_msa_ir();
+ write_c0_config5(conf5);
+ write_c0_status(status);
+ return msa_id;
+}
+
+static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+{
+#ifdef __NEED_VMBITS_PROBE
+ write_c0_entryhi(0x3fffffffffffe000ULL);
+ back_to_back_c0_hazard();
+ c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+#endif
+}
+
+static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+ switch (isa) {
+ case MIPS_CPU_ISA_M64R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ case MIPS_CPU_ISA_M64R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ case MIPS_CPU_ISA_V:
+ c->isa_level |= MIPS_CPU_ISA_V;
+ case MIPS_CPU_ISA_IV:
+ c->isa_level |= MIPS_CPU_ISA_IV;
+ case MIPS_CPU_ISA_III:
+ c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
+ break;
+
+ case MIPS_CPU_ISA_M32R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2;
+ case MIPS_CPU_ISA_M32R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1;
+ case MIPS_CPU_ISA_II:
+ c->isa_level |= MIPS_CPU_ISA_II;
+ break;
+ }
+}
+
+static char unknown_isa[] = KERN_ERR \
+ "Unsupported ISA type, c0.config0: %d.";
+
+static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+{
+ unsigned int config6;
+
+ /* It's implementation dependent how the FTLB can be enabled */
+ switch (c->cputype) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ /* proAptiv & related cores use Config6 to enable the FTLB */
+ config6 = read_c0_config6();
+ if (enable)
+ /* Enable FTLB */
+ write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+ else
+ /* Disable FTLB */
+ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+ back_to_back_c0_hazard();
+ break;
+ }
+}
+
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+ int isa;
+
+ config0 = read_c0_config();
+
+ /*
+ * Look for Standard TLB or Dual VTLB and FTLB
+ */
+ if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
+ (((config0 & MIPS_CONF_MT) >> 7) == 4))
+ c->options |= MIPS_CPU_TLB;
+
+ isa = (config0 & MIPS_CONF_AT) >> 13;
+ switch (isa) {
+ case 0:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M32R2);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ case 2:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ default:
+ goto unknown;
+ }
+
+ return config0 & MIPS_CONF_M;
+
+unknown:
+ panic(unknown_isa, config0);
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1;
+
+ config1 = read_c0_config1();
+
+ if (config1 & MIPS_CONF1_MD)
+ c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_WR)
+ c->options |= MIPS_CPU_WATCH;
+ if (config1 & MIPS_CONF1_CA)
+ c->ases |= MIPS_ASE_MIPS16;
+ if (config1 & MIPS_CONF1_EP)
+ c->options |= MIPS_CPU_EJTAG;
+ if (config1 & MIPS_CONF1_FP) {
+ c->options |= MIPS_CPU_FPU;
+ c->options |= MIPS_CPU_32FPR;
+ }
+ if (cpu_has_tlb) {
+ c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+ c->tlbsizevtlb = c->tlbsize;
+ c->tlbsizeftlbsets = 0;
+ }
+
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ config2 = read_c0_config2();
+
+ if (config2 & MIPS_CONF2_SL)
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3;
+
+ config3 = read_c0_config3();
+
+ if (config3 & MIPS_CONF3_SM) {
+ c->ases |= MIPS_ASE_SMARTMIPS;
+ c->options |= MIPS_CPU_RIXI;
+ }
+ if (config3 & MIPS_CONF3_RXI)
+ c->options |= MIPS_CPU_RIXI;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
+ if (config3 & MIPS_CONF3_DSP2P)
+ c->ases |= MIPS_ASE_DSP2P;
+ if (config3 & MIPS_CONF3_VINT)
+ c->options |= MIPS_CPU_VINT;
+ if (config3 & MIPS_CONF3_VEIC)
+ c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_MT)
+ c->ases |= MIPS_ASE_MIPSMT;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->options |= MIPS_CPU_ULRI;
+ if (config3 & MIPS_CONF3_ISA)
+ c->options |= MIPS_CPU_MICROMIPS;
+ if (config3 & MIPS_CONF3_VZ)
+ c->ases |= MIPS_ASE_VZ;
+ if (config3 & MIPS_CONF3_SC)
+ c->options |= MIPS_CPU_SEGMENTS;
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
+
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+ unsigned int newcf4;
+ unsigned int mmuextdef;
+ unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
+
+ config4 = read_c0_config4();
+
+ if (cpu_has_tlb) {
+ if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
+ c->options |= MIPS_CPU_TLBINV;
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ switch (mmuextdef) {
+ case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+ c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+ c->tlbsizevtlb = c->tlbsize;
+ break;
+ case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+ c->tlbsizevtlb +=
+ ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
+ c->tlbsize = c->tlbsizevtlb;
+ ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
+ /* fall through */
+ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ newcf4 = (config4 & ~ftlb_page) |
+ (page_size_ftlb(mmuextdef) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
+ PAGE_SIZE, config4);
+ /* Switch FTLB off */
+ set_ftlb_enable(c, 0);
+ break;
+ }
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+ break;
+ }
+ }
+
+ c->kscratch_mask = (config4 >> 16) & 0xff;
+
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5;
+
+ config5 = read_c0_config5();
+ config5 &= ~MIPS_CONF5_UFR;
+ write_c0_config5(config5);
+
+ if (config5 & MIPS_CONF5_EVA)
+ c->options |= MIPS_CPU_EVA;
+
+ return config5 & MIPS_CONF_M;
+}
+
+static void decode_configs(struct cpuinfo_mips *c)
+{
+ int ok;
+
+ /* MIPS32 or MIPS64 compliant CPU. */
+ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+
+ c->scache.flags = MIPS_CACHE_NOT_PRESENT;
+
+ /* Enable FTLB if present */
+ set_ftlb_enable(c, 1);
+
+ ok = decode_config0(c); /* Read Config registers. */
+ BUG_ON(!ok); /* Arch spec violation! */
+ if (ok)
+ ok = decode_config1(c);
+ if (ok)
+ ok = decode_config2(c);
+ if (ok)
+ ok = decode_config3(c);
+ if (ok)
+ ok = decode_config4(c);
+ if (ok)
+ ok = decode_config5(c);
+
+ mips_probe_watch_registers(c);
+
+#ifndef CONFIG_MIPS_CPS
+ if (cpu_has_mips_r2) {
+ c->core = get_ebase_cpunum();
+ if (cpu_has_mipsmt)
+ c->core >>= fls(core_nvpes()) - 1;
+ }
+#endif
}
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
-static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
+static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
{
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
- c->isa_level = MIPS_CPU_ISA_I;
+ __cpu_name[cpu] = "R2000";
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R3000:
- if ((c->processor_id & 0xff) == PRID_REV_R3000A)
- if (cpu_has_confreg())
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
+ if (cpu_has_confreg()) {
c->cputype = CPU_R3081E;
- else
+ __cpu_name[cpu] = "R3081";
+ } else {
c->cputype = CPU_R3000A;
- else
+ __cpu_name[cpu] = "R3000A";
+ }
+ } else {
c->cputype = CPU_R3000;
- c->isa_level = MIPS_CPU_ISA_I;
+ __cpu_name[cpu] = "R3000";
+ }
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
break;
case PRID_IMP_R4000:
if (read_c0_config() & CONF_SC) {
- if ((c->processor_id & 0xff) >= PRID_REV_R4400)
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
c->cputype = CPU_R4400PC;
- else
+ __cpu_name[cpu] = "R4400PC";
+ } else {
c->cputype = CPU_R4000PC;
+ __cpu_name[cpu] = "R4000PC";
+ }
} else {
- if ((c->processor_id & 0xff) >= PRID_REV_R4400)
- c->cputype = CPU_R4400SC;
- else
- c->cputype = CPU_R4000SC;
+ int cca = read_c0_config() & CONF_CM_CMASK;
+ int mc;
+
+ /*
+ * SC and MC versions can't be reliably told apart,
+ * but only the latter support coherent caching
+ * modes so assume the firmware has set the KSEG0
+ * coherency attribute reasonably (if uncached, we
+ * assume SC).
+ */
+ switch (cca) {
+ case CONF_CM_CACHABLE_CE:
+ case CONF_CM_CACHABLE_COW:
+ case CONF_CM_CACHABLE_CUW:
+ mc = 1;
+ break;
+ default:
+ mc = 0;
+ break;
+ }
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
+ c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+ __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
+ } else {
+ c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+ __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
+ }
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_VCE |
- MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_VCE |
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->options = R4K_OPTS;
+ c->tlbsize = 32;
switch (c->processor_id & 0xf0) {
case PRID_REV_VR4111:
c->cputype = CPU_VR4111;
+ __cpu_name[cpu] = "NEC VR4111";
break;
case PRID_REV_VR4121:
c->cputype = CPU_VR4121;
+ __cpu_name[cpu] = "NEC VR4121";
break;
case PRID_REV_VR4122:
- if ((c->processor_id & 0xf) < 0x3)
+ if ((c->processor_id & 0xf) < 0x3) {
c->cputype = CPU_VR4122;
- else
+ __cpu_name[cpu] = "NEC VR4122";
+ } else {
c->cputype = CPU_VR4181A;
+ __cpu_name[cpu] = "NEC VR4181A";
+ }
break;
case PRID_REV_VR4130:
- if ((c->processor_id & 0xf) < 0x4)
+ if ((c->processor_id & 0xf) < 0x4) {
c->cputype = CPU_VR4131;
- else
+ __cpu_name[cpu] = "NEC VR4131";
+ } else {
c->cputype = CPU_VR4133;
+ c->options |= MIPS_CPU_LLSC;
+ __cpu_name[cpu] = "NEC VR4133";
+ }
break;
default:
printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n");
c->cputype = CPU_VR41XX;
+ __cpu_name[cpu] = "NEC Vr41xx";
break;
}
- c->isa_level = MIPS_CPU_ISA_III;
- c->options = R4K_OPTS;
- c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4300";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4600";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#if 0
- case PRID_IMP_R4650:
+ case PRID_IMP_R4650:
/*
* This processor doesn't have an MMU, so it's not
* "real easy" to run Linux on it. It is left purely
@@ -390,44 +575,46 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
 * its c0_prid id number with the TX3900.
*/
c->cputype = CPU_R4650;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4650";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
- c->tlbsize = 48;
+ c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
- c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
c->cputype = CPU_TX3927;
+ __cpu_name[cpu] = "TX3927";
c->tlbsize = 64;
} else {
- switch (c->processor_id & 0xff) {
+ switch (c->processor_id & PRID_REV_MASK) {
case PRID_REV_TX3912:
c->cputype = CPU_TX3912;
+ __cpu_name[cpu] = "TX3912";
c->tlbsize = 32;
break;
case PRID_REV_TX3922:
c->cputype = CPU_TX3922;
+ __cpu_name[cpu] = "TX3922";
c->tlbsize = 64;
break;
- default:
- c->cputype = CPU_UNKNOWN;
- break;
}
}
break;
case PRID_IMP_R4700:
c->cputype = CPU_R4700;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R4700";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_TX49:
c->cputype = CPU_TX49XX;
- c->isa_level = MIPS_CPU_ISA_III;
+ __cpu_name[cpu] = "R49XX";
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
@@ -435,503 +622,558 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
break;
case PRID_IMP_R5000:
c->cputype = CPU_R5000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R5000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5432:
c->cputype = CPU_R5432;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R5432";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5500:
c->cputype = CPU_R5500;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R5500";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_NEVADA:
c->cputype = CPU_NEVADA;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "Nevada";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R6000:
c->cputype = CPU_R6000;
- c->isa_level = MIPS_CPU_ISA_II;
+ __cpu_name[cpu] = "R6000";
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R6000A:
c->cputype = CPU_R6000A;
- c->isa_level = MIPS_CPU_ISA_II;
+ __cpu_name[cpu] = "R6000A";
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "RM7000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
- * Undocumented RM7000: Bit 29 in the info register of
+ * Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
* entries.
*
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
- */
- c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
- break;
- case PRID_IMP_RM9000:
- c->cputype = CPU_RM9000;
- c->isa_level = MIPS_CPU_ISA_IV;
- c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
- /*
- * Bit 29 in the info register of the RM9000
- * indicates if the TLB has 48 or 64 entries.
- *
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
+ * 29 1 => 64 entry JTLB
+ * 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R8000:
c->cputype = CPU_R8000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "RM8000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
break;
case PRID_IMP_R10000:
c->cputype = CPU_R10000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R10000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R12000:
c->cputype = CPU_R12000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R12000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
- c->isa_level = MIPS_CPU_ISA_IV;
+ __cpu_name[cpu] = "R14000";
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
- case PRID_IMP_LOONGSON2:
- c->cputype = CPU_LOONGSON2;
- c->isa_level = MIPS_CPU_ISA_III;
+ case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2E:
+ c->cputype = CPU_LOONGSON2;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2e");
+ break;
+ case PRID_REV_LOONGSON2F:
+ c->cputype = CPU_LOONGSON2;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2f");
+ break;
+ case PRID_REV_LOONGSON3A:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ break;
+ }
+
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS |
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
c->tlbsize = 64;
break;
- }
-}
-
-static char unknown_isa[] __cpuinitdata = KERN_ERR \
- "Unsupported ISA type, c0.config0: %d.";
+ case PRID_IMP_LOONGSON_32: /* Loongson-1 */
+ decode_configs(c);
-static inline unsigned int decode_config0(struct cpuinfo_mips *c)
-{
- unsigned int config0;
- int isa;
+ c->cputype = CPU_LOONGSON1;
- config0 = read_c0_config();
-
- if (((config0 & MIPS_CONF_MT) >> 7) == 1)
- c->options |= MIPS_CPU_TLB;
- isa = (config0 & MIPS_CONF_AT) >> 13;
- switch (isa) {
- case 0:
- switch ((config0 & MIPS_CONF_AR) >> 10) {
- case 0:
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
+ __cpu_name[cpu] = "Loongson 1B";
break;
- case 1:
- c->isa_level = MIPS_CPU_ISA_M32R2;
- break;
- default:
- goto unknown;
- }
- break;
- case 2:
- switch ((config0 & MIPS_CONF_AR) >> 10) {
- case 0:
- c->isa_level = MIPS_CPU_ISA_M64R1;
- break;
- case 1:
- c->isa_level = MIPS_CPU_ISA_M64R2;
- break;
- default:
- goto unknown;
}
- break;
- default:
- goto unknown;
- }
-
- return config0 & MIPS_CONF_M;
-
-unknown:
- panic(unknown_isa, config0);
-}
-
-static inline unsigned int decode_config1(struct cpuinfo_mips *c)
-{
- unsigned int config1;
-
- config1 = read_c0_config1();
- if (config1 & MIPS_CONF1_MD)
- c->ases |= MIPS_ASE_MDMX;
- if (config1 & MIPS_CONF1_WR)
- c->options |= MIPS_CPU_WATCH;
- if (config1 & MIPS_CONF1_CA)
- c->ases |= MIPS_ASE_MIPS16;
- if (config1 & MIPS_CONF1_EP)
- c->options |= MIPS_CPU_EJTAG;
- if (config1 & MIPS_CONF1_FP) {
- c->options |= MIPS_CPU_FPU;
- c->options |= MIPS_CPU_32FPR;
+ break;
}
- if (cpu_has_tlb)
- c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
-
- return config1 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config2(struct cpuinfo_mips *c)
-{
- unsigned int config2;
-
- config2 = read_c0_config2();
-
- if (config2 & MIPS_CONF2_SL)
- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
-
- return config2 & MIPS_CONF_M;
}
-static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
{
- unsigned int config3;
-
- config3 = read_c0_config3();
-
- if (config3 & MIPS_CONF3_SM)
- c->ases |= MIPS_ASE_SMARTMIPS;
- if (config3 & MIPS_CONF3_DSP)
- c->ases |= MIPS_ASE_DSP;
- if (config3 & MIPS_CONF3_VINT)
- c->options |= MIPS_CPU_VINT;
- if (config3 & MIPS_CONF3_VEIC)
- c->options |= MIPS_CPU_VEIC;
- if (config3 & MIPS_CONF3_MT)
- c->ases |= MIPS_ASE_MIPSMT;
- if (config3 & MIPS_CONF3_ULRI)
- c->options |= MIPS_CPU_ULRI;
-
- return config3 & MIPS_CONF_M;
-}
-
-static void __cpuinit decode_configs(struct cpuinfo_mips *c)
-{
- /* MIPS32 or MIPS64 compliant CPU. */
- c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
-
- c->scache.flags = MIPS_CACHE_NOT_PRESENT;
-
- /* Read Config registers. */
- if (!decode_config0(c))
- return; /* actually worth a panic() */
- if (!decode_config1(c))
- return;
- if (!decode_config2(c))
- return;
- if (!decode_config3(c))
- return;
-}
-
-static inline void cpu_probe_mips(struct cpuinfo_mips *c)
-{
- decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
+ __cpu_name[cpu] = "MIPS 4Kc";
break;
case PRID_IMP_4KEC:
- c->cputype = CPU_4KEC;
- break;
case PRID_IMP_4KECR2:
c->cputype = CPU_4KEC;
+ __cpu_name[cpu] = "MIPS 4KEc";
break;
case PRID_IMP_4KSC:
case PRID_IMP_4KSD:
c->cputype = CPU_4KSC;
+ __cpu_name[cpu] = "MIPS 4KSc";
break;
case PRID_IMP_5KC:
c->cputype = CPU_5KC;
+ __cpu_name[cpu] = "MIPS 5Kc";
+ break;
+ case PRID_IMP_5KE:
+ c->cputype = CPU_5KE;
+ __cpu_name[cpu] = "MIPS 5KE";
break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
+ __cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
+ c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24Kc";
+ break;
case PRID_IMP_24KE:
c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24KEc";
break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
+ __cpu_name[cpu] = "MIPS 25Kc";
break;
case PRID_IMP_34K:
c->cputype = CPU_34K;
+ __cpu_name[cpu] = "MIPS 34Kc";
break;
case PRID_IMP_74K:
c->cputype = CPU_74K;
+ __cpu_name[cpu] = "MIPS 74Kc";
+ break;
+ case PRID_IMP_M14KC:
+ c->cputype = CPU_M14KC;
+ __cpu_name[cpu] = "MIPS M14Kc";
+ break;
+ case PRID_IMP_M14KEC:
+ c->cputype = CPU_M14KEC;
+ __cpu_name[cpu] = "MIPS M14KEc";
+ break;
+ case PRID_IMP_1004K:
+ c->cputype = CPU_1004K;
+ __cpu_name[cpu] = "MIPS 1004Kc";
+ break;
+ case PRID_IMP_1074K:
+ c->cputype = CPU_1074K;
+ __cpu_name[cpu] = "MIPS 1074Kc";
+ break;
+ case PRID_IMP_INTERAPTIV_UP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv";
+ break;
+ case PRID_IMP_INTERAPTIV_MP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv (multi)";
+ break;
+ case PRID_IMP_PROAPTIV_UP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv";
+ break;
+ case PRID_IMP_PROAPTIV_MP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv (multi)";
+ break;
+ case PRID_IMP_P5600:
+ c->cputype = CPU_P5600;
+ __cpu_name[cpu] = "MIPS P5600";
+ break;
+ case PRID_IMP_M5150:
+ c->cputype = CPU_M5150;
+ __cpu_name[cpu] = "MIPS M5150";
break;
}
+
+ decode_configs(c);
+
+ spram_config();
}
-static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
+static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_AU1_REV1:
case PRID_IMP_AU1_REV2:
+ c->cputype = CPU_ALCHEMY;
switch ((c->processor_id >> 24) & 0xff) {
case 0:
- c->cputype = CPU_AU1000;
+ __cpu_name[cpu] = "Au1000";
break;
case 1:
- c->cputype = CPU_AU1500;
+ __cpu_name[cpu] = "Au1500";
break;
case 2:
- c->cputype = CPU_AU1100;
+ __cpu_name[cpu] = "Au1100";
break;
case 3:
- c->cputype = CPU_AU1550;
+ __cpu_name[cpu] = "Au1550";
break;
case 4:
- c->cputype = CPU_AU1200;
- if (2 == (c->processor_id & 0xff))
- c->cputype = CPU_AU1250;
+ __cpu_name[cpu] = "Au1200";
+ if ((c->processor_id & PRID_REV_MASK) == 2)
+ __cpu_name[cpu] = "Au1250";
break;
case 5:
- c->cputype = CPU_AU1210;
+ __cpu_name[cpu] = "Au1210";
break;
default:
- panic("Unknown Au Core!");
+ __cpu_name[cpu] = "Au1xxx";
break;
}
break;
}
}
-static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
+static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
+ __cpu_name[cpu] = "SiByte SB1";
/* FPU in pass1 is known to have issues. */
- if ((c->processor_id & 0xff) < 0x02)
+ if ((c->processor_id & PRID_REV_MASK) < 0x02)
c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
break;
case PRID_IMP_SB1A:
c->cputype = CPU_SB1A;
+ __cpu_name[cpu] = "SiByte SB1A";
break;
}
}
-static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
+static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_SR71000:
c->cputype = CPU_SR71000;
+ __cpu_name[cpu] = "Sandcraft SR71000";
c->scache.ways = 8;
c->tlbsize = 64;
break;
}
}
-static inline void cpu_probe_philips(struct cpuinfo_mips *c)
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
+ switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
- c->isa_level = MIPS_CPU_ISA_M32R1;
- break;
- default:
- panic("Unknown Philips Core!"); /* REVISIT: die? */
+ __cpu_name[cpu] = "Philips PR4450";
+ set_isa(c, MIPS_CPU_ISA_M32R1);
break;
}
}
-
-static inline void cpu_probe_broadcom(struct cpuinfo_mips *c)
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
- switch (c->processor_id & 0xff00) {
- case PRID_IMP_BCM3302:
- c->cputype = CPU_BCM3302;
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_BMIPS32_REV4:
+ case PRID_IMP_BMIPS32_REV8:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ set_elf_platform(cpu, "bmips32");
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ set_elf_platform(cpu, "bmips3300");
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & PRID_REV_MASK;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ set_elf_platform(cpu, "bmips4380");
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ set_elf_platform(cpu, "bmips4350");
+ }
break;
- case PRID_IMP_BCM4710:
- c->cputype = CPU_BCM4710;
+ }
+ case PRID_IMP_BMIPS5000:
+ c->cputype = CPU_BMIPS5000;
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ set_elf_platform(cpu, "bmips5000");
+ c->options |= MIPS_CPU_ULRI;
+ break;
+ }
+}
+
+static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_CAVIUM_CN38XX:
+ case PRID_IMP_CAVIUM_CN31XX:
+ case PRID_IMP_CAVIUM_CN30XX:
+ c->cputype = CPU_CAVIUM_OCTEON;
+ __cpu_name[cpu] = "Cavium Octeon";
+ goto platform;
+ case PRID_IMP_CAVIUM_CN58XX:
+ case PRID_IMP_CAVIUM_CN56XX:
+ case PRID_IMP_CAVIUM_CN50XX:
+ case PRID_IMP_CAVIUM_CN52XX:
+ c->cputype = CPU_CAVIUM_OCTEON_PLUS;
+ __cpu_name[cpu] = "Cavium Octeon+";
+platform:
+ set_elf_platform(cpu, "octeon");
+ break;
+ case PRID_IMP_CAVIUM_CN61XX:
+ case PRID_IMP_CAVIUM_CN63XX:
+ case PRID_IMP_CAVIUM_CN66XX:
+ case PRID_IMP_CAVIUM_CN68XX:
+ case PRID_IMP_CAVIUM_CNF71XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ set_elf_platform(cpu, "octeon2");
+ break;
+ case PRID_IMP_CAVIUM_CN70XX:
+ case PRID_IMP_CAVIUM_CN78XX:
+ c->cputype = CPU_CAVIUM_OCTEON3;
+ __cpu_name[cpu] = "Cavium Octeon III";
+ set_elf_platform(cpu, "octeon3");
break;
default:
+ printk(KERN_INFO "Unknown Octeon chip!\n");
c->cputype = CPU_UNKNOWN;
break;
}
}
-const char *__cpu_name[NR_CPUS];
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ /* JZRISC does not implement the CP0 counter. */
+ c->options &= ~MIPS_CPU_COUNTER;
+ BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_JZRISC:
+ c->cputype = CPU_JZRISC;
+ __cpu_name[cpu] = "Ingenic JZRISC";
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
-/*
- * Name a CPU
- */
-static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
+static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
{
- const char *name = NULL;
+ decode_configs(c);
+
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) {
+ c->cputype = CPU_ALCHEMY;
+ __cpu_name[cpu] = "Au1300";
+ /* following stuff is not for Alchemy */
+ return;
+ }
+
+ c->options = (MIPS_CPU_TLB |
+ MIPS_CPU_4KEX |
+ MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC |
+ MIPS_CPU_WATCH |
+ MIPS_CPU_EJTAG |
+ MIPS_CPU_LLSC);
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Broadcom XLPII";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ c->cputype = CPU_XLP;
+ __cpu_name[cpu] = "Netlogic XLP";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ c->cputype = CPU_XLR;
+ __cpu_name[cpu] = "Netlogic XLR";
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS608:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS208:
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS108:
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS616B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS412B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ c->cputype = CPU_XLR;
+ __cpu_name[cpu] = "Netlogic XLS";
+ break;
- switch (c->cputype) {
- case CPU_UNKNOWN: name = "unknown"; break;
- case CPU_R2000: name = "R2000"; break;
- case CPU_R3000: name = "R3000"; break;
- case CPU_R3000A: name = "R3000A"; break;
- case CPU_R3041: name = "R3041"; break;
- case CPU_R3051: name = "R3051"; break;
- case CPU_R3052: name = "R3052"; break;
- case CPU_R3081: name = "R3081"; break;
- case CPU_R3081E: name = "R3081E"; break;
- case CPU_R4000PC: name = "R4000PC"; break;
- case CPU_R4000SC: name = "R4000SC"; break;
- case CPU_R4000MC: name = "R4000MC"; break;
- case CPU_R4200: name = "R4200"; break;
- case CPU_R4400PC: name = "R4400PC"; break;
- case CPU_R4400SC: name = "R4400SC"; break;
- case CPU_R4400MC: name = "R4400MC"; break;
- case CPU_R4600: name = "R4600"; break;
- case CPU_R6000: name = "R6000"; break;
- case CPU_R6000A: name = "R6000A"; break;
- case CPU_R8000: name = "R8000"; break;
- case CPU_R10000: name = "R10000"; break;
- case CPU_R12000: name = "R12000"; break;
- case CPU_R14000: name = "R14000"; break;
- case CPU_R4300: name = "R4300"; break;
- case CPU_R4650: name = "R4650"; break;
- case CPU_R4700: name = "R4700"; break;
- case CPU_R5000: name = "R5000"; break;
- case CPU_R5000A: name = "R5000A"; break;
- case CPU_R4640: name = "R4640"; break;
- case CPU_NEVADA: name = "Nevada"; break;
- case CPU_RM7000: name = "RM7000"; break;
- case CPU_RM9000: name = "RM9000"; break;
- case CPU_R5432: name = "R5432"; break;
- case CPU_4KC: name = "MIPS 4Kc"; break;
- case CPU_5KC: name = "MIPS 5Kc"; break;
- case CPU_R4310: name = "R4310"; break;
- case CPU_SB1: name = "SiByte SB1"; break;
- case CPU_SB1A: name = "SiByte SB1A"; break;
- case CPU_TX3912: name = "TX3912"; break;
- case CPU_TX3922: name = "TX3922"; break;
- case CPU_TX3927: name = "TX3927"; break;
- case CPU_AU1000: name = "Au1000"; break;
- case CPU_AU1500: name = "Au1500"; break;
- case CPU_AU1100: name = "Au1100"; break;
- case CPU_AU1550: name = "Au1550"; break;
- case CPU_AU1200: name = "Au1200"; break;
- case CPU_AU1210: name = "Au1210"; break;
- case CPU_AU1250: name = "Au1250"; break;
- case CPU_4KEC: name = "MIPS 4KEc"; break;
- case CPU_4KSC: name = "MIPS 4KSc"; break;
- case CPU_VR41XX: name = "NEC Vr41xx"; break;
- case CPU_R5500: name = "R5500"; break;
- case CPU_TX49XX: name = "TX49xx"; break;
- case CPU_20KC: name = "MIPS 20Kc"; break;
- case CPU_24K: name = "MIPS 24K"; break;
- case CPU_25KF: name = "MIPS 25Kf"; break;
- case CPU_34K: name = "MIPS 34K"; break;
- case CPU_74K: name = "MIPS 74K"; break;
- case CPU_VR4111: name = "NEC VR4111"; break;
- case CPU_VR4121: name = "NEC VR4121"; break;
- case CPU_VR4122: name = "NEC VR4122"; break;
- case CPU_VR4131: name = "NEC VR4131"; break;
- case CPU_VR4133: name = "NEC VR4133"; break;
- case CPU_VR4181: name = "NEC VR4181"; break;
- case CPU_VR4181A: name = "NEC VR4181A"; break;
- case CPU_SR71000: name = "Sandcraft SR71000"; break;
- case CPU_BCM3302: name = "Broadcom BCM3302"; break;
- case CPU_BCM4710: name = "Broadcom BCM4710"; break;
- case CPU_PR4450: name = "Philips PR4450"; break;
- case CPU_LOONGSON2: name = "ICT Loongson-2"; break;
default:
- BUG();
+ pr_info("Unknown Netlogic chip id [%02x]!\n",
+ c->processor_id);
+ c->cputype = CPU_XLR;
+ break;
}
- return name;
+ if (c->cputype == CPU_XLP) {
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
+ /* This will be updated again after all threads are woken up */
+ c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+ } else {
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
+ }
+ c->kscratch_mask = 0xf;
}
-__cpuinit void cpu_probe(void)
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_name[NR_CPUS];
+const char *__elf_platform;
+
+void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
- c->processor_id = PRID_IMP_UNKNOWN;
+ c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
c->processor_id = read_c0_prid();
- switch (c->processor_id & 0xff0000) {
+ switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LEGACY:
- cpu_probe_legacy(c);
+ cpu_probe_legacy(c, cpu);
break;
case PRID_COMP_MIPS:
- cpu_probe_mips(c);
+ cpu_probe_mips(c, cpu);
break;
case PRID_COMP_ALCHEMY:
- cpu_probe_alchemy(c);
+ cpu_probe_alchemy(c, cpu);
break;
case PRID_COMP_SIBYTE:
- cpu_probe_sibyte(c);
+ cpu_probe_sibyte(c, cpu);
break;
case PRID_COMP_BROADCOM:
- cpu_probe_broadcom(c);
+ cpu_probe_broadcom(c, cpu);
break;
case PRID_COMP_SANDCRAFT:
- cpu_probe_sandcraft(c);
+ cpu_probe_sandcraft(c, cpu);
break;
- case PRID_COMP_PHILIPS:
- cpu_probe_philips(c);
+ case PRID_COMP_NXP:
+ cpu_probe_nxp(c, cpu);
+ break;
+ case PRID_COMP_CAVIUM:
+ cpu_probe_cavium(c, cpu);
+ break;
+ case PRID_COMP_INGENIC:
+ cpu_probe_ingenic(c, cpu);
+ break;
+ case PRID_COMP_NETLOGIC:
+ cpu_probe_netlogic(c, cpu);
break;
- default:
- c->cputype = CPU_UNKNOWN;
}
+ BUG_ON(!__cpu_name[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
/*
* Platform code can force the cpu type to optimize code
* generation. In that case be sure the cpu type is correctly
@@ -939,32 +1181,52 @@ __cpuinit void cpu_probe(void)
*/
BUG_ON(current_cpu_type() != c->cputype);
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (mips_dsp_disabled)
+ c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
- if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
- c->isa_level == MIPS_CPU_ISA_M32R2 ||
- c->isa_level == MIPS_CPU_ISA_M64R1 ||
- c->isa_level == MIPS_CPU_ISA_M64R2) {
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
}
- __cpu_name[cpu] = cpu_to_name(c);
-
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+ /* R2 has Performance Counter Interrupt indicator */
+ c->options |= MIPS_CPU_PCI;
+ }
else
c->srsets = 1;
+
+ if (cpu_has_msa) {
+ c->msa_id = cpu_get_msa_id();
+ WARN(c->msa_id & MSA_IR_WRPF,
+ "Vector register partitioning unimplemented!");
+ }
+
+ cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
}
-__cpuinit void cpu_report(void)
+void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk(KERN_INFO "CPU revision is: %08x (%s)\n",
- c->processor_id, cpu_name_string());
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+ if (cpu_has_msa)
+ pr_info("MSA revision is: %08x\n", c->msa_id);
}
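One theme of the rewrite above is replacing the magic masks 0xff0000, 0xff00
and 0xff with PRID_COMP_MASK, PRID_IMP_MASK and PRID_REV_MASK. A minimal
sketch of the resulting PRId decomposition that cpu_probe() dispatches on;
the shift amounts are inferred from those masks and the sample value is
hypothetical:

#include <stdio.h>

/* PRId layout as used by cpu_probe(): bits 23..16 company ID,
 * bits 15..8 implementation (core), bits 7..0 revision.
 */
#define PRID_COMP_MASK	0x00ff0000
#define PRID_IMP_MASK	0x0000ff00
#define PRID_REV_MASK	0x000000ff

static void decompose_prid(unsigned int prid)
{
	printf("company 0x%02x, implementation 0x%02x, revision 0x%02x\n",
	       (prid & PRID_COMP_MASK) >> 16,
	       (prid & PRID_IMP_MASK) >> 8,
	       prid & PRID_REV_MASK);
}

int main(void)
{
	decompose_prid(0x00019300);	/* hypothetical PRId value */
	return 0;
}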
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 00000000000..d21264681e9
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,70 @@
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+
+/* This keeps track of which CPU is crashing. */
+static int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void crash_shutdown_secondary(void *ignore)
+{
+ struct pt_regs *regs;
+ int cpu = smp_processor_id();
+
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ local_irq_disable();
+ if (!cpu_isset(cpu, cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpu_set(cpu, cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+ relocated_kexec_smp_wait(NULL);
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ unsigned int msecs;
+
+	unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic CPU */
+
+ dump_send_ipi(crash_shutdown_secondary);
+ smp_wmb();
+
+ /*
+	 * The crash CPU sends an IPI and waits for the other CPUs to
+	 * respond, allowing up to 10 seconds for them to check in.
+ */
+ pr_emerg("Sending IPI to other cpus...\n");
+ msecs = 10000;
+ while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
+ mdelay(1);
+ }
+}
+
+#else /* !defined(CONFIG_SMP) */
+static void crash_kexec_prepare_cpus(void) {}
+#endif /* !defined(CONFIG_SMP) */
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus();
+ cpu_set(crashing_cpu, cpus_in_crash);
+}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 00000000000..f291cf99b03
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,66 @@
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence we
+ * first copy the data to a pre-allocated kernel page and then copy it
+ * to user space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_atomic_pfn(pfn);
+
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr);
+ } else {
+ if (!kdump_buf_page) {
+ pr_warning("Kdump: Kdump buffer page not allocated\n");
+
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
+ }
+
+ return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+ int ret = 0;
+
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+arch_initcall(kdump_buf_page_init);
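For context, a hedged sketch of how a /proc/vmcore-style reader might drive
copy_oldmem_page() above, one page at a time; read_from_oldmem_sketch() is
an illustrative kernel-context wrapper, not the actual fs/proc/vmcore.c
code:

/* Clamp each chunk to a page boundary so copy_oldmem_page() never has
 * to cross one, then advance the position by what was copied.
 */
static ssize_t read_from_oldmem_sketch(char *buf, size_t count,
				       u64 *ppos, int userbuf)
{
	unsigned long pfn = (unsigned long)(*ppos >> PAGE_SHIFT);
	unsigned long offset = (unsigned long)(*ppos & (PAGE_SIZE - 1));
	size_t nr = min(count, (size_t)(PAGE_SIZE - offset));
	ssize_t ret = copy_oldmem_page(pfn, buf, nr, offset, userbuf);

	if (ret > 0)
		*ppos += ret;
	return ret;
}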
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index 868745e7184..468f3eba413 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -28,14 +28,14 @@
#include <asm/sibyte/sb1250.h>
-static cycle_t bcm1480_hpt_read(void)
+static cycle_t bcm1480_hpt_read(struct clocksource *cs)
{
return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
}
struct clocksource bcm1480_clocksource = {
.name = "zbbus-cycles",
- .rating = 200,
+ .rating = 200,
.read = bcm1480_hpt_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -49,6 +49,5 @@ void __init sb1480_clocksource_init(void)
plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
- clocksource_set_clock(cs, zbbus);
- clocksource_register(cs);
+ clocksource_register_hz(cs, zbbus);
}
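The same conversion recurs in the csrc-r4k.c and csrc-sb1250.c hunks below:
the clocksource_set_clock() plus clocksource_register() pair becomes a
single clocksource_register_hz() call, letting the core derive the
mult/shift pair from the frequency. A generic sketch of the resulting
pattern, with hypothetical names:

static cycle_t my_hpt_read(struct clocksource *cs)
{
	return read_hardware_counter();	/* hypothetical counter read */
}

static struct clocksource my_clocksource = {
	.name	= "my-counter",
	.rating	= 200,
	.read	= my_hpt_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

void __init my_clocksource_init(unsigned int hz)
{
	/* One call: the core computes mult/shift from hz itself. */
	clocksource_register_hz(&my_clocksource, hz);
}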
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
new file mode 100644
index 00000000000..e0262090111
--- /dev/null
+++ b/arch/mips/kernel/csrc-gic.c
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/time.h>
+
+#include <asm/gic.h>
+
+static cycle_t gic_hpt_read(struct clocksource *cs)
+{
+ return gic_read_count();
+}
+
+static struct clocksource gic_clocksource = {
+ .name = "GIC",
+ .read = gic_hpt_read,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init gic_clocksource_init(unsigned int frequency)
+{
+ unsigned int config, bits;
+
+ /* Calculate the clocksource mask. */
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
+ bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
+ (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
+
+ /* Set clocksource mask. */
+ gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
+
+ /* Calculate a somewhat reasonable rating value. */
+ gic_clocksource.rating = 200 + frequency / 10000000;
+
+ clocksource_register_hz(&gic_clocksource, frequency);
+}
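A worked reading of the mask setup above: COUNTBITS encodes the GIC counter
width in 4-bit increments on top of a 32-bit base, and the single shift by
(GIC_SH_CONFIG_COUNTBITS_SHF - 2) both extracts the field and multiplies it
by four. Equivalently, derived from the code rather than a datasheet:

static unsigned int gic_counter_width(unsigned int countbits_field)
{
	return 32 + 4 * countbits_field;	/* e.g. field 4 -> 48 bits */
}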
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644
index 00000000000..6cbbf6e106b
--- /dev/null
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -0,0 +1,69 @@
+/*
+ * DEC I/O ASIC's counter clocksource
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+
+static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+static struct clocksource clocksource_dec = {
+ .name = "dec-ioasic",
+ .read = dec_ioasic_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init dec_ioasic_clocksource_init(void)
+{
+ unsigned int freq;
+ u32 start, end;
+ int i = HZ / 8;
+
+ ds1287_timer_state();
+ while (!ds1287_timer_state())
+ ;
+
+ start = dec_ioasic_hpt_read(&clocksource_dec);
+
+ while (i--)
+ while (!ds1287_timer_state())
+ ;
+
+ end = dec_ioasic_hpt_read(&clocksource_dec);
+
+ freq = (end - start) * 8;
+
+ /* An early revision of the I/O ASIC didn't have the counter. */
+ if (!freq)
+ return -ENXIO;
+
+ printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+
+ clocksource_dec.rating = 200 + freq / 10000000;
+ clocksource_register_hz(&clocksource_dec, freq);
+ return 0;
+}
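The calibration above relies on the DS1287 interrupt running at HZ: waiting
out HZ/8 timer periods spans (HZ/8) / HZ = 1/8 second, so the counter
frequency is the observed delta times eight. The arithmetic, spelled out:

/* freq = delta_ticks / window_seconds with window = 1/8 s, hence "* 8". */
static unsigned int ioasic_freq_from_delta(u32 start, u32 end)
{
	return (end - start) * 8;
}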
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 86e026f067b..decd1fa38d5 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -10,7 +10,7 @@
#include <asm/time.h>
-static cycle_t c0_hpt_read(void)
+static cycle_t c0_hpt_read(struct clocksource *cs)
{
return read_c0_count();
}
@@ -22,17 +22,15 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-int __init init_mips_clocksource(void)
+int __init init_r4k_clocksource(void)
{
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
- /* Calclate a somewhat reasonable rating value */
+ /* Calculate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
- clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
-
- clocksource_register(&clocksource_mips);
+ clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
return 0;
}
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index 92212bbb8e4..6ecb77d8206 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -33,7 +33,7 @@
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
-static cycle_t sb1250_hpt_read(void)
+static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
unsigned int count;
@@ -44,7 +44,7 @@ static cycle_t sb1250_hpt_read(void)
struct clocksource bcm1250_clocksource = {
.name = "bcm1250-counter-3",
- .rating = 200,
+ .rating = 200,
.read = sb1250_hpt_read,
.mask = CLOCKSOURCE_MASK(23),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -65,6 +65,5 @@ void __init sb1250_clocksource_init(void)
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
R_SCD_TIMER_CFG)));
- clocksource_set_clock(cs, V_SCD_TIMER_FREQ);
- clocksource_register(cs);
+ clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
}
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9dccfa4752b..505cb77d128 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -7,13 +7,16 @@
* Copyright (C) 2007 MIPS Technologies, Inc.
* written by Ralf Baechle (ralf@linux-mips.org)
*/
+#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/printk.h>
#include <linux/init.h>
+#include <asm/setup.h>
+
extern void prom_putchar(char);
-static void __init
-early_console_write(struct console *con, const char *s, unsigned n)
+static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
if (*s == '\n')
@@ -23,20 +26,18 @@ early_console_write(struct console *con, const char *s, unsigned n)
}
}
-static struct console early_console __initdata = {
+static struct console early_console_prom = {
.name = "early",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1
};
-static int early_console_initialized __initdata;
-
void __init setup_early_printk(void)
{
- if (early_console_initialized)
+ if (early_console)
return;
- early_console_initialized = 1;
+ early_console = &early_console_prom;
- register_console(&early_console);
+ register_console(&early_console_prom);
}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644
index 00000000000..83cea376755
--- /dev/null
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -0,0 +1,66 @@
+/*
+ * 8250/16550-type serial ports prom_putchar()
+ *
+ * Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+ unsigned int timeout)
+{
+ serial8250_base = (void __iomem *)base;
+ serial8250_reg_shift = reg_shift;
+ serial8250_tx_timeout = timeout;
+}
+
+static inline u8 serial_in(int offset)
+{
+ return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+ writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+ unsigned int timeout;
+ int status, bits;
+
+ if (!serial8250_base)
+ return;
+
+ timeout = serial8250_tx_timeout;
+ bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+ do {
+ status = serial_in(UART_LSR);
+
+ if (--timeout == 0)
+ break;
+ } while ((status & bits) != bits);
+
+ if (timeout)
+ serial_out(UART_TX, c);
+}
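A hedged usage sketch for the new helper: platform setup code hands the
UART's uncached base address to setup_8250_early_printk_port() before
registering the early console. Every value below is hypothetical, not a
real board's configuration:

void __init my_board_setup_early_console(void)
{
	/* KSEG1 (uncached) address of the board's 16550; illustrative. */
	setup_8250_early_printk_port(0xb40003f8UL,
				     0,		/* byte-spaced registers */
				     10000);	/* TX poll iterations */
	setup_early_printk();	/* hook prom_putchar() into a console */
}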
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939..4353d323f01 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -16,9 +16,6 @@
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
@@ -36,6 +33,11 @@ FEXPORT(ret_from_exception)
FEXPORT(ret_from_irq)
LONG_S s0, TI_REGS($28)
FEXPORT(__ret_from_irq)
+/*
+ * We can be coming here from a syscall done in kernel space,
+ * e.g. a failed kernel_execve().
+ */
+resume_userspace_check:
LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
andi t0, t0, KU_USER
beqz t0, resume_kernel
@@ -65,6 +67,12 @@ need_resched:
b need_resched
#endif
+FEXPORT(ret_from_kernel_thread)
+ jal schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jal s0
+ j syscall_exit
+
FEXPORT(ret_from_fork)
jal schedule_tail # a0 = struct task_struct *prev
@@ -77,47 +85,12 @@ FEXPORT(syscall_exit)
and t0, a2, t0
bnez t0, syscall_exit_work
-FEXPORT(restore_all) # restore full frame
-#ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
- LONG_L s0, TI_REGS($28)
- LONG_S sp, TI_REGS($28)
- jal deferred_smtc_ipi
- LONG_S s0, TI_REGS($28)
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
-/* Re-arm any temporarily masked interrupts not explicitly "acked" */
- mfc0 v0, CP0_TCSTATUS
- ori v1, v0, TCSTATUS_IXMT
- mtc0 v1, CP0_TCSTATUS
- andi v0, TCSTATUS_IXMT
- _ehb
- mfc0 t0, CP0_TCCONTEXT
- DMT 9 # dmt t1
- jal mips_ihb
- mfc0 t2, CP0_STATUS
- andi t3, t0, 0xff00
- or t2, t2, t3
- mtc0 t2, CP0_STATUS
- _ehb
- andi t1, t1, VPECONTROL_TE
- beqz t1, 1f
- EMT
-1:
- mfc0 v1, CP0_TCSTATUS
- /* We set IXMT above, XOR should clear it here */
- xori v1, v1, TCSTATUS_IXMT
- or v1, v0, v1
- mtc0 v1, CP0_TCSTATUS
- _ehb
- xor t0, t0, t3
- mtc0 t0, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
-#endif /* CONFIG_MIPS_MT_SMTC */
+restore_all: # restore full frame
.set noat
RESTORE_TEMP
RESTORE_AT
RESTORE_STATIC
-FEXPORT(restore_partial) # restore partial frame
+restore_partial: # restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
SAVE_STATIC
SAVE_AT
@@ -162,19 +135,27 @@ work_notifysig: # deal with pending signals and
move a0, sp
li a1, 0
jal do_notify_resume # a2 already loaded
- j resume_userspace
+ j resume_userspace_check
-FEXPORT(syscall_exit_work_partial)
+FEXPORT(syscall_exit_partial)
+ local_irq_disable # make sure need_resched doesn't
+					# change between here and the return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ li t0, _TIF_ALLWORK_MASK
+ and t0, a2
+ beqz t0, restore_partial
SAVE_STATIC
syscall_exit_work:
- li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
+ andi t0, t0, KU_USER
+ beqz t0, resume_kernel
+ li t0, _TIF_WORK_SYSCALL_EXIT
and t0, a2 # a2 is preloaded with TI_FLAGS
beqz t0, work_pending # trace bit set?
- local_irq_enable # could let do_syscall_trace()
+ local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
move a0, sp
- li a1, 1
- jal do_syscall_trace
+ jal syscall_trace_leave
b resume_userspace
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
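For readers who don't speak MIPS assembly, the new ret_from_kernel_thread sequence is easier to follow as a rough C equivalent (a sketch of the four instructions above, not code from the patch; s0 holds the thread function and s1 its argument):

    /* rough C equivalent of ret_from_kernel_thread, for illustration */
    void ret_from_kernel_thread_c(struct task_struct *prev,
                                  int (*fn)(void *),    /* s0 */
                                  void *arg)            /* s1 */
    {
            schedule_tail(prev);    /* finish the context switch; a0 = prev */
            fn(arg);                /* move a0, s1 ; jal s0 */
            /* j syscall_exit: return via the common syscall exit path */
    }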
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 00000000000..60e7e5e45af
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,399 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * Thanks go to Steven Rostedt for writing the original x86 version.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/ftrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>
+#include <asm/uasm.h>
+#include <asm/unistd.h>
+
+#include <asm-generic/sections.h>
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * A clone of core_kernel_text() from kernel/extable.c, minus the
+ * init_kernel_text() check: ftrace does not trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+ if (ip >= (unsigned long)_stext &&
+ ip <= (unsigned long)_etext)
+ return 1;
+ return 0;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
+#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)
+
+#define INSN_NOP 0x00000000 /* nop */
+#define INSN_JAL(addr) \
+ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+ u32 *buf;
+ unsigned int v1;
+
+ /* lui v1, hi16_mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_lui_v1_hi16_mcount;
+ UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);
+
+ /* jal (ftrace_caller + 8), jump over the first two instructions */
+ buf = (u32 *)&insn_jal_ftrace_caller;
+ uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* j ftrace_graph_caller */
+ buf = (u32 *)&insn_j_ftrace_graph_caller;
+ uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
+#endif
+}
+
+static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+{
+ int faulted;
+ mm_segment_t old_fs;
+
+ /* *(unsigned int *)ip = new_code; */
+ safe_store_code(new_code, ip, faulted);
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
+
+ return 0;
+}
+
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+ safe_store_code(new_code2, ip + 4, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+ flush_icache_range(ip, ip + 8);
+ return 0;
+}
+#endif
+
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ * 2.2 For the other situations
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ /*
+ * If ip is in kernel space, no long call is needed; otherwise a
+ * long call is.
+ */
+ new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+#ifdef CONFIG_64BIT
+ return ftrace_modify_code(ip, new);
+#else
+ /*
+ * On 32 bit MIPS platforms, gcc adds a stack adjust
+ * instruction in the delay slot after the branch to
+ * mcount and expects mcount to restore the sp on return.
+ * This is based on a legacy API and does nothing but
+ * waste instructions so it's being removed at runtime.
+ */
+ return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+ insn_lui_v1_hi16_mcount;
+
+ return ftrace_modify_code(ip, new);
+}
+
+#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned int new;
+
+ new = INSN_JAL((unsigned long)func);
+
+ return ftrace_modify_code(FTRACE_CALL_IP, new);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ /* Encode the instructions when booting */
+ ftrace_dyn_arch_init_insns();
+
+ /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
+ insn_j_ftrace_graph_caller);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
+
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+{
+ unsigned long sp, ip, tmp;
+ unsigned int code;
+ int faulted;
+
+ /*
+ * For a module, move ip back past the "lui v1, hi_16bit_of_mcount"
+ * instruction (an offset of 24 bytes); for the kernel, move it back
+ * past "move ra, at" (an offset of 16 bytes).
+ */
+ ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+
+ /*
+ * Scan backwards through the text until we find either a non-store
+ * instruction or the "s{d,w} ra, offset(sp)" instruction.
+ */
+ do {
+ /* get the code at "ip": code = *(unsigned int *)ip; */
+ safe_load_code(code, ip, faulted);
+
+ if (unlikely(faulted))
+ return 0;
+ /*
+ * If we hit a non-store instruction before finding where the
+ * ra is stored, then this is a leaf function and it does not
+ * store the ra on the stack
+ */
+ if ((code & S_R_SP) != S_R_SP)
+ return parent_ra_addr;
+
+ /* Move to the next instruction */
+ ip -= 4;
+ } while ((code & S_RA_SP) != S_RA_SP);
+
+ sp = fp + (code & OFFSET_MASK);
+
+ /* tmp = *(unsigned long *)sp; */
+ safe_load_stack(tmp, sp, faulted);
+ if (unlikely(faulted))
+ return 0;
+
+ if (tmp == old_parent_ra)
+ return sp;
+ return 0;
+}
+
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
+ unsigned long fp)
+{
+ unsigned long old_parent_ra;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+ int faulted, insns;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * "parent_ra_addr" is the stack address saved the return address of
+ * the caller of _mcount.
+ *
+ * if the gcc < 4.5, a leaf function does not save the return address
+ * in the stack address, so, we "emulate" one in _mcount's stack space,
+ * and hijack it directly, but for a non-leaf function, it save the
+ * return address to the its own stack space, we can not hijack it
+ * directly, but need to find the real stack address,
+ * ftrace_get_parent_addr() does it!
+ *
+ * if gcc>= 4.5, with the new -mmcount-ra-address option, for a
+ * non-leaf function, the location of the return address will be saved
+ * to $12 for us, and for a leaf function, only put a zero into $12. we
+ * do it in ftrace_graph_caller of mcount.S.
+ */
+
+ /* old_parent_ra = *parent_ra_addr; */
+ safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+ parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+ old_parent_ra, (unsigned long)parent_ra_addr, fp);
+ /*
+ * If getting the stack address of the non-leaf function's ra fails,
+ * stop the function graph tracer and return.
+ */
+ if (parent_ra_addr == 0)
+ goto out;
+#endif
+ /* *parent_ra_addr = return_hooker; */
+ safe_store_stack(return_hooker, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+
+ if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+ == -EBUSY) {
+ *parent_ra_addr = old_parent_ra;
+ return;
+ }
+
+ /*
+ * Get the recorded ip of the current mcount calling site in the
+ * __mcount_loc section, which will be used to filter the function
+ * entries configured through the tracing/set_graph_function interface.
+ */
+
+ insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ *parent_ra_addr = old_parent_ra;
+ }
+ return;
+out:
+ ftrace_graph_stop();
+ WARN_ON(1);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+ if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+ if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+ return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+ if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+ return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
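The INSN_JAL() encoding used by the dynamic-ftrace code above is easy to sanity-check in a stand-alone program. JAL stores bits 27..2 of the target and inherits the top four address bits from the PC, which is also why the uasm calls mask with JUMP_RANGE_MASK; the target address below is an arbitrary example:

    #include <stdio.h>
    #include <stdint.h>

    #define JAL       0x0c000000u   /* jump & link opcode */
    #define ADDR_MASK 0x03ffffffu   /* 26-bit target field */

    int main(void)
    {
            uint32_t target = 0x80210000u;  /* example kernel-space address */
            uint32_t insn = JAL | ((target >> 2) & ADDR_MASK);

            printf("jal 0x%08x -> 0x%08x\n", target, insn); /* 0x0c084000 */
            return 0;
    }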
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
deleted file mode 100644
index 2c446063636..00000000000
--- a/arch/mips/kernel/gdb-low.S
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * gdb-low.S contains the low-level trap handler for the GDB stub.
- *
- * Copyright (C) 1995 Andreas Busse
- */
-#include <linux/sys.h>
-
-#include <asm/asm.h>
-#include <asm/errno.h>
-#include <asm/irqflags.h>
-#include <asm/mipsregs.h>
-#include <asm/regdef.h>
-#include <asm/stackframe.h>
-#include <asm/gdb-stub.h>
-
-#ifdef CONFIG_32BIT
-#define DMFC0 mfc0
-#define DMTC0 mtc0
-#define LDC1 lwc1
-#define SDC1 lwc1
-#endif
-#ifdef CONFIG_64BIT
-#define DMFC0 dmfc0
-#define DMTC0 dmtc0
-#define LDC1 ldc1
-#define SDC1 ldc1
-#endif
-
-/*
- * [jsun] We reserve about 2x GDB_FR_SIZE on the stack. The lower (addressed)
- * part is used to store registers and is passed to the exception handler.
- * The upper part is reserved for the "call func" feature, where the gdb
- * client saves some of the regs, sets up the call frame and passes args.
- *
- * A trace shows about 200 bytes are used to store about half of all regs.
- * The rest should be big enough for frame setup and passing args.
- */
-
-/*
- * The low level trap handler
- */
- .align 5
- NESTED(trap_low, GDB_FR_SIZE, sp)
- .set noat
- .set noreorder
-
- mfc0 k0, CP0_STATUS
- sll k0, 3 /* extract cu0 bit */
- bltz k0, 1f
- move k1, sp
-
- /*
- * Called from user mode, go somewhere else.
- */
- mfc0 k0, CP0_CAUSE
- andi k0, k0, 0x7c
-#ifdef CONFIG_64BIT
- dsll k0, k0, 1
-#endif
- PTR_L k1, saved_vectors(k0)
- jr k1
- nop
-1:
- move k0, sp
- PTR_SUBU sp, k1, GDB_FR_SIZE*2 # see comment above
- LONG_S k0, GDB_FR_REG29(sp)
- LONG_S $2, GDB_FR_REG2(sp)
-
-/*
- * First save the CP0 and special registers
- */
-
- mfc0 v0, CP0_STATUS
- LONG_S v0, GDB_FR_STATUS(sp)
- mfc0 v0, CP0_CAUSE
- LONG_S v0, GDB_FR_CAUSE(sp)
- DMFC0 v0, CP0_EPC
- LONG_S v0, GDB_FR_EPC(sp)
- DMFC0 v0, CP0_BADVADDR
- LONG_S v0, GDB_FR_BADVADDR(sp)
- mfhi v0
- LONG_S v0, GDB_FR_HI(sp)
- mflo v0
- LONG_S v0, GDB_FR_LO(sp)
-
-/*
- * Now the integer registers
- */
-
- LONG_S zero, GDB_FR_REG0(sp) /* I know... */
- LONG_S $1, GDB_FR_REG1(sp)
- /* v0 already saved */
- LONG_S $3, GDB_FR_REG3(sp)
- LONG_S $4, GDB_FR_REG4(sp)
- LONG_S $5, GDB_FR_REG5(sp)
- LONG_S $6, GDB_FR_REG6(sp)
- LONG_S $7, GDB_FR_REG7(sp)
- LONG_S $8, GDB_FR_REG8(sp)
- LONG_S $9, GDB_FR_REG9(sp)
- LONG_S $10, GDB_FR_REG10(sp)
- LONG_S $11, GDB_FR_REG11(sp)
- LONG_S $12, GDB_FR_REG12(sp)
- LONG_S $13, GDB_FR_REG13(sp)
- LONG_S $14, GDB_FR_REG14(sp)
- LONG_S $15, GDB_FR_REG15(sp)
- LONG_S $16, GDB_FR_REG16(sp)
- LONG_S $17, GDB_FR_REG17(sp)
- LONG_S $18, GDB_FR_REG18(sp)
- LONG_S $19, GDB_FR_REG19(sp)
- LONG_S $20, GDB_FR_REG20(sp)
- LONG_S $21, GDB_FR_REG21(sp)
- LONG_S $22, GDB_FR_REG22(sp)
- LONG_S $23, GDB_FR_REG23(sp)
- LONG_S $24, GDB_FR_REG24(sp)
- LONG_S $25, GDB_FR_REG25(sp)
- LONG_S $26, GDB_FR_REG26(sp)
- LONG_S $27, GDB_FR_REG27(sp)
- LONG_S $28, GDB_FR_REG28(sp)
- /* sp already saved */
- LONG_S $30, GDB_FR_REG30(sp)
- LONG_S $31, GDB_FR_REG31(sp)
-
- CLI /* disable interrupts */
- TRACE_IRQS_OFF
-
-/*
- * Followed by the floating point registers
- */
- mfc0 v0, CP0_STATUS /* FPU enabled? */
- srl v0, v0, 16
- andi v0, v0, (ST0_CU1 >> 16)
-
- beqz v0,2f /* disabled, skip */
- nop
-
- SDC1 $0, GDB_FR_FPR0(sp)
- SDC1 $1, GDB_FR_FPR1(sp)
- SDC1 $2, GDB_FR_FPR2(sp)
- SDC1 $3, GDB_FR_FPR3(sp)
- SDC1 $4, GDB_FR_FPR4(sp)
- SDC1 $5, GDB_FR_FPR5(sp)
- SDC1 $6, GDB_FR_FPR6(sp)
- SDC1 $7, GDB_FR_FPR7(sp)
- SDC1 $8, GDB_FR_FPR8(sp)
- SDC1 $9, GDB_FR_FPR9(sp)
- SDC1 $10, GDB_FR_FPR10(sp)
- SDC1 $11, GDB_FR_FPR11(sp)
- SDC1 $12, GDB_FR_FPR12(sp)
- SDC1 $13, GDB_FR_FPR13(sp)
- SDC1 $14, GDB_FR_FPR14(sp)
- SDC1 $15, GDB_FR_FPR15(sp)
- SDC1 $16, GDB_FR_FPR16(sp)
- SDC1 $17, GDB_FR_FPR17(sp)
- SDC1 $18, GDB_FR_FPR18(sp)
- SDC1 $19, GDB_FR_FPR19(sp)
- SDC1 $20, GDB_FR_FPR20(sp)
- SDC1 $21, GDB_FR_FPR21(sp)
- SDC1 $22, GDB_FR_FPR22(sp)
- SDC1 $23, GDB_FR_FPR23(sp)
- SDC1 $24, GDB_FR_FPR24(sp)
- SDC1 $25, GDB_FR_FPR25(sp)
- SDC1 $26, GDB_FR_FPR26(sp)
- SDC1 $27, GDB_FR_FPR27(sp)
- SDC1 $28, GDB_FR_FPR28(sp)
- SDC1 $29, GDB_FR_FPR29(sp)
- SDC1 $30, GDB_FR_FPR30(sp)
- SDC1 $31, GDB_FR_FPR31(sp)
-
-/*
- * FPU control registers
- */
-
- cfc1 v0, CP1_STATUS
- LONG_S v0, GDB_FR_FSR(sp)
- cfc1 v0, CP1_REVISION
- LONG_S v0, GDB_FR_FIR(sp)
-
-/*
- * Current stack frame ptr
- */
-
-2:
- LONG_S sp, GDB_FR_FRP(sp)
-
-/*
- * CP0 registers (R4000/R4400 unused registers skipped)
- */
-
- mfc0 v0, CP0_INDEX
- LONG_S v0, GDB_FR_CP0_INDEX(sp)
- mfc0 v0, CP0_RANDOM
- LONG_S v0, GDB_FR_CP0_RANDOM(sp)
- DMFC0 v0, CP0_ENTRYLO0
- LONG_S v0, GDB_FR_CP0_ENTRYLO0(sp)
- DMFC0 v0, CP0_ENTRYLO1
- LONG_S v0, GDB_FR_CP0_ENTRYLO1(sp)
- DMFC0 v0, CP0_CONTEXT
- LONG_S v0, GDB_FR_CP0_CONTEXT(sp)
- mfc0 v0, CP0_PAGEMASK
- LONG_S v0, GDB_FR_CP0_PAGEMASK(sp)
- mfc0 v0, CP0_WIRED
- LONG_S v0, GDB_FR_CP0_WIRED(sp)
- DMFC0 v0, CP0_ENTRYHI
- LONG_S v0, GDB_FR_CP0_ENTRYHI(sp)
- mfc0 v0, CP0_PRID
- LONG_S v0, GDB_FR_CP0_PRID(sp)
-
- .set at
-
-/*
- * Continue with the higher level handler
- */
-
- move a0,sp
-
- jal handle_exception
- nop
-
-/*
- * Restore all writable registers, in reverse order
- */
-
- .set noat
-
- LONG_L v0, GDB_FR_CP0_ENTRYHI(sp)
- LONG_L v1, GDB_FR_CP0_WIRED(sp)
- DMTC0 v0, CP0_ENTRYHI
- mtc0 v1, CP0_WIRED
- LONG_L v0, GDB_FR_CP0_PAGEMASK(sp)
- LONG_L v1, GDB_FR_CP0_ENTRYLO1(sp)
- mtc0 v0, CP0_PAGEMASK
- DMTC0 v1, CP0_ENTRYLO1
- LONG_L v0, GDB_FR_CP0_ENTRYLO0(sp)
- LONG_L v1, GDB_FR_CP0_INDEX(sp)
- DMTC0 v0, CP0_ENTRYLO0
- LONG_L v0, GDB_FR_CP0_CONTEXT(sp)
- mtc0 v1, CP0_INDEX
- DMTC0 v0, CP0_CONTEXT
-
-
-/*
- * Next, the floating point registers
- */
- mfc0 v0, CP0_STATUS /* check if the FPU is enabled */
- srl v0, v0, 16
- andi v0, v0, (ST0_CU1 >> 16)
-
- beqz v0, 3f /* disabled, skip */
- nop
-
- LDC1 $31, GDB_FR_FPR31(sp)
- LDC1 $30, GDB_FR_FPR30(sp)
- LDC1 $29, GDB_FR_FPR29(sp)
- LDC1 $28, GDB_FR_FPR28(sp)
- LDC1 $27, GDB_FR_FPR27(sp)
- LDC1 $26, GDB_FR_FPR26(sp)
- LDC1 $25, GDB_FR_FPR25(sp)
- LDC1 $24, GDB_FR_FPR24(sp)
- LDC1 $23, GDB_FR_FPR23(sp)
- LDC1 $22, GDB_FR_FPR22(sp)
- LDC1 $21, GDB_FR_FPR21(sp)
- LDC1 $20, GDB_FR_FPR20(sp)
- LDC1 $19, GDB_FR_FPR19(sp)
- LDC1 $18, GDB_FR_FPR18(sp)
- LDC1 $17, GDB_FR_FPR17(sp)
- LDC1 $16, GDB_FR_FPR16(sp)
- LDC1 $15, GDB_FR_FPR15(sp)
- LDC1 $14, GDB_FR_FPR14(sp)
- LDC1 $13, GDB_FR_FPR13(sp)
- LDC1 $12, GDB_FR_FPR12(sp)
- LDC1 $11, GDB_FR_FPR11(sp)
- LDC1 $10, GDB_FR_FPR10(sp)
- LDC1 $9, GDB_FR_FPR9(sp)
- LDC1 $8, GDB_FR_FPR8(sp)
- LDC1 $7, GDB_FR_FPR7(sp)
- LDC1 $6, GDB_FR_FPR6(sp)
- LDC1 $5, GDB_FR_FPR5(sp)
- LDC1 $4, GDB_FR_FPR4(sp)
- LDC1 $3, GDB_FR_FPR3(sp)
- LDC1 $2, GDB_FR_FPR2(sp)
- LDC1 $1, GDB_FR_FPR1(sp)
- LDC1 $0, GDB_FR_FPR0(sp)
-
-/*
- * Now the CP0 and integer registers
- */
-
-3:
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify write of Status must be atomic */
- mfc0 t2, CP0_TCSTATUS
- ori t1, t2, TCSTATUS_IXMT
- mtc0 t1, CP0_TCSTATUS
- andi t2, t2, TCSTATUS_IXMT
- _ehb
- DMT 9 # dmt t1
- jal mips_ihb
- nop
-#endif /* CONFIG_MIPS_MT_SMTC */
- mfc0 t0, CP0_STATUS
- ori t0, 0x1f
- xori t0, 0x1f
- mtc0 t0, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
- andi t1, t1, VPECONTROL_TE
- beqz t1, 9f
- nop
- EMT # emt
-9:
- mfc0 t1, CP0_TCSTATUS
- xori t1, t1, TCSTATUS_IXMT
- or t1, t1, t2
- mtc0 t1, CP0_TCSTATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
- LONG_L v0, GDB_FR_STATUS(sp)
- LONG_L v1, GDB_FR_EPC(sp)
- mtc0 v0, CP0_STATUS
- DMTC0 v1, CP0_EPC
- LONG_L v0, GDB_FR_HI(sp)
- LONG_L v1, GDB_FR_LO(sp)
- mthi v0
- mtlo v1
- LONG_L $31, GDB_FR_REG31(sp)
- LONG_L $30, GDB_FR_REG30(sp)
- LONG_L $28, GDB_FR_REG28(sp)
- LONG_L $27, GDB_FR_REG27(sp)
- LONG_L $26, GDB_FR_REG26(sp)
- LONG_L $25, GDB_FR_REG25(sp)
- LONG_L $24, GDB_FR_REG24(sp)
- LONG_L $23, GDB_FR_REG23(sp)
- LONG_L $22, GDB_FR_REG22(sp)
- LONG_L $21, GDB_FR_REG21(sp)
- LONG_L $20, GDB_FR_REG20(sp)
- LONG_L $19, GDB_FR_REG19(sp)
- LONG_L $18, GDB_FR_REG18(sp)
- LONG_L $17, GDB_FR_REG17(sp)
- LONG_L $16, GDB_FR_REG16(sp)
- LONG_L $15, GDB_FR_REG15(sp)
- LONG_L $14, GDB_FR_REG14(sp)
- LONG_L $13, GDB_FR_REG13(sp)
- LONG_L $12, GDB_FR_REG12(sp)
- LONG_L $11, GDB_FR_REG11(sp)
- LONG_L $10, GDB_FR_REG10(sp)
- LONG_L $9, GDB_FR_REG9(sp)
- LONG_L $8, GDB_FR_REG8(sp)
- LONG_L $7, GDB_FR_REG7(sp)
- LONG_L $6, GDB_FR_REG6(sp)
- LONG_L $5, GDB_FR_REG5(sp)
- LONG_L $4, GDB_FR_REG4(sp)
- LONG_L $3, GDB_FR_REG3(sp)
- LONG_L $2, GDB_FR_REG2(sp)
- LONG_L $1, GDB_FR_REG1(sp)
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- LONG_L k0, GDB_FR_EPC(sp)
- LONG_L $29, GDB_FR_REG29(sp) /* Deallocate stack */
- jr k0
- rfe
-#else
- LONG_L sp, GDB_FR_REG29(sp) /* Deallocate stack */
-
- .set mips3
- eret
- .set mips0
-#endif
- .set at
- .set reorder
- END(trap_low)
-
-LEAF(kgdb_read_byte)
-4: lb t0, (a0)
- sb t0, (a1)
- li v0, 0
- jr ra
- .section __ex_table,"a"
- PTR 4b, kgdbfault
- .previous
- END(kgdb_read_byte)
-
-LEAF(kgdb_write_byte)
-5: sb a0, (a1)
- li v0, 0
- jr ra
- .section __ex_table,"a"
- PTR 5b, kgdbfault
- .previous
- END(kgdb_write_byte)
-
- .type kgdbfault@function
- .ent kgdbfault
-
-kgdbfault: li v0, -EFAULT
- jr ra
- .end kgdbfault
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
deleted file mode 100644
index 25f4eab8ea9..00000000000
--- a/arch/mips/kernel/gdb-stub.c
+++ /dev/null
@@ -1,1155 +0,0 @@
-/*
- * arch/mips/kernel/gdb-stub.c
- *
- * Originally written by Glenn Engel, Lake Stevens Instrument Division
- *
- * Contributed by HP Systems
- *
- * Modified for SPARC by Stu Grossman, Cygnus Support.
- *
- * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
- * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
- *
- * Copyright (C) 1995 Andreas Busse
- *
- * Copyright (C) 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- */
-
-/*
- * To enable debugger support, two things need to happen. One, a
- * call to set_debug_traps() is necessary in order to allow any breakpoints
- * or error conditions to be properly intercepted and reported to gdb.
- * Two, a breakpoint needs to be generated to begin communication. This
- * is most easily accomplished by a call to breakpoint(). Breakpoint()
- * simulates a breakpoint by executing a BREAK instruction.
- *
- *
- * The following gdb commands are supported:
- *
- * command function Return value
- *
- * g return the value of the CPU registers hex data or ENN
- * G set the value of the CPU registers OK or ENN
- *
- * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
- * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
- *
- * c Resume at current address SNN ( signal NN)
- * cAA..AA Continue at address AA..AA SNN
- *
- * s Step one instruction SNN
- * sAA..AA Step one instruction from AA..AA SNN
- *
- * k kill
- *
- * ? What was the last sigval ? SNN (signal NN)
- *
- * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
- * baud rate
- *
- * All commands and responses are sent with a packet which includes a
- * checksum. A packet consists of
- *
- * $<packet info>#<checksum>.
- *
- * where
- * <packet info> :: <characters representing the command or response>
- * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
- *
- * When a packet is received, it is first acknowledged with either '+' or '-'.
- * '+' indicates a successful transfer. '-' indicates a failed transfer.
- *
- * Example:
- *
- * Host: Reply:
- * $m0,10#2a +$00010203040506070809101112131415#42
- *
- *
- * ==============
- * MORE EXAMPLES:
- * ==============
- *
- * For reference -- the following are the steps that one
- * company took (RidgeRun Inc) to get remote gdb debugging
- * going. In this scenario the host machine was a PC and the
- * target platform was a Galileo EVB64120A MIPS evaluation
- * board.
- *
- * Step 1:
- * First download gdb-5.0.tar.gz from the internet,
- * and then build/install the package.
- *
- * Example:
- * $ tar zxf gdb-5.0.tar.gz
- * $ cd gdb-5.0
- * $ ./configure --target=mips-linux-elf
- * $ make
- * $ make install
- * $ which mips-linux-elf-gdb
- * /usr/local/bin/mips-linux-elf-gdb
- *
- * Step 2:
- * Configure linux for remote debugging and build it.
- *
- * Example:
- * $ cd ~/linux
- * $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
- * $ make
- *
- * Step 3:
- * Download the kernel to the remote target and start
- * the kernel running. It will promptly halt and wait
- * for the host gdb session to connect. It does this
- * since the "Kernel Hacking" option has defined
- * CONFIG_KGDB which in turn enables your calls
- * to:
- * set_debug_traps();
- * breakpoint();
- *
- * Step 4:
- * Start the gdb session on the host.
- *
- * Example:
- * $ mips-linux-elf-gdb vmlinux
- * (gdb) set remotebaud 115200
- * (gdb) target remote /dev/ttyS1
- * ...at this point you are connected to
- * the remote target and can use gdb
- * in the normal fashion. Setting
- * breakpoints, single stepping,
- * printing variables, etc.
- */
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/reboot.h>
-
-#include <asm/asm.h>
-#include <asm/cacheflush.h>
-#include <asm/mipsregs.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-#include <asm/gdb-stub.h>
-#include <asm/inst.h>
-
-/*
- * external low-level support routines
- */
-
-extern int putDebugChar(char c); /* write a single character */
-extern char getDebugChar(void); /* read and return a single char */
-extern void trap_low(void);
-
-/*
- * breakpoint and test functions
- */
-extern void breakpoint(void);
-extern void breakinst(void);
-extern void async_breakpoint(void);
-extern void async_breakinst(void);
-extern void adel(void);
-
-/*
- * local prototypes
- */
-
-static void getpacket(char *buffer);
-static void putpacket(char *buffer);
-static int computeSignal(int tt);
-static int hex(unsigned char ch);
-static int hexToInt(char **ptr, int *intValue);
-static int hexToLong(char **ptr, long *longValue);
-static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault);
-void handle_exception(struct gdb_regs *regs);
-
-int kgdb_enabled;
-
-/*
- * spin locks for smp case
- */
-static DEFINE_SPINLOCK(kgdb_lock);
-static raw_spinlock_t kgdb_cpulock[NR_CPUS] = {
- [0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED,
-};
-
-/*
- * BUFMAX defines the maximum number of characters in inbound/outbound buffers
- * at least NUMREGBYTES*2 are needed for register packets
- */
-#define BUFMAX 2048
-
-static char input_buffer[BUFMAX];
-static char output_buffer[BUFMAX];
-static int initialized; /* !0 means we've been initialized */
-static int kgdb_started;
-static const char hexchars[]="0123456789abcdef";
-
-/* Used to prevent crashes in memory access. Note that they'll crash anyway if
- we haven't set up fault handlers yet... */
-int kgdb_read_byte(unsigned char *address, unsigned char *dest);
-int kgdb_write_byte(unsigned char val, unsigned char *dest);
-
-/*
- * Convert ch from a hex digit to an int
- */
-static int hex(unsigned char ch)
-{
- if (ch >= 'a' && ch <= 'f')
- return ch-'a'+10;
- if (ch >= '0' && ch <= '9')
- return ch-'0';
- if (ch >= 'A' && ch <= 'F')
- return ch-'A'+10;
- return -1;
-}
-
-/*
- * scan for the sequence $<data>#<checksum>
- */
-static void getpacket(char *buffer)
-{
- unsigned char checksum;
- unsigned char xmitcsum;
- int i;
- int count;
- unsigned char ch;
-
- do {
- /*
- * wait around for the start character,
- * ignore all other characters
- */
- while ((ch = (getDebugChar() & 0x7f)) != '$') ;
-
- checksum = 0;
- xmitcsum = -1;
- count = 0;
-
- /*
- * now, read until a # or end of buffer is found
- */
- while (count < BUFMAX) {
- ch = getDebugChar();
- if (ch == '#')
- break;
- checksum = checksum + ch;
- buffer[count] = ch;
- count = count + 1;
- }
-
- if (count >= BUFMAX)
- continue;
-
- buffer[count] = 0;
-
- if (ch == '#') {
- xmitcsum = hex(getDebugChar() & 0x7f) << 4;
- xmitcsum |= hex(getDebugChar() & 0x7f);
-
- if (checksum != xmitcsum)
- putDebugChar('-'); /* failed checksum */
- else {
- putDebugChar('+'); /* successful transfer */
-
- /*
- * if a sequence char is present,
- * reply the sequence ID
- */
- if (buffer[2] == ':') {
- putDebugChar(buffer[0]);
- putDebugChar(buffer[1]);
-
- /*
- * remove sequence chars from buffer
- */
- count = strlen(buffer);
- for (i=3; i <= count; i++)
- buffer[i-3] = buffer[i];
- }
- }
- }
- }
- while (checksum != xmitcsum);
-}
-
-/*
- * send the packet in buffer.
- */
-static void putpacket(char *buffer)
-{
- unsigned char checksum;
- int count;
- unsigned char ch;
-
- /*
- * $<packet info>#<checksum>.
- */
-
- do {
- putDebugChar('$');
- checksum = 0;
- count = 0;
-
- while ((ch = buffer[count]) != 0) {
- if (!(putDebugChar(ch)))
- return;
- checksum += ch;
- count += 1;
- }
-
- putDebugChar('#');
- putDebugChar(hexchars[checksum >> 4]);
- putDebugChar(hexchars[checksum & 0xf]);
-
- }
- while ((getDebugChar() & 0x7f) != '+');
-}
-
-
-/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null); in case of a
- * memory fault, return 0.
- * may_fault is non-zero if we are reading from arbitrary memory, but is currently
- * not used.
- */
-static unsigned char *mem2hex(char *mem, char *buf, int count, int may_fault)
-{
- unsigned char ch;
-
- while (count-- > 0) {
- if (kgdb_read_byte(mem++, &ch) != 0)
- return 0;
- *buf++ = hexchars[ch >> 4];
- *buf++ = hexchars[ch & 0xf];
- }
-
- *buf = 0;
-
- return buf;
-}
-
-/*
- * convert the hex array pointed to by buf into binary to be placed in mem
- * return a pointer to the character AFTER the last byte written
- * may_fault is non-zero if we are reading from arbitrary memory, but is currently
- * not used.
- */
-static char *hex2mem(char *buf, char *mem, int count, int binary, int may_fault)
-{
- int i;
- unsigned char ch;
-
- for (i=0; i<count; i++)
- {
- if (binary) {
- ch = *buf++;
- if (ch == 0x7d)
- ch = 0x20 ^ *buf++;
- }
- else {
- ch = hex(*buf++) << 4;
- ch |= hex(*buf++);
- }
- if (kgdb_write_byte(ch, mem++) != 0)
- return 0;
- }
-
- return mem;
-}
-
-/*
- * This table contains the mapping between MIPS hardware trap types and
- * signals, which are primarily what GDB understands. It also indicates
- * which hardware traps we need to commandeer when initializing the stub.
- */
-static struct hard_trap_info {
- unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
- unsigned char signo; /* Signal that we map this trap into */
-} hard_trap_info[] = {
- { 6, SIGBUS }, /* instruction bus error */
- { 7, SIGBUS }, /* data bus error */
- { 9, SIGTRAP }, /* break */
- { 10, SIGILL }, /* reserved instruction */
-/* { 11, SIGILL }, */ /* CPU unusable */
- { 12, SIGFPE }, /* overflow */
- { 13, SIGTRAP }, /* trap */
- { 14, SIGSEGV }, /* virtual instruction cache coherency */
- { 15, SIGFPE }, /* floating point exception */
- { 23, SIGSEGV }, /* watch */
- { 31, SIGSEGV }, /* virtual data cache coherency */
- { 0, 0} /* Must be last */
-};
-
-/* Save the normal trap handlers for user-mode traps. */
-void *saved_vectors[32];
-
-/*
- * Set up exception handlers for tracing and breakpoints
- */
-void set_debug_traps(void)
-{
- struct hard_trap_info *ht;
- unsigned long flags;
- unsigned char c;
-
- local_irq_save(flags);
- for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
- saved_vectors[ht->tt] = set_except_vector(ht->tt, trap_low);
-
- putDebugChar('+'); /* 'hello world' */
- /*
- * In case GDB is started before us, ack any packets
- * (presumably "$?#xx") sitting there.
- */
- while((c = getDebugChar()) != '$');
- while((c = getDebugChar()) != '#');
- c = getDebugChar(); /* eat first csum byte */
- c = getDebugChar(); /* eat second csum byte */
- putDebugChar('+'); /* ack it */
-
- initialized = 1;
- local_irq_restore(flags);
-}
-
-void restore_debug_traps(void)
-{
- struct hard_trap_info *ht;
- unsigned long flags;
-
- local_irq_save(flags);
- for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
- set_except_vector(ht->tt, saved_vectors[ht->tt]);
- local_irq_restore(flags);
-}
-
-/*
- * Convert the MIPS hardware trap type code to a Unix signal number.
- */
-static int computeSignal(int tt)
-{
- struct hard_trap_info *ht;
-
- for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
- if (ht->tt == tt)
- return ht->signo;
-
- return SIGHUP; /* default for things we don't know about */
-}
-
-/*
- * While we find nice hex chars, build an int.
- * Return number of chars processed.
- */
-static int hexToInt(char **ptr, int *intValue)
-{
- int numChars = 0;
- int hexValue;
-
- *intValue = 0;
-
- while (**ptr) {
- hexValue = hex(**ptr);
- if (hexValue < 0)
- break;
-
- *intValue = (*intValue << 4) | hexValue;
- numChars ++;
-
- (*ptr)++;
- }
-
- return (numChars);
-}
-
-static int hexToLong(char **ptr, long *longValue)
-{
- int numChars = 0;
- int hexValue;
-
- *longValue = 0;
-
- while (**ptr) {
- hexValue = hex(**ptr);
- if (hexValue < 0)
- break;
-
- *longValue = (*longValue << 4) | hexValue;
- numChars ++;
-
- (*ptr)++;
- }
-
- return numChars;
-}
-
-
-#if 0
-/*
- * Print registers (on target console)
- * Used only to debug the stub...
- */
-void show_gdbregs(struct gdb_regs * regs)
-{
- /*
- * Saved main processor registers
- */
- printk("$0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg0, regs->reg1, regs->reg2, regs->reg3,
- regs->reg4, regs->reg5, regs->reg6, regs->reg7);
- printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg8, regs->reg9, regs->reg10, regs->reg11,
- regs->reg12, regs->reg13, regs->reg14, regs->reg15);
- printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg16, regs->reg17, regs->reg18, regs->reg19,
- regs->reg20, regs->reg21, regs->reg22, regs->reg23);
- printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- regs->reg24, regs->reg25, regs->reg26, regs->reg27,
- regs->reg28, regs->reg29, regs->reg30, regs->reg31);
-
- /*
- * Saved cp0 registers
- */
- printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\n",
- regs->cp0_epc, regs->cp0_status, regs->cp0_cause);
-}
-#endif /* dead code */
-
-/*
- * We single-step by setting breakpoints. When an exception
- * is handled, we need to restore the instructions hoisted
- * when the breakpoints were set.
- *
- * This is where we save the original instructions.
- */
-static struct gdb_bp_save {
- unsigned long addr;
- unsigned int val;
-} step_bp[2];
-
-#define BP 0x0000000d /* break opcode */
-
-/*
- * Set breakpoint instructions for single stepping.
- */
-static void single_step(struct gdb_regs *regs)
-{
- union mips_instruction insn;
- unsigned long targ;
- int is_branch, is_cond, i;
-
- targ = regs->cp0_epc;
- insn.word = *(unsigned int *)targ;
- is_branch = is_cond = 0;
-
- switch (insn.i_format.opcode) {
- /*
- * jr and jalr are in r_format format.
- */
- case spec_op:
- switch (insn.r_format.func) {
- case jalr_op:
- case jr_op:
- targ = *(&regs->reg0 + insn.r_format.rs);
- is_branch = 1;
- break;
- }
- break;
-
- /*
- * This group contains:
- * bltz_op, bgez_op, bltzl_op, bgezl_op,
- * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
- */
- case bcond_op:
- is_branch = is_cond = 1;
- targ += 4 + (insn.i_format.simmediate << 2);
- break;
-
- /*
- * These are unconditional and in j_format.
- */
- case jal_op:
- case j_op:
- is_branch = 1;
- targ += 4;
- targ >>= 28;
- targ <<= 28;
- targ |= (insn.j_format.target << 2);
- break;
-
- /*
- * These are conditional.
- */
- case beq_op:
- case beql_op:
- case bne_op:
- case bnel_op:
- case blez_op:
- case blezl_op:
- case bgtz_op:
- case bgtzl_op:
- case cop0_op:
- case cop1_op:
- case cop2_op:
- case cop1x_op:
- is_branch = is_cond = 1;
- targ += 4 + (insn.i_format.simmediate << 2);
- break;
- }
-
- if (is_branch) {
- i = 0;
- if (is_cond && targ != (regs->cp0_epc + 8)) {
- step_bp[i].addr = regs->cp0_epc + 8;
- step_bp[i++].val = *(unsigned *)(regs->cp0_epc + 8);
- *(unsigned *)(regs->cp0_epc + 8) = BP;
- }
- step_bp[i].addr = targ;
- step_bp[i].val = *(unsigned *)targ;
- *(unsigned *)targ = BP;
- } else {
- step_bp[0].addr = regs->cp0_epc + 4;
- step_bp[0].val = *(unsigned *)(regs->cp0_epc + 4);
- *(unsigned *)(regs->cp0_epc + 4) = BP;
- }
-}
-
-/*
- * If asynchronously interrupted by gdb, then we need to set a breakpoint
- * at the interrupted instruction so that we wind up stopped with a
- * reasonable stack frame.
- */
-static struct gdb_bp_save async_bp;
-
-/*
- * Swap the interrupted EPC with our asynchronous breakpoint routine.
- * This is safer than stuffing the breakpoint in-place, since no cache
- * flushes (or resulting smp_call_functions) are required. The
- * assumption is that only one CPU will be handling asynchronous bp's,
- * and only one can be active at a time.
- */
-extern spinlock_t smp_call_lock;
-
-void set_async_breakpoint(unsigned long *epc)
-{
- /* skip breaking into userland */
- if ((*epc & 0x80000000) == 0)
- return;
-
-#ifdef CONFIG_SMP
- /* avoid deadlock if someone is making an IPC call */
- if (spin_is_locked(&smp_call_lock))
- return;
-#endif
-
- async_bp.addr = *epc;
- *epc = (unsigned long)async_breakpoint;
-}
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(void *arg)
-{
- unsigned flags;
- int cpu = smp_processor_id();
-
- local_irq_save(flags);
-
- __raw_spin_lock(&kgdb_cpulock[cpu]);
- __raw_spin_unlock(&kgdb_cpulock[cpu]);
-
- local_irq_restore(flags);
-}
-#endif
-
-/*
- * The GDB stub needs to call kgdb_wait on all processors with interrupts
- * disabled, so it uses its own special variant.
- */
-static int kgdb_smp_call_kgdb_wait(void)
-{
-#ifdef CONFIG_SMP
- cpumask_t mask = cpu_online_map;
- struct call_data_struct data;
- int cpu = smp_processor_id();
- int cpus;
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- BUG_ON(!cpu_online(cpu));
-
- cpu_clear(cpu, mask);
- cpus = cpus_weight(mask);
- if (!cpus)
- return 0;
-
- if (spin_is_locked(&smp_call_lock)) {
- /*
- * Some other processor is trying to make us do something
- * but we're not going to respond... give up
- */
- return -1;
- }
-
- /*
- * We will continue here, accepting the fact that
- * the kernel may deadlock if another CPU attempts
- * to call smp_call_function now...
- */
-
- data.func = kgdb_wait;
- data.info = NULL;
- atomic_set(&data.started, 0);
- data.wait = 0;
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- mb();
-
- core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- call_data = NULL;
- spin_unlock(&smp_call_lock);
-#endif
-
- return 0;
-}
-
-/*
- * This function does all command processing for interfacing to gdb. It
- * returns 1 if you should skip the instruction at the trap address, 0
- * otherwise.
- */
-void handle_exception(struct gdb_regs *regs)
-{
- int trap; /* Trap type */
- int sigval;
- long addr;
- int length;
- char *ptr;
- unsigned long *stack;
- int i;
- int bflag = 0;
-
- kgdb_started = 1;
-
- /*
- * acquire the big kgdb spinlock
- */
- if (!spin_trylock(&kgdb_lock)) {
- /*
- * some other CPU has the lock, we should go back to
- * receive the gdb_wait IPC
- */
- return;
- }
-
- /*
- * If we're in async_breakpoint(), restore the real EPC from
- * the breakpoint.
- */
- if (regs->cp0_epc == (unsigned long)async_breakinst) {
- regs->cp0_epc = async_bp.addr;
- async_bp.addr = 0;
- }
-
- /*
- * acquire the CPU spinlocks
- */
- for_each_online_cpu(i)
- if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
- panic("kgdb: couldn't get cpulock %d\n", i);
-
- /*
- * force other cpus to enter kgdb
- */
- kgdb_smp_call_kgdb_wait();
-
- /*
- * If we're in breakpoint() increment the PC
- */
- trap = (regs->cp0_cause & 0x7c) >> 2;
- if (trap == 9 && regs->cp0_epc == (unsigned long)breakinst)
- regs->cp0_epc += 4;
-
- /*
- * If we were single_stepping, restore the opcodes hoisted
- * for the breakpoint[s].
- */
- if (step_bp[0].addr) {
- *(unsigned *)step_bp[0].addr = step_bp[0].val;
- step_bp[0].addr = 0;
-
- if (step_bp[1].addr) {
- *(unsigned *)step_bp[1].addr = step_bp[1].val;
- step_bp[1].addr = 0;
- }
- }
-
- stack = (long *)regs->reg29; /* stack ptr */
- sigval = computeSignal(trap);
-
- /*
- * reply to host that an exception has occurred
- */
- ptr = output_buffer;
-
- /*
- * Send trap type (converted to signal)
- */
- *ptr++ = 'T';
- *ptr++ = hexchars[sigval >> 4];
- *ptr++ = hexchars[sigval & 0xf];
-
- /*
- * Send Error PC
- */
- *ptr++ = hexchars[REG_EPC >> 4];
- *ptr++ = hexchars[REG_EPC & 0xf];
- *ptr++ = ':';
- ptr = mem2hex((char *)&regs->cp0_epc, ptr, sizeof(long), 0);
- *ptr++ = ';';
-
- /*
- * Send frame pointer
- */
- *ptr++ = hexchars[REG_FP >> 4];
- *ptr++ = hexchars[REG_FP & 0xf];
- *ptr++ = ':';
- ptr = mem2hex((char *)&regs->reg30, ptr, sizeof(long), 0);
- *ptr++ = ';';
-
- /*
- * Send stack pointer
- */
- *ptr++ = hexchars[REG_SP >> 4];
- *ptr++ = hexchars[REG_SP & 0xf];
- *ptr++ = ':';
- ptr = mem2hex((char *)&regs->reg29, ptr, sizeof(long), 0);
- *ptr++ = ';';
-
- *ptr++ = 0;
- putpacket(output_buffer); /* send it off... */
-
- /*
- * Wait for input from remote GDB
- */
- while (1) {
- output_buffer[0] = 0;
- getpacket(input_buffer);
-
- switch (input_buffer[0])
- {
- case '?':
- output_buffer[0] = 'S';
- output_buffer[1] = hexchars[sigval >> 4];
- output_buffer[2] = hexchars[sigval & 0xf];
- output_buffer[3] = 0;
- break;
-
- /*
- * Detach debugger; let CPU run
- */
- case 'D':
- putpacket(output_buffer);
- goto finish_kgdb;
- break;
-
- case 'd':
- /* toggle debug flag */
- break;
-
- /*
- * Return the value of the CPU registers
- */
- case 'g':
- ptr = output_buffer;
- ptr = mem2hex((char *)&regs->reg0, ptr, 32*sizeof(long), 0); /* r0...r31 */
- ptr = mem2hex((char *)&regs->cp0_status, ptr, 6*sizeof(long), 0); /* cp0 */
- ptr = mem2hex((char *)&regs->fpr0, ptr, 32*sizeof(long), 0); /* f0...31 */
- ptr = mem2hex((char *)&regs->cp1_fsr, ptr, 2*sizeof(long), 0); /* cp1 */
- ptr = mem2hex((char *)&regs->frame_ptr, ptr, 2*sizeof(long), 0); /* frp */
- ptr = mem2hex((char *)&regs->cp0_index, ptr, 16*sizeof(long), 0); /* cp0 */
- break;
-
- /*
- * set the value of the CPU registers - return OK
- */
- case 'G':
- {
- ptr = &input_buffer[1];
- hex2mem(ptr, (char *)&regs->reg0, 32*sizeof(long), 0, 0);
- ptr += 32*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->cp0_status, 6*sizeof(long), 0, 0);
- ptr += 6*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->fpr0, 32*sizeof(long), 0, 0);
- ptr += 32*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->cp1_fsr, 2*sizeof(long), 0, 0);
- ptr += 2*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->frame_ptr, 2*sizeof(long), 0, 0);
- ptr += 2*(2*sizeof(long));
- hex2mem(ptr, (char *)&regs->cp0_index, 16*sizeof(long), 0, 0);
- strcpy(output_buffer, "OK");
- }
- break;
-
- /*
- * mAA..AA,LLLL Read LLLL bytes at address AA..AA
- */
- case 'm':
- ptr = &input_buffer[1];
-
- if (hexToLong(&ptr, &addr)
- && *ptr++ == ','
- && hexToInt(&ptr, &length)) {
- if (mem2hex((char *)addr, output_buffer, length, 1))
- break;
- strcpy(output_buffer, "E03");
- } else
- strcpy(output_buffer, "E01");
- break;
-
- /*
- * XAA..AA,LLLL: Write LLLL escaped binary bytes at address AA.AA
- */
- case 'X':
- bflag = 1;
- /* fall through */
-
- /*
- * MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK
- */
- case 'M':
- ptr = &input_buffer[1];
-
- if (hexToLong(&ptr, &addr)
- && *ptr++ == ','
- && hexToInt(&ptr, &length)
- && *ptr++ == ':') {
- if (hex2mem(ptr, (char *)addr, length, bflag, 1))
- strcpy(output_buffer, "OK");
- else
- strcpy(output_buffer, "E03");
- }
- else
- strcpy(output_buffer, "E02");
- break;
-
- /*
- * cAA..AA Continue at address AA..AA(optional)
- */
- case 'c':
- /* try to read optional parameter, pc unchanged if no parm */
-
- ptr = &input_buffer[1];
- if (hexToLong(&ptr, &addr))
- regs->cp0_epc = addr;
-
- goto exit_kgdb_exception;
- break;
-
- /*
- * kill the program; let us try to restart the machine
- * Reset the whole machine.
- */
- case 'k':
- case 'r':
- machine_restart("kgdb restarts machine");
- break;
-
- /*
- * Step to next instruction
- */
- case 's':
- /*
- * There is no single step insn in the MIPS ISA, so we
- * use breakpoints and continue, instead.
- */
- single_step(regs);
- goto exit_kgdb_exception;
- /* NOTREACHED */
- break;
-
- /*
- * Set baud rate (bBB)
- * FIXME: Needs to be written
- */
- case 'b':
- {
-#if 0
- int baudrate;
- extern void set_timer_3();
-
- ptr = &input_buffer[1];
- if (!hexToInt(&ptr, &baudrate))
- {
- strcpy(output_buffer, "B01");
- break;
- }
-
- /* Convert baud rate to uart clock divider */
-
- switch (baudrate)
- {
- case 38400:
- baudrate = 16;
- break;
- case 19200:
- baudrate = 33;
- break;
- case 9600:
- baudrate = 65;
- break;
- default:
- baudrate = 0;
- strcpy(output_buffer, "B02");
- goto x1;
- }
-
- if (baudrate) {
- putpacket("OK"); /* Ack before changing speed */
- set_timer_3(baudrate); /* Set it */
- }
-#endif
- }
- break;
-
- } /* switch */
-
- /*
- * reply to the request
- */
-
- putpacket(output_buffer);
-
- } /* while */
-
- return;
-
-finish_kgdb:
- restore_debug_traps();
-
-exit_kgdb_exception:
- /* release locks so other CPUs can go */
- for_each_online_cpu(i)
- __raw_spin_unlock(&kgdb_cpulock[i]);
- spin_unlock(&kgdb_lock);
-
- __flush_cache_all();
- return;
-}
-
-/*
- * This function will generate a breakpoint exception. It is used at the
- * beginning of a program to sync up with a debugger and can be used
- * otherwise as a quick means to stop program execution and "break" into
- * the debugger.
- */
-void breakpoint(void)
-{
- if (!initialized)
- return;
-
- __asm__ __volatile__(
- ".globl breakinst\n\t"
- ".set\tnoreorder\n\t"
- "nop\n"
- "breakinst:\tbreak\n\t"
- "nop\n\t"
- ".set\treorder"
- );
-}
-
-/* Nothing but the break; don't pollute any registers */
-void async_breakpoint(void)
-{
- __asm__ __volatile__(
- ".globl async_breakinst\n\t"
- ".set\tnoreorder\n\t"
- "nop\n"
- "async_breakinst:\tbreak\n\t"
- "nop\n\t"
- ".set\treorder"
- );
-}
-
-void adel(void)
-{
- __asm__ __volatile__(
- ".globl\tadel\n\t"
- "lui\t$8,0x8000\n\t"
- "lw\t$9,1($8)\n\t"
- );
-}
-
-/*
- * malloc is needed by the gdb client for "call func()"; even a private
- * one will make gdb happy.
- */
-static void __used *malloc(size_t size)
-{
- return kmalloc(size, GFP_ATOMIC);
-}
-
-static void __used free(void *where)
-{
- kfree(where);
-}
-
-#ifdef CONFIG_GDB_CONSOLE
-
-void gdb_putsn(const char *str, int l)
-{
- char outbuf[18];
-
- if (!kgdb_started)
- return;
-
- outbuf[0]='O';
-
- while(l) {
- int i = (l>8)?8:l;
- mem2hex((char *)str, &outbuf[1], i, 0);
- outbuf[(i*2)+1]=0;
- putpacket(outbuf);
- str += i;
- l -= i;
- }
-}
-
-static void gdb_console_write(struct console *con, const char *s, unsigned n)
-{
- gdb_putsn(s, n);
-}
-
-static struct console gdb_console = {
- .name = "gdb",
- .write = gdb_console_write,
- .flags = CON_PRINTBUFFER,
- .index = -1
-};
-
-static int __init register_gdb_console(void)
-{
- register_console(&gdb_console);
-
- return 0;
-}
-
-console_initcall(register_gdb_console);
-
-#endif
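Although the stub is gone, the packet framing its header documented is simple to demonstrate: the checksum is just the modulo-256 sum of the payload bytes, which is where the "$m0,10#2a" example above gets its 2a. A stand-alone sketch, not kernel code:

    #include <stdio.h>

    int main(void)
    {
            const char *payload = "m0,10"; /* read 0x10 bytes at address 0 */
            unsigned char sum = 0;
            const char *p;

            for (p = payload; *p; p++)
                    sum += (unsigned char)*p;
            printf("$%s#%02x\n", payload, sum); /* prints $m0,10#2a */
            return 0;
    }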
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c6ada98ee04..ac35e12cb1f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -5,8 +5,8 @@
*
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
@@ -19,29 +19,10 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
-#include <asm/page.h>
-
-#define PANIC_PIC(msg) \
- .set push; \
- .set reorder; \
- PTR_LA a0,8f; \
- .set noat; \
- PTR_LA AT, panic; \
- jr AT; \
-9: b 9b; \
- .set pop; \
- TEXT(msg)
+#include <asm/thread_info.h>
__INIT
-NESTED(except_vec0_generic, 0, sp)
- PANIC_PIC("Exception vector 0 called")
- END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
- PANIC_PIC("Exception vector 1 called")
- END(except_vec1_generic)
-
/*
* General exception vector for all other CPUs.
*
@@ -72,7 +53,7 @@ NESTED(except_vec3_generic, 0, sp)
*/
NESTED(except_vec3_r4000, 0, sp)
.set push
- .set mips3
+ .set arch=r4000
.set noat
mfc0 k1, CP0_CAUSE
li k0, 31<<2
@@ -126,7 +107,49 @@ handle_vcei:
__FINIT
- .align 5
+ .align 5 /* 32 byte rollback region */
+LEAF(__r4k_wait)
+ .set push
+ .set noreorder
+ /* start of rollback region */
+ LONG_L t0, TI_FLAGS($28)
+ nop
+ andi t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+ nop
+ nop
+ nop
+#endif
+ .set arch=r4000
+ wait
+ /* end of rollback region (the region size must be power of two) */
+1:
+ jr ra
+ nop
+ .set pop
+ END(__r4k_wait)
+
+ .macro BUILD_ROLLBACK_PROLOGUE handler
+ FEXPORT(rollback_\handler)
+ .set push
+ .set noat
+ MFC0 k0, CP0_EPC
+ PTR_LA k1, __r4k_wait
+ ori k0, 0x1f /* 32 byte rollback region */
+ xori k0, 0x1f
+ bne k0, k1, 9f
+ MTC0 k0, CP0_EPC
+9:
+ .set pop
+ .endm
+
+ .align 5
+BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
/*
@@ -166,7 +189,11 @@ NESTED(handle_int, PT_SIZE, sp)
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
- j plat_irq_dispatch
+ PTR_LA v0, plat_irq_dispatch
+ jr v0
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(handle_int)
__INIT
@@ -187,11 +214,14 @@ NESTED(except_vec4, 0, sp)
/*
* EJTAG debug exception handler.
* The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
* unconditional jump to this vector.
*/
NESTED(except_vec_ejtag_debug, 0, sp)
j ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(except_vec_ejtag_debug)
__FINIT
@@ -201,23 +231,16 @@ NESTED(except_vec_ejtag_debug, 0, sp)
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
SAVE_SOME
SAVE_AT
.set push
.set noreorder
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * To keep from blindly blocking *all* interrupts
- * during service by SMTC kernel, we also want to
- * pass the IM value to be cleared.
- */
-FEXPORT(except_vec_vi_mori)
- ori a0, $0, 0
-#endif /* CONFIG_MIPS_MT_SMTC */
+ PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
- j except_vec_vi_handler
+ jr v1
FEXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
@@ -231,37 +254,10 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC has an interesting problem that interrupts are level-triggered,
- * and the CLI macro will clear EXL, potentially causing a duplicate
- * interrupt service invocation. So we need to clear the associated
- * IM bit of Status prior to doing CLI, and restore it after the
- * service routine has been invoked - we must assume that the
- * service routine will have cleared the state, and any active
- * level represents a new or otherwised unserviced event...
- */
- mfc0 t1, CP0_STATUS
- and t0, a0, t1
-#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
- mfc0 t2, CP0_TCCONTEXT
- or t0, t0, t2
- mtc0 t0, CP0_TCCONTEXT
-#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
- xor t1, t1, t0
- mtc0 t1, CP0_STATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
CLI
#ifdef CONFIG_TRACE_IRQFLAGS
move s0, v0
-#ifdef CONFIG_MIPS_MT_SMTC
- move s1, a0
-#endif
TRACE_IRQS_OFF
-#ifdef CONFIG_MIPS_MT_SMTC
- move a0, s1
-#endif
move v0, s0
#endif
@@ -318,6 +314,9 @@ EXPORT(ejtag_debug_buffer)
*/
NESTED(except_vec_nmi, 0, sp)
j nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
END(except_vec_nmi)
__FINIT
@@ -325,12 +324,20 @@ NESTED(except_vec_nmi, 0, sp)
NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
+ /*
+ * Clear ERL - restore segment mapping
+ * Clear BEV - required for page fault exception handler to work
+ */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_EXL
+ li k1, ~(ST0_BEV | ST0_ERL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ _ehb
SAVE_ALL
- move a0, sp
+ move a0, sp
jal nmi_exception_handler
- RESTORE_ALL
- .set mips3
- eret
+ /* nmi_exception_handler never returns */
.set pop
END(nmi_handler)
@@ -348,10 +355,14 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
+ .set push
+ /* gas fails to assemble cfc1 for some archs (octeon). */ \
+ .set mips1
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
ctc1 a2, fcr31
+ .set pop
TRACE_IRQS_ON
STI
.endm
@@ -369,7 +380,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
string escapes and emits bogus warnings if it believes to
recognize an unknown escape code. So make the arguments
start with an n and gas will believe \n is ok ... */
- .macro __BUILD_verbose nexception
+ .macro __BUILD_verbose nexception
LONG_L a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
PRINT("Got \nexception at %08lx\012")
@@ -402,7 +413,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro BUILD_HANDLER exception handler clear verbose
- __BUILD_HANDLER \exception \handler \clear \verbose _int
+ __BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
BUILD_HANDLER adel ade ade silent /* #4 */
@@ -414,9 +425,20 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
+ BUILD_HANDLER ftlb ftlb none silent /* #16 */
+ BUILD_HANDLER msa msa sti silent /* #21 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
+ /*
+ * For watch, interrupts will be enabled after the watch
+ * registers are read.
+ */
+ BUILD_HANDLER watch watch cli silent /* #23 */
+#else
BUILD_HANDLER watch watch sti verbose /* #23 */
+#endif
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
BUILD_HANDLER mt mt sti silent /* #25 */
BUILD_HANDLER dsp dsp sti silent /* #26 */
@@ -424,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.align 5
LEAF(handle_ri_rdhwr_vivt)
-#ifdef CONFIG_MIPS_MT_SMTC
- PANIC_PIC("handle_ri_rdhwr_vivt called")
-#else
.set push
.set noat
.set noreorder
@@ -434,8 +453,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
MFC0 k1, CP0_ENTRYHI
andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
- PTR_SRL k0, PAGE_SHIFT + 1
- PTR_SLL k0, PAGE_SHIFT + 1
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
@@ -445,20 +464,41 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
-#endif
END(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr)
.set push
.set noat
.set noreorder
- /* 0x7c03e83b: rdhwr v1,$29 */
+ /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
+ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
MFC0 k1, CP0_EPC
- lui k0, 0x7c03
- lw k1, (k1)
- ori k0, 0xe83b
- .set reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+ and k0, k1, 1
+ beqz k0, 1f
+ xor k1, k0
+ lhu k0, (k1)
+ lhu k1, 2(k1)
+ ins k1, k0, 16, 16
+ lui k0, 0x007d
+ b docheck
+ ori k0, 0x6b3c
+1:
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#else
+ andi k0, k1, 1
+ bnez k0, handle_ri
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#endif
+ .set reorder
+docheck:
bne k0, k1, handle_ri /* if not ours */
+
+isrdhwr:
/* The insn is rdhwr. No need to check CAUSE.BD here. */
get_saved_sp /* k1 := current_thread_info */
.set noreorder
@@ -483,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
- .set mips3
+ .set arch=r4000
eret
.set mips0
#endif
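The ori/xori pair in BUILD_ROLLBACK_PROLOGUE is a branchless round-down of EPC to the 32-byte rollback region; if the result matches __r4k_wait, EPC is rewound so the need_resched test runs again before the wait instruction. In C the rounding is simply (a sketch, not from the patch):

    /* (epc | 0x1f) ^ 0x1f  ==  epc & ~0x1fUL: round down to 32 bytes */
    static unsigned long rollback_start(unsigned long epc)
    {
            return (epc | 0x1f) ^ 0x1f;
    }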
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644
index 00000000000..c6854d9df92
--- /dev/null
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -0,0 +1,89 @@
+/*
+ * A gpio chip driver for TXx9 SoCs
+ *
+ * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/txx9pio.h>
+
+static DEFINE_SPINLOCK(txx9_gpio_lock);
+
+static struct txx9_pio_reg __iomem *txx9_pioptr;
+
+static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return __raw_readl(&txx9_pioptr->din) & (1 << offset);
+}
+
+static void txx9_gpio_set_raw(unsigned int offset, int value)
+{
+ u32 val;
+ val = __raw_readl(&txx9_pioptr->dout);
+ if (value)
+ val |= 1 << offset;
+ else
+ val &= ~(1 << offset);
+ __raw_writel(val, &txx9_pioptr->dout);
+}
+
+static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+}
+
+static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static struct gpio_chip txx9_gpio_chip = {
+ .get = txx9_gpio_get,
+ .set = txx9_gpio_set,
+ .direction_input = txx9_gpio_dir_in,
+ .direction_output = txx9_gpio_dir_out,
+ .label = "TXx9",
+};
+
+int __init txx9_gpio_init(unsigned long baseaddr,
+ unsigned int base, unsigned int num)
+{
+ txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
+ if (!txx9_pioptr)
+ return -ENODEV;
+ txx9_gpio_chip.base = base;
+ txx9_gpio_chip.ngpio = num;
+ return gpiochip_add(&txx9_gpio_chip);
+}
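
gpio_txx9.c registers a single gpio_chip backed by the SoC's PIO block; a board file maps the register window and chooses the GPIO number range. A hedged usage sketch (the base address and pin count below are made up for illustration, not taken from any TXx9 datasheet):

#include <linux/init.h>
#include <asm/txx9pio.h>

/* Hypothetical board setup: expose 16 PIO pins as GPIOs 0..15. */
static int __init myboard_pio_setup(void)
{
	return txx9_gpio_init(0x1f3800 /* assumed PIO base */, 0, 16);
}
arch_initcall(myboard_pio_setup);
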
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 361364501d3..95afd663cd4 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -21,52 +21,13 @@
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
-#include <asm/page.h>
+#include <asm/pgtable-bits.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <kernel-entry-init.h>
/*
- * inputs are the text nasid in t1, data nasid in t2.
- */
- .macro MAPPED_KERNEL_SETUP_TLB
-#ifdef CONFIG_MAPPED_KERNEL
- /*
- * This needs to read the nasid - assume 0 for now.
- * Drop in 0xffffffffc0000000 in tlbhi, 0+VG in tlblo_0,
- * 0+DVG in tlblo_1.
- */
- dli t0, 0xffffffffc0000000
- dmtc0 t0, CP0_ENTRYHI
- li t0, 0x1c000 # Offset of text into node memory
- dsll t1, NASID_SHFT # Shift text nasid into place
- dsll t2, NASID_SHFT # Same for data nasid
- or t1, t1, t0 # Physical load address of kernel text
- or t2, t2, t0 # Physical load address of kernel data
- dsrl t1, 12 # 4K pfn
- dsrl t2, 12 # 4K pfn
- dsll t1, 6 # Get pfn into place
- dsll t2, 6 # Get pfn into place
- li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _CACHE_CACHABLE_COW) >> 6)
- or t0, t0, t1
- mtc0 t0, CP0_ENTRYLO0 # physaddr, VG, cach exlwr
- li t0, ((_PAGE_GLOBAL|_PAGE_VALID| _PAGE_DIRTY|_CACHE_CACHABLE_COW) >> 6)
- or t0, t0, t2
- mtc0 t0, CP0_ENTRYLO1 # physaddr, DVG, cach exlwr
- li t0, 0x1ffe000 # MAPPED_KERN_TLBMASK, TLBPGMASK_16M
- mtc0 t0, CP0_PAGEMASK
- li t0, 0 # KMAP_INX
- mtc0 t0, CP0_INDEX
- li t0, 1
- mtc0 t0, CP0_WIRED
- tlbwi
-#else
- mtc0 zero, CP0_WIRED
-#endif
- .endm
-
- /*
* For the moment disable interrupts, mark the kernel mode and
* set ST0_KX so that the CPU does not spit fire when using
* 64-bit addresses. A full initialization of the CPU's status
@@ -74,33 +35,12 @@
*/
.macro setup_c0_status set clr
.set push
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * For SMTC, we need to set privilege and disable interrupts only for
- * the current TC, using the TCStatus register.
- */
- mfc0 t0, CP0_TCSTATUS
- /* Fortunately CU 0 is in the same place in both registers */
- /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
- li t1, ST0_CU0 | 0x08001c00
- or t0, t1
- /* Clear TKSU, leave IXMT */
- xori t0, 0x00001800
- mtc0 t0, CP0_TCSTATUS
- _ehb
- /* We need to leave the global IE bit set, but clear EXL...*/
- mfc0 t0, CP0_STATUS
- or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
- xor t0, ST0_EXL | ST0_ERL | \clr
- mtc0 t0, CP0_STATUS
-#else
mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS
.set noreorder
sll zero,3 # ehb
-#endif
.set pop
.endm
@@ -133,7 +73,7 @@ EXPORT(_stext)
#ifdef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution begins at the
- * kernel load address. This is needed because this platform does
+ * kernel load address. This is needed because this platform does
* not have a ELF loader yet.
*/
FEXPORT(__kernel_entry)
@@ -154,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
- * We still need to enable interrupts globally in Status,
- * and clear EXL/ERL.
- *
- * TCContext is used to track interrupt levels under
- * service in SMTC kernel. Clear for boot TC before
- * allowing any interrupts.
- */
- mtc0 zero, CP0_TCCONTEXT
-
- mfc0 t0, CP0_STATUS
- ori t0, t0, 0xff1f
- xori t0, t0, 0x001e
- mtc0 t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
-
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
@@ -187,43 +109,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
- PTR_LI sp, _THREAD_SIZE - 32
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
PTR_ADDU sp, $28
+ back_to_back_c0_hazard
set_saved_sp sp, t0, t1
PTR_SUBU sp, 4 * SZREG # init stack pointer
j start_kernel
END(kernel_entry)
- __CPUINIT
-
#ifdef CONFIG_SMP
/*
- * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * Read-modify-writes of Status must be atomic, and this
- * is one case where CLI is invoked without EXL being
- * necessarily set. The CLI and setup_c0_status will
- * in fact be redundant for all but the first TC of
- * each VPE being booted.
- */
- DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
- jal mips_ihb
-#endif /* CONFIG_MIPS_MT_SMTC */
- setup_c0_status_sec
smp_slave_setup
-#ifdef CONFIG_MIPS_MT_SMTC
- andi t2, t2, VPECONTROL_TE
- beqz t2, 2f
- EMT # emt
-2:
-#endif /* CONFIG_MIPS_MT_SMTC */
+ setup_c0_status_sec
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
-
- __FINIT
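
The head.S change reserves room for an empty pt_regs frame at the top of the boot stack before start_kernel runs, so an early trap finds a valid frame. A worked example of the resulting offsets, with assumed sizes (neither _THREAD_SIZE nor PT_SIZE is fixed by the patch; both come from asm-offsets):

#include <stdio.h>

int main(void)
{
	const unsigned long thread_size = 16384;	/* _THREAD_SIZE, assumed */
	const unsigned long pt_size = 304;		/* PT_SIZE, assumed */
	const unsigned long szreg = 8;			/* SZREG on a 64-bit kernel */

	unsigned long sp = thread_size - 32 - pt_size;	/* empty pt_regs below pad */
	sp -= 4 * szreg;				/* argument save area */
	printf("initial sp = stack base + %lu bytes\n", sp);
	return 0;
}
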
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 38fa1a194bf..c5bc344fc74 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -3,209 +3,37 @@
*
*/
#include <linux/clockchips.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
+#include <linux/i8253.h>
+#include <linux/export.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
-#include <asm/delay.h>
-#include <asm/i8253.h>
-#include <asm/io.h>
#include <asm/time.h>
-DEFINE_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
-
-/*
- * Initialize the PIT timer.
- *
- * This is also called after resume to bring the PIT into operation again.
- */
-static void init_pit_timer(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- spin_lock(&i8253_lock);
-
- switch(mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- /* binary, mode 2, LSB/MSB, ch 0 */
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- outb(LATCH >> 8 , PIT_CH0); /* MSB */
- break;
-
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
- if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
- evt->mode == CLOCK_EVT_MODE_ONESHOT) {
- outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
- }
- break;
-
- case CLOCK_EVT_MODE_ONESHOT:
- /* One shot setup */
- outb_p(0x38, PIT_MODE);
- break;
-
- case CLOCK_EVT_MODE_RESUME:
- /* Nothing to do here */
- break;
- }
- spin_unlock(&i8253_lock);
-}
-
-/*
- * Program the next event in oneshot mode
- *
- * Delta is given in PIT ticks
- */
-static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
-{
- spin_lock(&i8253_lock);
- outb_p(delta & 0xff , PIT_CH0); /* LSB */
- outb(delta >> 8 , PIT_CH0); /* MSB */
- spin_unlock(&i8253_lock);
-
- return 0;
-}
-
-/*
- * On UP the PIT can serve all of the possible timer functions. On SMP systems
- * it can be solely used for the global tick.
- *
- * The profiling and update capabilities are switched off once the local apic is
- * registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
- * !using_apic_timer decisions in do_timer_interrupt_hook()
- */
-struct clock_event_device pit_clockevent = {
- .name = "pit",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = init_pit_timer,
- .set_next_event = pit_next_event,
- .irq = 0,
-};
-
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
- pit_clockevent.event_handler(&pit_clockevent);
+ i8253_clockevent.event_handler(&i8253_clockevent);
return IRQ_HANDLED;
}
static struct irqaction irq0 = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING,
- .mask = CPU_MASK_NONE,
+ .flags = IRQF_NOBALANCING | IRQF_TIMER,
.name = "timer"
};
-/*
- * Initialize the conversion factor and the min/max deltas of the clock event
- * structure and register the clock event source with the framework.
- */
void __init setup_pit_timer(void)
{
- struct clock_event_device *cd = &pit_clockevent;
- unsigned int cpu = smp_processor_id();
-
- /*
- * Start pit with the boot cpu mask and make it global after the
- * IO_APIC has been initialized.
- */
- cd->cpumask = cpumask_of_cpu(cpu);
- clockevent_set_clock(cd, CLOCK_TICK_RATE);
- cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
- cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
- clockevents_register_device(cd);
-
- irq0.mask = cpumask_of_cpu(cpu);
+ clockevent_i8253_init(true);
setup_irq(0, &irq0);
}
-/*
- * Since the PIT overflows every tick, its not very useful
- * to just read by itself. So use jiffies to emulate a free
- * running counter:
- */
-static cycle_t pit_read(void)
-{
- unsigned long flags;
- int count;
- u32 jifs;
- static int old_count;
- static u32 old_jifs;
-
- spin_lock_irqsave(&i8253_lock, flags);
- /*
- * Although our caller may have the read side of xtime_lock,
- * this is now a seqlock, and we are cheating in this routine
- * by having side effects on state that we cannot undo if
- * there is a collision on the seqlock and our caller has to
- * retry. (Namely, old_jifs and old_count.) So we must treat
- * jiffies as volatile despite the lock. We read jiffies
- * before latching the timer count to guarantee that although
- * the jiffies value might be older than the count (that is,
- * the counter may underflow between the last point where
- * jiffies was incremented and the point where we latch the
- * count), it cannot be newer.
- */
- jifs = jiffies;
- outb_p(0x00, PIT_MODE); /* latch the count ASAP */
- count = inb_p(PIT_CH0); /* read the latched count */
- count |= inb_p(PIT_CH0) << 8;
-
- /* VIA686a test code... reset the latch if count > max + 1 */
- if (count > LATCH) {
- outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff, PIT_CH0);
- outb(LATCH >> 8, PIT_CH0);
- count = LATCH - 1;
- }
-
- /*
- * It's possible for count to appear to go the wrong way for a
- * couple of reasons:
- *
- * 1. The timer counter underflows, but we haven't handled the
- * resulting interrupt and incremented jiffies yet.
- * 2. Hardware problem with the timer, not giving us continuous time,
- * the counter does small "jumps" upwards on some Pentium systems,
- * (see c't 95/10 page 335 for Neptun bug.)
- *
- * Previous attempts to handle these cases intelligently were
- * buggy, so we just do the simple thing now.
- */
- if (count > old_count && jifs == old_jifs) {
- count = old_count;
- }
- old_count = count;
- old_jifs = jifs;
-
- spin_unlock_irqrestore(&i8253_lock, flags);
-
- count = (LATCH - 1) - count;
-
- return (cycle_t)(jifs * LATCH) + count;
-}
-
-static struct clocksource clocksource_pit = {
- .name = "pit",
- .rating = 110,
- .read = pit_read,
- .mask = CLOCKSOURCE_MASK(32),
- .mult = 0,
- .shift = 20,
-};
-
static int __init init_pit_clocksource(void)
{
if (num_possible_cpus() > 1) /* PIT does not scale! */
return 0;
- clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
- return clocksource_register(&clocksource_pit);
+ return clocksource_i8253_init();
}
arch_initcall(init_pit_clocksource);
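
The deleted open-coded programming above used PIT control words such as 0x34 (binary, mode 2, LSB/MSB, channel 0); after this conversion the generic linux/i8253.h driver issues them instead. A small worked decode of that control word, for reference:

#include <stdio.h>

int main(void)
{
	unsigned cw = 0x34;	/* the periodic-mode control word from the old code */

	printf("channel=%u access=%u mode=%u bcd=%u\n",
	       (cw >> 6) & 3,	/* 0: counter 0 */
	       (cw >> 4) & 3,	/* 3: LSB then MSB */
	       (cw >> 1) & 7,	/* 2: rate generator (periodic) */
	       cw & 1);		/* 0: binary counting */
	return 0;
}
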
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 413bd1d37f5..50b364897dd 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,7 +14,8 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
#include <asm/i8259.h>
#include <asm/io.h>
@@ -29,21 +30,18 @@
*/
static int i8259A_auto_eoi = -1;
-DEFINE_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(unsigned int irq);
-static void enable_8259A_irq(unsigned int irq);
-static void mask_and_ack_8259A(unsigned int irq);
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);
static struct irq_chip i8259A_chip = {
- .name = "XT-PIC",
- .mask = disable_8259A_irq,
- .disable = disable_8259A_irq,
- .unmask = enable_8259A_irq,
- .mask_ack = mask_and_ack_8259A,
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
- .set_affinity = plat_set_irq_affinity,
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
+ .name = "XT-PIC",
+ .irq_mask = disable_8259A_irq,
+ .irq_disable = disable_8259A_irq,
+ .irq_unmask = enable_8259A_irq,
+ .irq_mask_ack = mask_and_ack_8259A,
};
/*
@@ -58,36 +56,34 @@ static unsigned int cached_irq_mask = 0xffff;
#define cached_master_mask (cached_irq_mask)
#define cached_slave_mask (cached_irq_mask >> 8)
-static void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_data *d)
{
- unsigned int mask;
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
- irq -= I8259A_IRQ_BASE;
mask = 1 << irq;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-static void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(struct irq_data *d)
{
- unsigned int mask;
+ unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
- irq -= I8259A_IRQ_BASE;
mask = ~(1 << irq);
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_slave_mask, PIC_SLAVE_IMR);
else
outb(cached_master_mask, PIC_MASTER_IMR);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
@@ -98,12 +94,12 @@ int i8259A_irq_pending(unsigned int irq)
irq -= I8259A_IRQ_BASE;
mask = 1 << irq;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
ret = inb(PIC_MASTER_CMD) & mask;
else
ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return ret;
}
@@ -111,7 +107,7 @@ int i8259A_irq_pending(unsigned int irq)
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
enable_irq(irq);
}
@@ -144,14 +140,13 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-static void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_data *d)
{
- unsigned int irqmask;
+ unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
unsigned long flags;
- irq -= I8259A_IRQ_BASE;
irqmask = 1 << irq;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
@@ -180,10 +175,9 @@ handle_real_irq:
} else {
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
- smtc_im_ack_irq(irq);
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
@@ -217,14 +211,13 @@ spurious_8259A_irq:
}
}
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
{
if (i8259A_auto_eoi >= 0)
init_8259A(i8259A_auto_eoi);
- return 0;
}
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
{
/* Put the i8259A into a quiescent state that
* the kernel initialization code can get it
@@ -232,28 +225,19 @@ static int i8259A_shutdown(struct sys_device *dev)
*/
if (i8259A_auto_eoi >= 0) {
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
- outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
+ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
}
- return 0;
}
-static struct sysdev_class i8259_sysdev_class = {
- .name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
};
-static struct sys_device device_i8259A = {
- .id = 0,
- .cls = &i8259_sysdev_class,
-};
-
static int __init i8259A_init_sysfs(void)
{
- int error = sysdev_class_register(&i8259_sysdev_class);
- if (!error)
- error = sysdev_register(&device_i8259A);
- return error;
+ register_syscore_ops(&i8259_syscore_ops);
+ return 0;
}
device_initcall(i8259A_init_sysfs);
@@ -264,7 +248,7 @@ static void init_8259A(int auto_eoi)
i8259A_auto_eoi = auto_eoi;
- spin_lock_irqsave(&i8259A_lock, flags);
+ raw_spin_lock_irqsave(&i8259A_lock, flags);
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -289,16 +273,16 @@ static void init_8259A(int auto_eoi)
* In AEOI mode we just have to mask the interrupt
* when acking.
*/
- i8259A_chip.mask_ack = disable_8259A_irq;
+ i8259A_chip.irq_mask_ack = disable_8259A_irq;
else
- i8259A_chip.mask_ack = mask_and_ack_8259A;
+ i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
udelay(100); /* wait for 8259A to initialize */
outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
- spin_unlock_irqrestore(&i8259A_lock, flags);
+ raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
/*
@@ -306,8 +290,8 @@ static void init_8259A(int auto_eoi)
*/
static struct irqaction irq2 = {
.handler = no_action,
- .mask = CPU_MASK_NONE,
.name = "cascade",
+ .flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {
@@ -339,8 +323,8 @@ void __init init_i8259_irqs(void)
init_8259A(0);
for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
- set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
- set_irq_probe(i);
+ irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
+ irq_set_probe(i);
}
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
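
The i8259 conversion follows the generic irq_chip migration: callbacks now take struct irq_data instead of a bare IRQ number, and the controller-local line is recovered from d->irq. A minimal sketch of the pattern (example_mask and the register write are placeholders, not part of the patch):

#include <linux/irq.h>
#include <asm/i8259.h>

static void example_mask(struct irq_data *d)
{
	unsigned int line = d->irq - I8259A_IRQ_BASE;	/* local line 0..15 */

	/* ... set bit 'line' in the cached mask and write the IMR ... */
}
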
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 00000000000..09ce4598075
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,249 @@
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all MIPS CPUs have the "wait" instruction available, and the
+ * implementation of the "wait" feature differs between CPU families. This
+ * pointer selects the function that implements the CPU-specific wait.
+ * The wait instruction stalls the pipeline and significantly reduces the
+ * CPU's power consumption.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void r3081_wait(void)
+{
+ unsigned long cfg = read_c0_conf();
+ write_c0_conf(cfg | R30XX_CONF_HALT);
+ local_irq_enable();
+}
+
+static void r39xx_wait(void)
+{
+ if (!need_resched())
+ write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+ local_irq_enable();
+}
+
+void r4k_wait(void)
+{
+ local_irq_enable();
+ __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void r4k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " wait \n"
+ " .set pop \n");
+ local_irq_enable();
+ __asm__(
+ " .globl __pastwait \n"
+ "__pastwait: \n");
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void rm7k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " sync \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " wait \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " .set pop \n");
+ local_irq_enable();
+}
+
+/*
+ * Au1 'wait' is only useful when the 32kHz counter is used as timer,
+ * since coreclock (and the cp0 counter) stops upon executing it. Only an
+ * interrupt can wake it, so they must be enabled before entering idle modes.
+ */
+static void au1k_wait(void)
+{
+ unsigned long c0status = read_c0_status() | 1; /* irqs on */
+
+ __asm__(
+ " .set arch=r4000 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " mtc0 %1, $12 \n" /* wr c0status */
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set mips0 \n"
+ : : "r" (au1k_wait), "r" (c0status));
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+ nowait = 1;
+
+ return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (nowait) {
+ printk("Wait instruction disabled.\n");
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_R3081:
+ case CPU_R3081E:
+ cpu_wait = r3081_wait;
+ break;
+ case CPU_TX3927:
+ cpu_wait = r39xx_wait;
+ break;
+ case CPU_R4200:
+/* case CPU_R4300: */
+ case CPU_R4600:
+ case CPU_R4640:
+ case CPU_R4650:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5500:
+ case CPU_NEVADA:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_4KSC:
+ case CPU_5KC:
+ case CPU_25KF:
+ case CPU_PR4450:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_BMIPS5000:
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ case CPU_JZRISC:
+ case CPU_LOONGSON1:
+ case CPU_XLR:
+ case CPU_XLP:
+ cpu_wait = r4k_wait;
+ break;
+
+ case CPU_RM7000:
+ cpu_wait = rm7k_wait_irqoff;
+ break;
+
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_M5150:
+ cpu_wait = r4k_wait;
+ if (read_c0_config7() & MIPS_CONF7_WII)
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_74K:
+ cpu_wait = r4k_wait;
+ if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_ALCHEMY:
+ cpu_wait = au1k_wait;
+ break;
+ case CPU_20KC:
+ /*
+ * WAIT on Rev1.0 has E1, E2, E3 and E16.
+ * WAIT on Rev2.0 and Rev3.0 has E16.
+ * Rev3.1 WAIT is a nop, so why bother.
+ */
+ if ((c->processor_id & 0xff) <= 0x64)
+ break;
+
+ /*
+ * Another rev is incrementing c0_count at a reduced clock
+ * rate while in WAIT mode. So we basically have the choice
+ * between using the cp0 timer as clocksource or avoiding
+ * the WAIT instruction. Until more details are known,
+ * disable the use of WAIT for 20Kc entirely.
+ cpu_wait = r4k_wait;
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+void arch_cpu_idle(void)
+{
+ if (cpu_wait)
+ cpu_wait();
+ else
+ local_irq_enable();
+}
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ arch_cpu_idle();
+ return index;
+}
+
+#endif
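
Platforms needing a wait variant the table above does not cover can install their own hook: cpu_wait is an exported function pointer consumed by arch_cpu_idle(). A hedged sketch (my_soc_wait and the initcall are hypothetical; note the contract that every variant returns with interrupts enabled):

#include <linux/init.h>
#include <linux/irqflags.h>
#include <asm/idle.h>

/* Hypothetical SoC-specific low-power hook. */
static void my_soc_wait(void)
{
	/* enter the SoC's shallow sleep state here */
	local_irq_enable();	/* cpu_wait variants return with IRQs on */
}

static int __init my_soc_idle_setup(void)
{
	if (!cpu_wait)		/* keep a core-specific handler if one was chosen */
		cpu_wait = my_soc_wait;
	return 0;
}
arch_initcall(my_soc_idle_setup);
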
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
deleted file mode 100644
index aeda7f58391..00000000000
--- a/arch/mips/kernel/init_task.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-
-#include <asm/thread_info.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by making sure
- * the linker maps this in the .text segment right after head.S,
- * and making head.S ensure the proper alignment.
- *
- * The things we do for performance..
- */
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"),
- __aligned__(THREAD_SIZE))) =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/mips/kernel/irix5sys.S b/arch/mips/kernel/irix5sys.S
deleted file mode 100644
index eeef891093e..00000000000
--- a/arch/mips/kernel/irix5sys.S
+++ /dev/null
@@ -1,1041 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * 32-bit IRIX5 ABI system call table derived from original file 'irix5sys.h'
- * created by David S. Miller.
- *
- * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
- * Copyright (C) 2004 Steven J. Hill <sjhill@realitydiluted.com>
- */
-#include <asm/asm.h>
-
- /*
- * Key:
- * V == Valid and should work as expected for most cases.
- * HV == Half Valid, some things will work, some likely will not
- * IV == InValid, certainly will not work at all yet
- * ?V == ?'ably Valid, I have not done enough looking into it
- * DC == Don't Care, a rats ass we couldn't give
- */
-
- .macro irix5syscalltable
-
- sys sys_syscall 0 /* 1000 sysindir() V*/
- sys sys_exit 1 /* 1001 exit() V*/
- sys sys_fork 0 /* 1002 fork() V*/
- sys sys_read 3 /* 1003 read() V*/
- sys sys_write 3 /* 1004 write() V*/
- sys sys_open 3 /* 1005 open() V*/
- sys sys_close 1 /* 1006 close() V*/
- sys irix_unimp 0 /* 1007 (XXX IRIX 4 wait) V*/
- sys sys_creat 2 /* 1008 creat() V*/
- sys sys_link 2 /* 1009 link() V*/
- sys sys_unlink 1 /* 1010 unlink() V*/
- sys irix_exec 0 /* 1011 exec() V*/
- sys sys_chdir 1 /* 1012 chdir() V*/
- sys irix_gtime 0 /* 1013 time() V*/
- sys irix_unimp 0 /* 1014 (XXX IRIX 4 mknod) V*/
- sys sys_chmod 2 /* 1015 chmod() V*/
- sys sys_chown 3 /* 1016 chown() V*/
- sys irix_brk 1 /* 1017 break() V*/
- sys irix_unimp 0 /* 1018 (XXX IRIX 4 stat) V*/
- sys sys_lseek 3 /* 1019 lseek() XXX64bit HV*/
- sys irix_getpid 0 /* 1020 getpid() V*/
- sys irix_mount 6 /* 1021 mount() IV*/
- sys sys_umount 1 /* 1022 umount() V*/
- sys sys_setuid 1 /* 1023 setuid() V*/
- sys irix_getuid 0 /* 1024 getuid() V*/
- sys irix_stime 1 /* 1025 stime() V*/
- sys irix_unimp 4 /* 1026 XXX ptrace() IV*/
- sys irix_alarm 1 /* 1027 alarm() V*/
- sys irix_unimp 0 /* 1028 (XXX IRIX 4 fstat) V*/
- sys irix_pause 0 /* 1029 pause() V*/
- sys sys_utime 2 /* 1030 utime() V*/
- sys irix_unimp 0 /* 1031 nuthin' V*/
- sys irix_unimp 0 /* 1032 nobody home man... V*/
- sys sys_access 2 /* 1033 access() V*/
- sys sys_nice 1 /* 1034 nice() V*/
- sys irix_statfs 2 /* 1035 statfs() V*/
- sys sys_sync 0 /* 1036 sync() V*/
- sys sys_kill 2 /* 1037 kill() V*/
- sys irix_fstatfs 2 /* 1038 fstatfs() V*/
- sys irix_setpgrp 1 /* 1039 setpgrp() V*/
- sys irix_syssgi 0 /* 1040 syssgi() HV*/
- sys sys_dup 1 /* 1041 dup() V*/
- sys sys_pipe 0 /* 1042 pipe() V*/
- sys irix_times 1 /* 1043 times() V*/
- sys irix_unimp 0 /* 1044 XXX profil() IV*/
- sys irix_unimp 0 /* 1045 XXX lock() IV*/
- sys sys_setgid 1 /* 1046 setgid() V*/
- sys irix_getgid 0 /* 1047 getgid() V*/
- sys irix_unimp 0 /* 1048 (XXX IRIX 4 ssig) V*/
- sys irix_msgsys 6 /* 1049 sys_msgsys V*/
- sys sys_sysmips 4 /* 1050 sysmips() HV*/
- sys irix_unimp 0 /* 1051 XXX sysacct() IV*/
- sys irix_shmsys 5 /* 1052 sys_shmsys V*/
- sys irix_semsys 0 /* 1053 sys_semsys V*/
- sys irix_ioctl 3 /* 1054 ioctl() HV*/
- sys irix_uadmin 0 /* 1055 XXX sys_uadmin() HC*/
- sys irix_sysmp 0 /* 1056 sysmp() HV*/
- sys irix_utssys 4 /* 1057 sys_utssys() HV*/
- sys irix_unimp 0 /* 1058 nada enchilada V*/
- sys irix_exece 0 /* 1059 exece() V*/
- sys sys_umask 1 /* 1060 umask() V*/
- sys sys_chroot 1 /* 1061 chroot() V*/
- sys irix_fcntl 3 /* 1062 fcntl() ?V*/
- sys irix_ulimit 2 /* 1063 ulimit() HV*/
- sys irix_unimp 0 /* 1064 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1065 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1066 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1067 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1068 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1069 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1070 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1071 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1072 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1073 XXX AFS shit DC*/
- sys irix_unimp 0 /* 1074 nuttin' V*/
- sys irix_unimp 0 /* 1075 XXX sys_getrlimit64()IV*/
- sys irix_unimp 0 /* 1076 XXX sys_setrlimit64()IV*/
- sys sys_nanosleep 2 /* 1077 nanosleep() V*/
- sys irix_lseek64 5 /* 1078 lseek64() ?V*/
- sys sys_rmdir 1 /* 1079 rmdir() V*/
- sys sys_mkdir 2 /* 1080 mkdir() V*/
- sys sys_getdents 3 /* 1081 getdents() V*/
- sys irix_sginap 1 /* 1082 sys_sginap() V*/
- sys irix_sgikopt 3 /* 1083 sys_sgikopt() DC*/
- sys sys_sysfs 3 /* 1084 sysfs() ?V*/
- sys irix_unimp 0 /* 1085 XXX sys_getmsg() DC*/
- sys irix_unimp 0 /* 1086 XXX sys_putmsg() DC*/
- sys sys_poll 3 /* 1087 poll() V*/
- sys irix_sigreturn 0 /* 1088 sigreturn() ?V*/
- sys sys_accept 3 /* 1089 accept() V*/
- sys sys_bind 3 /* 1090 bind() V*/
- sys sys_connect 3 /* 1091 connect() V*/
- sys irix_gethostid 0 /* 1092 sys_gethostid() ?V*/
- sys sys_getpeername 3 /* 1093 getpeername() V*/
- sys sys_getsockname 3 /* 1094 getsockname() V*/
- sys sys_getsockopt 5 /* 1095 getsockopt() V*/
- sys sys_listen 2 /* 1096 listen() V*/
- sys sys_recv 4 /* 1097 recv() V*/
- sys sys_recvfrom 6 /* 1098 recvfrom() V*/
- sys sys_recvmsg 3 /* 1099 recvmsg() V*/
- sys sys_select 5 /* 1100 select() V*/
- sys sys_send 4 /* 1101 send() V*/
- sys sys_sendmsg 3 /* 1102 sendmsg() V*/
- sys sys_sendto 6 /* 1103 sendto() V*/
- sys irix_sethostid 1 /* 1104 sys_sethostid() ?V*/
- sys sys_setsockopt 5 /* 1105 setsockopt() V*/
- sys sys_shutdown 2 /* 1106 shutdown() ?V*/
- sys irix_socket 3 /* 1107 socket() V*/
- sys sys_gethostname 2 /* 1108 sys_gethostname() ?V*/
- sys sys_sethostname 2 /* 1109 sethostname() ?V*/
- sys irix_getdomainname 2 /* 1110 sys_getdomainname() ?V*/
- sys sys_setdomainname 2 /* 1111 setdomainname() ?V*/
- sys sys_truncate 2 /* 1112 truncate() V*/
- sys sys_ftruncate 2 /* 1113 ftruncate() V*/
- sys sys_rename 2 /* 1114 rename() V*/
- sys sys_symlink 2 /* 1115 symlink() V*/
- sys sys_readlink 3 /* 1116 readlink() V*/
- sys irix_unimp 0 /* 1117 XXX IRIX 4 lstat() DC*/
- sys irix_unimp 0 /* 1118 nothin' V*/
- sys irix_unimp 0 /* 1119 XXX nfs_svc() DC*/
- sys irix_unimp 0 /* 1120 XXX nfs_getfh() DC*/
- sys irix_unimp 0 /* 1121 XXX async_daemon() DC*/
- sys irix_unimp 0 /* 1122 XXX exportfs() DC*/
- sys sys_setregid 2 /* 1123 setregid() V*/
- sys sys_setreuid 2 /* 1124 setreuid() V*/
- sys sys_getitimer 2 /* 1125 getitimer() V*/
- sys sys_setitimer 3 /* 1126 setitimer() V*/
- sys irix_unimp 1 /* 1127 XXX adjtime() IV*/
- sys irix_gettimeofday 1 /* 1128 gettimeofday() V*/
- sys irix_unimp 0 /* 1129 XXX sproc() IV*/
- sys irix_prctl 0 /* 1130 prctl() HV*/
- sys irix_unimp 0 /* 1131 XXX procblk() IV*/
- sys irix_unimp 0 /* 1132 XXX sprocsp() IV*/
- sys irix_unimp 0 /* 1133 XXX sgigsc() IV*/
- sys irix_mmap32 6 /* 1134 mmap() XXXflags? ?V*/
- sys sys_munmap 2 /* 1135 munmap() V*/
- sys sys_mprotect 3 /* 1136 mprotect() V*/
- sys sys_msync 4 /* 1137 msync() V*/
- sys irix_madvise 3 /* 1138 madvise() DC*/
- sys irix_pagelock 3 /* 1139 pagelock() IV*/
- sys irix_getpagesize 0 /* 1140 getpagesize() V*/
- sys irix_quotactl 0 /* 1141 quotactl() V*/
- sys irix_unimp 0 /* 1142 nobody home man V*/
- sys sys_getpgid 1 /* 1143 BSD getpgrp() V*/
- sys irix_BSDsetpgrp 2 /* 1143 BSD setpgrp() V*/
- sys sys_vhangup 0 /* 1144 vhangup() V*/
- sys sys_fsync 1 /* 1145 fsync() V*/
- sys sys_fchdir 1 /* 1146 fchdir() V*/
- sys sys_getrlimit 2 /* 1147 getrlimit() ?V*/
- sys sys_setrlimit 2 /* 1148 setrlimit() ?V*/
- sys sys_cacheflush 3 /* 1150 cacheflush() HV*/
- sys sys_cachectl 3 /* 1151 cachectl() HV*/
- sys sys_fchown 3 /* 1152 fchown() ?V*/
- sys sys_fchmod 2 /* 1153 fchmod() ?V*/
- sys irix_unimp 0 /* 1154 XXX IRIX 4 wait3() V*/
- sys sys_socketpair 4 /* 1155 socketpair() V*/
- sys irix_systeminfo 3 /* 1156 systeminfo() IV*/
- sys irix_uname 1 /* 1157 uname() IV*/
- sys irix_xstat 3 /* 1158 xstat() V*/
- sys irix_lxstat 3 /* 1159 lxstat() V*/
- sys irix_fxstat 3 /* 1160 fxstat() V*/
- sys irix_xmknod 0 /* 1161 xmknod() ?V*/
- sys irix_sigaction 4 /* 1162 sigaction() ?V*/
- sys irix_sigpending 1 /* 1163 sigpending() ?V*/
- sys irix_sigprocmask 3 /* 1164 sigprocmask() ?V*/
- sys irix_sigsuspend 0 /* 1165 sigsuspend() ?V*/
- sys irix_sigpoll_sys 3 /* 1166 sigpoll_sys() IV*/
- sys irix_swapctl 2 /* 1167 swapctl() IV*/
- sys irix_getcontext 0 /* 1168 getcontext() HV*/
- sys irix_setcontext 0 /* 1169 setcontext() HV*/
- sys irix_waitsys 5 /* 1170 waitsys() IV*/
- sys irix_sigstack 2 /* 1171 sigstack() HV*/
- sys irix_sigaltstack 2 /* 1172 sigaltstack() HV*/
- sys irix_sigsendset 2 /* 1173 sigsendset() IV*/
- sys irix_statvfs 2 /* 1174 statvfs() V*/
- sys irix_fstatvfs 2 /* 1175 fstatvfs() V*/
- sys irix_unimp 0 /* 1176 XXX getpmsg() DC*/
- sys irix_unimp 0 /* 1177 XXX putpmsg() DC*/
- sys sys_lchown 3 /* 1178 lchown() V*/
- sys irix_priocntl 0 /* 1179 priocntl() DC*/
- sys irix_sigqueue 4 /* 1180 sigqueue() IV*/
- sys sys_readv 3 /* 1181 readv() V*/
- sys sys_writev 3 /* 1182 writev() V*/
- sys irix_truncate64 4 /* 1183 truncate64() XX32bit HV*/
- sys irix_ftruncate64 4 /* 1184 ftruncate64()XX32bit HV*/
- sys irix_mmap64 0 /* 1185 mmap64() XX32bit HV*/
- sys irix_dmi 0 /* 1186 dmi() DC*/
- sys irix_pread 6 /* 1187 pread() IV*/
- sys irix_pwrite 6 /* 1188 pwrite() IV*/
- sys sys_fsync 1 /* 1189 fdatasync() XXPOSIX HV*/
- sys irix_sgifastpath 7 /* 1190 sgifastpath() WHEEE IV*/
- sys irix_unimp 0 /* 1191 XXX attr_get() DC*/
- sys irix_unimp 0 /* 1192 XXX attr_getf() DC*/
- sys irix_unimp 0 /* 1193 XXX attr_set() DC*/
- sys irix_unimp 0 /* 1194 XXX attr_setf() DC*/
- sys irix_unimp 0 /* 1195 XXX attr_remove() DC*/
- sys irix_unimp 0 /* 1196 XXX attr_removef() DC*/
- sys irix_unimp 0 /* 1197 XXX attr_list() DC*/
- sys irix_unimp 0 /* 1198 XXX attr_listf() DC*/
- sys irix_unimp 0 /* 1199 XXX attr_multi() DC*/
- sys irix_unimp 0 /* 1200 XXX attr_multif() DC*/
- sys irix_statvfs64 2 /* 1201 statvfs64() V*/
- sys irix_fstatvfs64 2 /* 1202 fstatvfs64() V*/
- sys irix_getmountid 2 /* 1203 getmountid()XXXfsids HV*/
- sys irix_nsproc 5 /* 1204 nsproc() IV*/
- sys irix_getdents64 3 /* 1205 getdents64() HV*/
- sys irix_unimp 0 /* 1206 XXX DFS garbage DC*/
- sys irix_ngetdents 4 /* 1207 ngetdents() XXXeop HV*/
- sys irix_ngetdents64 4 /* 1208 ngetdents64() XXXeop HV*/
- sys irix_unimp 0 /* 1209 nothin' V*/
- sys irix_unimp 0 /* 1210 XXX pidsprocsp() */
- sys irix_unimp 0 /* 1211 XXX rexec() */
- sys irix_unimp 0 /* 1212 XXX timer_create() */
- sys irix_unimp 0 /* 1213 XXX timer_delete() */
- sys irix_unimp 0 /* 1214 XXX timer_settime() */
- sys irix_unimp 0 /* 1215 XXX timer_gettime() */
- sys irix_unimp 0 /* 1216 XXX timer_setoverrun() */
- sys sys_sched_rr_get_interval 2 /* 1217 sched_rr_get_interval()V*/
- sys sys_sched_yield 0 /* 1218 sched_yield() V*/
- sys sys_sched_getscheduler 1 /* 1219 sched_getscheduler() V*/
- sys sys_sched_setscheduler 3 /* 1220 sched_setscheduler() V*/
- sys sys_sched_getparam 2 /* 1221 sched_getparam() V*/
- sys sys_sched_setparam 2 /* 1222 sched_setparam() V*/
- sys irix_unimp 0 /* 1223 XXX usync_cntl() */
- sys irix_unimp 0 /* 1224 XXX psema_cntl() */
- sys irix_unimp 0 /* 1225 XXX restartreturn() */
-
- /* Just to pad things out nicely. */
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
- sys irix_unimp 0
-
- .endm
-
- /*
- * Pre-compute the number of _instruction_ bytes needed to load
- * or store the arguments 6-8. Negative values are ignored.
- */
- .macro sys function, nargs
- PTR \function
- LONG (\nargs << 2) - (5 << 2)
- .endm
-
- .align 4
-EXPORT(sys_call_table_irix5)
- irix5syscalltable
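
The deleted sys macro stored, next to each entry point, the number of instruction bytes needed to fetch arguments past the fifth: (nargs << 2) - (5 << 2) is 4 * (nargs - 5), one 4-byte load or store per stacked argument, with negative values ignored. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
	for (int nargs = 4; nargs <= 8; nargs++)
		printf("nargs=%d -> %d bytes\n",
		       nargs, (nargs << 2) - (5 << 2));
	/* nargs=6 -> 4 (one instruction), nargs=7 -> 8; negatives ignored */
	return 0;
}
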
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
deleted file mode 100644
index 290d8e3a664..00000000000
--- a/arch/mips/kernel/irixelf.c
+++ /dev/null
@@ -1,1358 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * irixelf.c: Code to load IRIX ELF executables conforming to the MIPS ABI.
- * Based off of work by Eric Youngdale.
- *
- * Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com>
- * Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
- * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
- */
-#undef DEBUG
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/a.out.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/binfmts.h>
-#include <linux/string.h>
-#include <linux/file.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/shm.h>
-#include <linux/personality.h>
-#include <linux/elfcore.h>
-
-#include <asm/mipsregs.h>
-#include <asm/namei.h>
-#include <asm/prctl.h>
-#include <asm/uaccess.h>
-
-#define DLINFO_ITEMS 12
-
-#include <linux/elf.h>
-
-static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
-static int load_irix_library(struct file *);
-static int irix_core_dump(long signr, struct pt_regs * regs,
- struct file *file, unsigned long limit);
-
-static struct linux_binfmt irix_format = {
- .module = THIS_MODULE,
- .load_binary = load_irix_binary,
- .load_shlib = load_irix_library,
- .core_dump = irix_core_dump,
- .min_coredump = PAGE_SIZE,
-};
-
-/* Debugging routines. */
-static char *get_elf_p_type(Elf32_Word p_type)
-{
-#ifdef DEBUG
- switch (p_type) {
- case PT_NULL:
- return "PT_NULL";
- break;
-
- case PT_LOAD:
- return "PT_LOAD";
- break;
-
- case PT_DYNAMIC:
- return "PT_DYNAMIC";
- break;
-
- case PT_INTERP:
- return "PT_INTERP";
- break;
-
- case PT_NOTE:
- return "PT_NOTE";
- break;
-
- case PT_SHLIB:
- return "PT_SHLIB";
- break;
-
- case PT_PHDR:
- return "PT_PHDR";
- break;
-
- case PT_LOPROC:
- return "PT_LOPROC/REGINFO";
- break;
-
- case PT_HIPROC:
- return "PT_HIPROC";
- break;
-
- default:
- return "PT_BOGUS";
- break;
- }
-#endif
-}
-
-static void print_elfhdr(struct elfhdr *ehp)
-{
- int i;
-
- pr_debug("ELFHDR: e_ident<");
- for (i = 0; i < (EI_NIDENT - 1); i++)
- pr_debug("%x ", ehp->e_ident[i]);
- pr_debug("%x>\n", ehp->e_ident[i]);
- pr_debug(" e_type[%04x] e_machine[%04x] e_version[%08lx]\n",
- (unsigned short) ehp->e_type, (unsigned short) ehp->e_machine,
- (unsigned long) ehp->e_version);
- pr_debug(" e_entry[%08lx] e_phoff[%08lx] e_shoff[%08lx] "
- "e_flags[%08lx]\n",
- (unsigned long) ehp->e_entry, (unsigned long) ehp->e_phoff,
- (unsigned long) ehp->e_shoff, (unsigned long) ehp->e_flags);
- pr_debug(" e_ehsize[%04x] e_phentsize[%04x] e_phnum[%04x]\n",
- (unsigned short) ehp->e_ehsize,
- (unsigned short) ehp->e_phentsize,
- (unsigned short) ehp->e_phnum);
- pr_debug(" e_shentsize[%04x] e_shnum[%04x] e_shstrndx[%04x]\n",
- (unsigned short) ehp->e_shentsize,
- (unsigned short) ehp->e_shnum,
- (unsigned short) ehp->e_shstrndx);
-}
-
-static void print_phdr(int i, struct elf_phdr *ep)
-{
- pr_debug("PHDR[%d]: p_type[%s] p_offset[%08lx] p_vaddr[%08lx] "
- "p_paddr[%08lx]\n", i, get_elf_p_type(ep->p_type),
- (unsigned long) ep->p_offset, (unsigned long) ep->p_vaddr,
- (unsigned long) ep->p_paddr);
- pr_debug(" p_filesz[%08lx] p_memsz[%08lx] p_flags[%08lx] "
- "p_align[%08lx]\n", (unsigned long) ep->p_filesz,
- (unsigned long) ep->p_memsz, (unsigned long) ep->p_flags,
- (unsigned long) ep->p_align);
-}
-
-static void dump_phdrs(struct elf_phdr *ep, int pnum)
-{
- int i;
-
- for (i = 0; i < pnum; i++, ep++) {
- if ((ep->p_type == PT_LOAD) ||
- (ep->p_type == PT_INTERP) ||
- (ep->p_type == PT_PHDR))
- print_phdr(i, ep);
- }
-}
-
-static void set_brk(unsigned long start, unsigned long end)
-{
- start = PAGE_ALIGN(start);
- end = PAGE_ALIGN(end);
- if (end <= start)
- return;
- down_write(&current->mm->mmap_sem);
- do_brk(start, end - start);
- up_write(&current->mm->mmap_sem);
-}
-
-
-/* We need to explicitly zero any fractional pages
- * after the data section (i.e. bss). This would
- * contain the junk from the file that should not
- * be in memory.
- */
-static void padzero(unsigned long elf_bss)
-{
- unsigned long nbyte;
-
- nbyte = elf_bss & (PAGE_SIZE-1);
- if (nbyte) {
- nbyte = PAGE_SIZE - nbyte;
- clear_user((void __user *) elf_bss, nbyte);
- }
-}
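
padzero above clears the tail of the last data page so stale file bytes never leak in past the start of bss. Its arithmetic in a worked example (values illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;
	unsigned long elf_bss = 0x10234;		/* illustrative bss start */
	unsigned long nbyte = elf_bss & (page_size - 1);

	if (nbyte)
		nbyte = page_size - nbyte;		/* bytes left in the page */
	printf("zero %lu bytes at 0x%lx\n", nbyte, elf_bss);	/* 3532 bytes */
	return 0;
}
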
-
-static unsigned long * create_irix_tables(char * p, int argc, int envc,
- struct elfhdr * exec, unsigned int load_addr,
- unsigned int interp_load_addr, struct pt_regs *regs,
- struct elf_phdr *ephdr)
-{
- elf_addr_t *argv;
- elf_addr_t *envp;
- elf_addr_t *sp, *csp;
-
- pr_debug("create_irix_tables: p[%p] argc[%d] envc[%d] "
- "load_addr[%08x] interp_load_addr[%08x]\n",
- p, argc, envc, load_addr, interp_load_addr);
-
- sp = (elf_addr_t *) (~15UL & (unsigned long) p);
- csp = sp;
- csp -= exec ? DLINFO_ITEMS*2 : 2;
- csp -= envc+1;
- csp -= argc+1;
- csp -= 1; /* argc itself */
- if ((unsigned long)csp & 15UL) {
- sp -= (16UL - ((unsigned long)csp & 15UL)) / sizeof(*sp);
- }
-
- /*
- * Put the ELF interpreter info on the stack
- */
-#define NEW_AUX_ENT(nr, id, val) \
- __put_user((id), sp+(nr*2)); \
- __put_user((val), sp+(nr*2+1))
-
- sp -= 2;
- NEW_AUX_ENT(0, AT_NULL, 0);
-
- if (exec) {
- sp -= 11*2;
-
- NEW_AUX_ENT(0, AT_PHDR, load_addr + exec->e_phoff);
- NEW_AUX_ENT(1, AT_PHENT, sizeof(struct elf_phdr));
- NEW_AUX_ENT(2, AT_PHNUM, exec->e_phnum);
- NEW_AUX_ENT(3, AT_PAGESZ, ELF_EXEC_PAGESIZE);
- NEW_AUX_ENT(4, AT_BASE, interp_load_addr);
- NEW_AUX_ENT(5, AT_FLAGS, 0);
- NEW_AUX_ENT(6, AT_ENTRY, (elf_addr_t) exec->e_entry);
- NEW_AUX_ENT(7, AT_UID, (elf_addr_t) current->uid);
- NEW_AUX_ENT(8, AT_EUID, (elf_addr_t) current->euid);
- NEW_AUX_ENT(9, AT_GID, (elf_addr_t) current->gid);
- NEW_AUX_ENT(10, AT_EGID, (elf_addr_t) current->egid);
- }
-#undef NEW_AUX_ENT
-
- sp -= envc+1;
- envp = sp;
- sp -= argc+1;
- argv = sp;
-
- __put_user((elf_addr_t)argc, --sp);
- current->mm->arg_start = (unsigned long) p;
- while (argc-->0) {
- __put_user((unsigned long)p, argv++);
- p += strlen_user(p);
- }
- __put_user((unsigned long) NULL, argv);
- current->mm->arg_end = current->mm->env_start = (unsigned long) p;
- while (envc-->0) {
- __put_user((unsigned long)p, envp++);
- p += strlen_user(p);
- }
- __put_user((unsigned long) NULL, envp);
- current->mm->env_end = (unsigned long) p;
- return sp;
-}
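-
-/* The resulting user stack, sketched from low to high addresses
- * (orientation only, inferred from the pushes above):
- *
- * sp: argc
- * argv[0] .. argv[argc-1], NULL
- * envp[0] .. envp[envc-1], NULL
- * aux pairs AT_PHDR .. AT_EGID (if exec), then the AT_NULL pair
- * the argument/environment strings, starting at p
- */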
-
-
-/* This is much more generalized than the library routine read function,
- * so we keep this separate. Technically the library read function
- * is only provided so that we can read a.out libraries that have
- * an ELF header.
- */
-static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
- struct file * interpreter,
- unsigned int *interp_load_addr)
-{
- struct elf_phdr *elf_phdata = NULL;
- struct elf_phdr *eppnt;
- unsigned int len;
- unsigned int load_addr;
- int elf_bss;
- int retval;
- unsigned int last_bss;
- int error;
- int i;
- unsigned int k;
-
- elf_bss = 0;
- last_bss = 0;
- error = load_addr = 0;
-
- print_elfhdr(interp_elf_ex);
-
- /* First of all, some simple consistency checks */
- if ((interp_elf_ex->e_type != ET_EXEC &&
- interp_elf_ex->e_type != ET_DYN) ||
- !interpreter->f_op->mmap) {
- printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type);
- return 0xffffffff;
- }
-
- /* Now read in all of the header information */
- if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
- printk("IRIX interp header bigger than a page (%d)\n",
- (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
- return 0xffffffff;
- }
-
- elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
- GFP_KERNEL);
-
- if (!elf_phdata) {
- printk("Cannot kmalloc phdata for IRIX interp.\n");
- return 0xffffffff;
- }
-
- /* If the size of this structure has changed, then punt, since
- * we will be doing the wrong thing.
- */
- if (interp_elf_ex->e_phentsize != 32) {
- printk("IRIX interp e_phentsize == %d != 32 ",
- interp_elf_ex->e_phentsize);
- kfree(elf_phdata);
- return 0xffffffff;
- }
-
- retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
- (char *) elf_phdata,
- sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
- if (retval < 0) {
- printk("IRIX interp: failed to read phdrs.\n");
- kfree(elf_phdata);
- return 0xffffffff;
- }
-
- dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
-
- eppnt = elf_phdata;
- for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
- if (eppnt->p_type == PT_LOAD) {
- int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
- int elf_prot = 0;
- unsigned long vaddr = 0;
- if (eppnt->p_flags & PF_R)
- elf_prot = PROT_READ;
- if (eppnt->p_flags & PF_W)
- elf_prot |= PROT_WRITE;
- if (eppnt->p_flags & PF_X)
- elf_prot |= PROT_EXEC;
- elf_type |= MAP_FIXED;
- vaddr = eppnt->p_vaddr;
-
- pr_debug("INTERP do_mmap"
- "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
- interpreter, vaddr,
- (unsigned long)
- (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
- (unsigned long)
- elf_prot, (unsigned long) elf_type,
- (unsigned long)
- (eppnt->p_offset & 0xfffff000));
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap(interpreter, vaddr,
- eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
- elf_prot, elf_type,
- eppnt->p_offset & 0xfffff000);
- up_write(&current->mm->mmap_sem);
-
- if (error < 0 && error > -1024) {
- printk("Aieee IRIX interp mmap error=%d\n",
- error);
- break; /* Real error */
- }
- pr_debug("error=%08lx ", (unsigned long) error);
- if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
- load_addr = error;
- pr_debug("load_addr = error ");
- }
-
- /*
- * Find the end of the file mapping for this phdr, and
- * keep track of the largest address we see for this.
- */
- k = eppnt->p_vaddr + eppnt->p_filesz;
- if (k > elf_bss)
- elf_bss = k;
-
- /* Do the same thing for the memory mapping - between
- * elf_bss and last_bss is the bss section.
- */
- k = eppnt->p_memsz + eppnt->p_vaddr;
- if (k > last_bss)
- last_bss = k;
- pr_debug("\n");
- }
- }
-
- /* Now use mmap to map the library into memory. */
- if (error < 0 && error > -1024) {
- pr_debug("got error %d\n", error);
- kfree(elf_phdata);
- return 0xffffffff;
- }
-
- /* Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zero-mapped pages up to and including the
- * last bss page.
- */
- pr_debug("padzero(%08lx) ", (unsigned long) (elf_bss));
- padzero(elf_bss);
- len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */
-
- pr_debug("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
- (unsigned long) len);
-
- /* Map the last of the bss segment */
- if (last_bss > len) {
- down_write(&current->mm->mmap_sem);
- do_brk(len, (last_bss - len));
- up_write(&current->mm->mmap_sem);
- }
- kfree(elf_phdata);
-
- *interp_load_addr = load_addr;
- return ((unsigned int) interp_elf_ex->e_entry);
-}
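-
-/* Illustrative bss arithmetic for the mapping above (made-up numbers):
- * if file-backed data ends at elf_bss == 0x0040a234 and p_memsz runs
- * to last_bss == 0x0040f000, then len == 0x0040b000 after rounding up,
- * and do_brk(0x0040b000, 0x4000) supplies the remaining anonymous
- * zero-filled pages.
- */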
-
-/* Check sanity of IRIX elf executable header. */
-static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
-{
- if (memcmp(ehp->e_ident, ELFMAG, SELFMAG) != 0)
- return -ENOEXEC;
-
- /* First of all, some simple consistency checks */
- if ((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
- !bprm->file->f_op->mmap) {
- return -ENOEXEC;
- }
-
- /* XXX Don't support N32 or 64bit binaries yet because they can
- * XXX and do execute 64 bit instructions and expect all registers
- * XXX to be 64 bit as well. We need to make the kernel save
- * XXX all registers as 64bits on cpu's capable of this at
- * XXX exception time plus frob the XTLB exception vector.
- */
- if ((ehp->e_flags & EF_MIPS_ABI2))
- return -ENOEXEC;
-
- return 0;
-}
-
-/*
- * This is where the detailed check is performed. Irix binaries
- * use interpreters with 'libc.so' in the name, so this function
- * can differentiate between Linux and Irix binaries.
- */
-static inline int look_for_irix_interpreter(char **name,
- struct file **interpreter,
- struct elfhdr *interp_elf_ex,
- struct elf_phdr *epp,
- struct linux_binprm *bprm, int pnum)
-{
- int i;
- int retval = -EINVAL;
- struct file *file = NULL;
-
- *name = NULL;
- for (i = 0; i < pnum; i++, epp++) {
- if (epp->p_type != PT_INTERP)
- continue;
-
- /* It is illegal to have two interpreters for one executable. */
- if (*name != NULL)
- goto out;
-
- *name = kmalloc(epp->p_filesz + strlen(IRIX_EMUL), GFP_KERNEL);
- if (!*name)
- return -ENOMEM;
-
- strcpy(*name, IRIX_EMUL);
- /* Append the interpreter path right after the emulation prefix,
- * keeping the offset consistent with the kmalloc() size above.
- */
- retval = kernel_read(bprm->file, epp->p_offset,
- *name + strlen(IRIX_EMUL), epp->p_filesz);
- if (retval < 0)
- goto out;
-
- file = open_exec(*name);
- if (IS_ERR(file)) {
- retval = PTR_ERR(file);
- goto out;
- }
- retval = kernel_read(file, 0, bprm->buf, 128);
- if (retval < 0)
- goto dput_and_out;
-
- *interp_elf_ex = *(struct elfhdr *) bprm->buf;
- }
- *interpreter = file;
- return 0;
-
-dput_and_out:
- fput(file);
-out:
- kfree(*name);
- return retval;
-}
-
-static inline int verify_irix_interpreter(struct elfhdr *ihp)
-{
- if (memcmp(ihp->e_ident, ELFMAG, SELFMAG) != 0)
- return -ELIBBAD;
- return 0;
-}
-
-#define EXEC_MAP_FLAGS (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE)
-
-static inline void map_executable(struct file *fp, struct elf_phdr *epp, int pnum,
- unsigned int *estack, unsigned int *laddr,
- unsigned int *scode, unsigned int *ebss,
- unsigned int *ecode, unsigned int *edata,
- unsigned int *ebrk)
-{
- unsigned int tmp;
- int i, prot;
-
- for (i = 0; i < pnum; i++, epp++) {
- if (epp->p_type != PT_LOAD)
- continue;
-
- /* Map it. */
- prot = (epp->p_flags & PF_R) ? PROT_READ : 0;
- prot |= (epp->p_flags & PF_W) ? PROT_WRITE : 0;
- prot |= (epp->p_flags & PF_X) ? PROT_EXEC : 0;
- down_write(&current->mm->mmap_sem);
- (void) do_mmap(fp, (epp->p_vaddr & 0xfffff000),
- (epp->p_filesz + (epp->p_vaddr & 0xfff)),
- prot, EXEC_MAP_FLAGS,
- (epp->p_offset & 0xfffff000));
- up_write(&current->mm->mmap_sem);
-
- /* Fixup location tracking vars. */
- if ((epp->p_vaddr & 0xfffff000) < *estack)
- *estack = (epp->p_vaddr & 0xfffff000);
- if (!*laddr)
- *laddr = epp->p_vaddr - epp->p_offset;
- if (epp->p_vaddr < *scode)
- *scode = epp->p_vaddr;
-
- tmp = epp->p_vaddr + epp->p_filesz;
- if (tmp > *ebss)
- *ebss = tmp;
- if ((epp->p_flags & PF_X) && *ecode < tmp)
- *ecode = tmp;
- if (*edata < tmp)
- *edata = tmp;
-
- tmp = epp->p_vaddr + epp->p_memsz;
- if (tmp > *ebrk)
- *ebrk = tmp;
- }
-
-}
-
-static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
- struct file *interp, unsigned int *iladdr,
- int pnum, mm_segment_t old_fs,
- unsigned int *eentry)
-{
- int i;
-
- *eentry = 0xffffffff;
- for (i = 0; i < pnum; i++, epp++) {
- if (epp->p_type != PT_INTERP)
- continue;
-
- /* We should have fielded this error elsewhere... */
- if (*eentry != 0xffffffff)
- return -1;
-
- set_fs(old_fs);
- *eentry = load_irix_interp(ihp, interp, iladdr);
- old_fs = get_fs();
- set_fs(get_ds());
-
- fput(interp);
-
- if (*eentry == 0xffffffff)
- return -1;
- }
- return 0;
-}
-
-/*
- * IRIX maps a page at 0x200000 that holds information about the
- * process and the system, here we map the page and fill the
- * structure
- */
-static void irix_map_prda_page(void)
-{
- unsigned long v;
- struct prda *pp;
-
- down_write(&current->mm->mmap_sem);
- v = do_brk(PRDA_ADDRESS, PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
-
- /* do_brk() returns a -errno value on failure; v is unsigned long,
- * so compare as signed.
- */
- if ((long) v < 0)
- return;
-
- pp = (struct prda *) v;
- pp->prda_sys.t_pid = task_pid_vnr(current);
- pp->prda_sys.t_prid = read_c0_prid();
- pp->prda_sys.t_rpid = task_pid_vnr(current);
-
- /* We leave the rest set to zero */
-}
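-
-/* A conforming IRIX program then reads the page directly; roughly
- * (userland sketch, not kernel code):
- *
- * struct prda *pp = (struct prda *) PRDA_ADDRESS;
- * pid_t pid = pp->prda_sys.t_pid;
- *
- * hence the fields must be valid before the first user instruction.
- */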
-
-
-
-/* These are the functions used to load ELF style executables and shared
- * libraries. There is no binary dependent code anywhere else.
- */
-static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
-{
- struct elfhdr elf_ex, interp_elf_ex;
- struct file *interpreter;
- struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr;
- unsigned int load_addr, elf_bss, elf_brk;
- unsigned int elf_entry, interp_load_addr = 0;
- unsigned int start_code, end_code, end_data, elf_stack;
- int retval, has_interp, has_ephdr, size, i;
- char *elf_interpreter;
- mm_segment_t old_fs;
-
- load_addr = 0;
- has_interp = has_ephdr = 0;
- elf_ihdr = elf_ephdr = NULL;
- elf_ex = *((struct elfhdr *) bprm->buf);
- retval = -ENOEXEC;
-
- if (verify_binary(&elf_ex, bprm))
- goto out;
-
- /*
- * Telling -o32 static binaries from Linux and Irix apart from each
- * other is difficult. There are 2 differences to be noted for static
- * binaries from the 2 operating systems:
- *
- * 1) Irix binaries have their .text section before their .init
- * section. Linux binaries are just the opposite.
- *
- * 2) Irix binaries usually have <= 12 sections and Linux
- * binaries have > 20.
- *
- * We will use Method #2 since Method #1 would require us to read in
- * the section headers which is way too much overhead. This appears
- * to work for everything we have run into so far. If anyone has a
- * better method to tell the binaries apart, I'm listening.
- */
- if (elf_ex.e_shnum > 20)
- goto out;
-
- print_elfhdr(&elf_ex);
-
- /* Now read in all of the header information */
- size = elf_ex.e_phentsize * elf_ex.e_phnum;
- if (size > 65536)
- goto out;
- elf_phdata = kmalloc(size, GFP_KERNEL);
- if (elf_phdata == NULL) {
- retval = -ENOMEM;
- goto out;
- }
-
- retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size);
- if (retval < 0)
- goto out_free_ph;
-
- dump_phdrs(elf_phdata, elf_ex.e_phnum);
-
- /* Set some things for later. */
- for (i = 0; i < elf_ex.e_phnum; i++) {
- switch (elf_phdata[i].p_type) {
- case PT_INTERP:
- has_interp = 1;
- elf_ihdr = &elf_phdata[i];
- break;
- case PT_PHDR:
- has_ephdr = 1;
- elf_ephdr = &elf_phdata[i];
- break;
- }
- }
-
- pr_debug("\n");
-
- elf_bss = 0;
- elf_brk = 0;
-
- elf_stack = 0xffffffff;
- elf_interpreter = NULL;
- start_code = 0xffffffff;
- end_code = 0;
- end_data = 0;
-
- /*
- * If we get a return value, we change the value to be ENOEXEC
- * so that we can exit gracefully and the main binary format
- * search loop in 'fs/exec.c' will move onto the next handler
- * which should be the normal ELF binary handler.
- */
- retval = look_for_irix_interpreter(&elf_interpreter, &interpreter,
- &interp_elf_ex, elf_phdata, bprm,
- elf_ex.e_phnum);
- if (retval) {
- retval = -ENOEXEC;
- goto out_free_file;
- }
-
- if (elf_interpreter) {
- retval = verify_irix_interpreter(&interp_elf_ex);
- if (retval)
- goto out_free_interp;
- }
-
- /* OK, we are done with that, now set up the arg stuff,
- * and then start this sucker up.
- */
- retval = -E2BIG;
- if (!bprm->sh_bang && !bprm->p)
- goto out_free_interp;
-
- /* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
- if (retval)
- goto out_free_dentry;
-
- /* OK, This is the point of no return */
- current->mm->end_data = 0;
- current->mm->end_code = 0;
- current->mm->mmap = NULL;
- current->flags &= ~PF_FORKNOEXEC;
- elf_entry = (unsigned int) elf_ex.e_entry;
-
- /* Do this so that we can load the interpreter, if need be. We will
- * change some of these later.
- */
- setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
- current->mm->start_stack = bprm->p;
-
- /* At this point, we assume that the image should be loaded at
- * fixed address, not at a variable address.
- */
- old_fs = get_fs();
- set_fs(get_ds());
-
- map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack,
- &load_addr, &start_code, &elf_bss, &end_code,
- &end_data, &elf_brk);
-
- if (elf_interpreter) {
- retval = map_interpreter(elf_phdata, &interp_elf_ex,
- interpreter, &interp_load_addr,
- elf_ex.e_phnum, old_fs, &elf_entry);
- kfree(elf_interpreter);
- if (retval) {
- set_fs(old_fs);
- printk("Unable to load IRIX ELF interpreter\n");
- send_sig(SIGSEGV, current, 0);
- retval = 0;
- goto out_free_file;
- }
- }
-
- set_fs(old_fs);
-
- kfree(elf_phdata);
- set_personality(PER_IRIX32);
- set_binfmt(&irix_format);
- compute_creds(bprm);
- current->flags &= ~PF_FORKNOEXEC;
- bprm->p = (unsigned long)
- create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc,
- (elf_interpreter ? &elf_ex : NULL),
- load_addr, interp_load_addr, regs, elf_ephdr);
- current->mm->start_brk = current->mm->brk = elf_brk;
- current->mm->end_code = end_code;
- current->mm->start_code = start_code;
- current->mm->end_data = end_data;
- current->mm->start_stack = bprm->p;
-
- /* Calling set_brk effectively mmaps the pages that we need for the
- * bss and break sections.
- */
- set_brk(elf_bss, elf_brk);
-
- /*
- * IRIX maps a page at 0x200000 which holds some system
- * information. Programs depend on this.
- */
- irix_map_prda_page();
-
- padzero(elf_bss);
-
- pr_debug("(start_brk) %lx\n" , (long) current->mm->start_brk);
- pr_debug("(end_code) %lx\n" , (long) current->mm->end_code);
- pr_debug("(start_code) %lx\n" , (long) current->mm->start_code);
- pr_debug("(end_data) %lx\n" , (long) current->mm->end_data);
- pr_debug("(start_stack) %lx\n" , (long) current->mm->start_stack);
- pr_debug("(brk) %lx\n" , (long) current->mm->brk);
-
-#if 0 /* XXX No fucking way dude... */
- /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
- * and some applications "depend" upon this behavior.
- * Since we do not have the power to recompile these, we
- * emulate the SVr4 behavior. Sigh.
- */
- down_write(&current->mm->mmap_sem);
- (void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE, 0);
- up_write(&current->mm->mmap_sem);
-#endif
-
- start_thread(regs, elf_entry, bprm->p);
- if (current->ptrace & PT_PTRACED)
- send_sig(SIGTRAP, current, 0);
- return 0;
-out:
- return retval;
-
-out_free_dentry:
- allow_write_access(interpreter);
- fput(interpreter);
-out_free_interp:
- kfree(elf_interpreter);
-out_free_file:
-out_free_ph:
- kfree(elf_phdata);
- goto out;
-}
-
-/* This is really simpleminded and specialized - we are loading an
- * a.out library that is given an ELF header.
- */
-static int load_irix_library(struct file *file)
-{
- struct elfhdr elf_ex;
- struct elf_phdr *elf_phdata = NULL;
- struct elf_phdr *eppnt;
- unsigned int len = 0;
- int elf_bss = 0;
- int retval;
- unsigned int bss;
- int error;
- int i, j, k;
-
- error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
- if (error != sizeof(elf_ex))
- return -ENOEXEC;
-
- if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
- return -ENOEXEC;
-
- /* First of all, some simple consistency checks. */
- if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !file->f_op->mmap)
- return -ENOEXEC;
-
- /* Now read in all of the header information. */
- if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
- return -ENOEXEC;
-
- elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
- if (elf_phdata == NULL)
- return -ENOMEM;
-
- retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata,
- sizeof(struct elf_phdr) * elf_ex.e_phnum);
- if (retval < 0) {
- kfree(elf_phdata);
- return -ENOEXEC;
- }
-
- j = 0;
- for (i = 0; i < elf_ex.e_phnum; i++)
- if (elf_phdata[i].p_type == PT_LOAD)
- j++;
-
- if (j != 1) {
- kfree(elf_phdata);
- return -ENOEXEC;
- }
-
- /* Scan with a separate pointer so that elf_phdata still points at
- * the start of the allocation when it is passed to kfree() below.
- */
- eppnt = elf_phdata;
- while (eppnt->p_type != PT_LOAD)
- eppnt++;
-
- /* Now use mmap to map the library into memory. */
- down_write(&current->mm->mmap_sem);
- error = do_mmap(file,
- eppnt->p_vaddr & 0xfffff000,
- eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
- eppnt->p_offset & 0xfffff000);
- up_write(&current->mm->mmap_sem);
-
- k = eppnt->p_vaddr + eppnt->p_filesz;
- if (k > elf_bss)
- elf_bss = k;
-
- if (error != (eppnt->p_vaddr & 0xfffff000)) {
- kfree(elf_phdata);
- return error;
- }
-
- padzero(elf_bss);
-
- len = (eppnt->p_filesz + eppnt->p_vaddr + 0xfff) & 0xfffff000;
- bss = eppnt->p_memsz + eppnt->p_vaddr;
- if (bss > len) {
- down_write(&current->mm->mmap_sem);
- do_brk(len, bss-len);
- up_write(&current->mm->mmap_sem);
- }
- kfree(elf_phdata);
- return 0;
-}
-
-/* Called through irix_syssgi() to map an elf image given an FD,
- * a phdr ptr USER_PHDRP in userspace, and a count CNT telling how many
- * phdrs there are in the USER_PHDRP array. We return the vaddr the
- * first phdr was successfully mapped to.
- */
-unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
-{
- unsigned long type, vaddr, filesz, offset, flags;
- struct elf_phdr __user *hp;
- struct file *filp;
- int i, retval;
-
- pr_debug("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n",
- fd, user_phdrp, cnt);
-
- /* First get the verification out of the way. */
- hp = user_phdrp;
- if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) {
- pr_debug("irix_mapelf: bad pointer to ELF PHDR!\n");
-
- return -EFAULT;
- }
-
- dump_phdrs(user_phdrp, cnt);
-
- for (i = 0; i < cnt; i++, hp++) {
- if (__get_user(type, &hp->p_type))
- return -EFAULT;
- if (type != PT_LOAD) {
- printk("irix_mapelf: One section is not PT_LOAD!\n");
- return -ENOEXEC;
- }
- }
-
- filp = fget(fd);
- if (!filp)
- return -EACCES;
- if (!filp->f_op) {
- printk("irix_mapelf: Bogon filp!\n");
- fput(filp);
- return -EACCES;
- }
-
- hp = user_phdrp;
- for (i = 0; i < cnt; i++, hp++) {
- int prot;
-
- retval = __get_user(vaddr, &hp->p_vaddr);
- retval |= __get_user(filesz, &hp->p_filesz);
- retval |= __get_user(offset, &hp->p_offset);
- retval |= __get_user(flags, &hp->p_flags);
- if (retval) {
- fput(filp);
- return retval;
- }
-
- prot = (flags & PF_R) ? PROT_READ : 0;
- prot |= (flags & PF_W) ? PROT_WRITE : 0;
- prot |= (flags & PF_X) ? PROT_EXEC : 0;
-
- down_write(&current->mm->mmap_sem);
- retval = do_mmap(filp, (vaddr & 0xfffff000),
- (filesz + (vaddr & 0xfff)),
- prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
- (offset & 0xfffff000));
- up_write(&current->mm->mmap_sem);
-
- if (retval != (vaddr & 0xfffff000)) {
- printk("irix_mapelf: do_mmap fails with %d!\n", retval);
- fput(filp);
- return retval;
- }
- }
-
- pr_debug("irix_mapelf: Success, returning %08lx\n",
- (unsigned long) user_phdrp->p_vaddr);
-
- fput(filp);
-
- if (__get_user(vaddr, &user_phdrp->p_vaddr))
- return -EFAULT;
-
- return vaddr;
-}
-
-/*
- * ELF core dumper
- *
- * Modelled on fs/exec.c:aout_core_dump()
- * Jeremy Fitzhardinge <jeremy@sw.oz.au>
- */
-
-/* These are the only things you should do on a core-file: use only these
- * functions to write out all the necessary info.
- */
-static int dump_write(struct file *file, const void __user *addr, int nr)
-{
- return file->f_op->write(file, (const char __user *) addr, nr, &file->f_pos) == nr;
-}
-
-static int dump_seek(struct file *file, off_t off)
-{
- if (file->f_op->llseek) {
- if (file->f_op->llseek(file, off, 0) != off)
- return 0;
- } else
- file->f_pos = off;
- return 1;
-}
-
-/* Decide whether a segment is worth dumping; default is yes to be
- * sure (missing info is worse than too much; etc).
- * Personally I'd include everything, and use the coredump limit...
- *
- * I think we should skip something. But I am not sure how. H.J.
- */
-static inline int maydump(struct vm_area_struct *vma)
-{
- if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
- return 0;
-#if 1
- if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
- return 1;
- if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
- return 0;
-#endif
- return 1;
-}
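-
-/* Under these rules a writable heap or stack mapping is always dumped,
- * while a read-only, file-backed text segment is skipped on the theory
- * that it can be recovered from the executable itself.
- */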
-
-/* An ELF note in memory. */
-struct memelfnote
-{
- const char *name;
- int type;
- unsigned int datasz;
- void *data;
-};
-
-static int notesize(struct memelfnote *en)
-{
- int sz;
-
- sz = sizeof(struct elf_note);
- sz += roundup(strlen(en->name) + 1, 4);
- sz += roundup(en->datasz, 4);
-
- return sz;
-}
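-
-/* Worked example (illustrative): a "CORE"/NT_PRSTATUS note comes to
- * 12 bytes of struct elf_note (three 32-bit words), plus
- * roundup(strlen("CORE") + 1, 4) == 8 name bytes, plus the prstatus
- * payload rounded up to 4 bytes -- the same record layout that
- * writenote() emits.
- */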
-
-#define DUMP_WRITE(addr, nr) \
- if (!dump_write(file, (addr), (nr))) \
- goto end_coredump;
-#define DUMP_SEEK(off) \
- if (!dump_seek(file, (off))) \
- goto end_coredump;
-
-static int writenote(struct memelfnote *men, struct file *file)
-{
- struct elf_note en;
-
- en.n_namesz = strlen(men->name) + 1;
- en.n_descsz = men->datasz;
- en.n_type = men->type;
-
- DUMP_WRITE(&en, sizeof(en));
- DUMP_WRITE(men->name, en.n_namesz);
- /* XXX - cast from long long to long to avoid need for libgcc.a */
- DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
- DUMP_WRITE(men->data, men->datasz);
- DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
-
- return 1;
-
-end_coredump:
- return 0;
-}
-#undef DUMP_WRITE
-#undef DUMP_SEEK
-
-#define DUMP_WRITE(addr, nr) \
- if (!dump_write(file, (addr), (nr))) \
- goto end_coredump;
-#define DUMP_SEEK(off) \
- if (!dump_seek(file, (off))) \
- goto end_coredump;
-
-/* Actual dumper.
- *
- * This is a two-pass process; first we find the offsets of the bits,
- * and then they are actually written out. If we run out of core limit
- * we just truncate.
- */
-static int irix_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
-{
- int has_dumped = 0;
- mm_segment_t fs;
- int segs;
- int i;
- size_t size;
- struct vm_area_struct *vma;
- struct elfhdr elf;
- off_t offset = 0, dataoff;
- int numnote = 3;
- struct memelfnote notes[3];
- struct elf_prstatus prstatus; /* NT_PRSTATUS */
- elf_fpregset_t fpu; /* NT_PRFPREG */
- struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
-
- /* Count what's needed to dump, up to the limit of coredump size. */
- segs = 0;
- size = 0;
- for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
- if (maydump(vma))
- {
- int sz = vma->vm_end-vma->vm_start;
-
- if (size+sz >= limit)
- break;
- else
- size += sz;
- }
-
- segs++;
- }
- pr_debug("irix_core_dump: %d segs taking %d bytes\n", segs, size);
-
- /* Set up header. */
- memcpy(elf.e_ident, ELFMAG, SELFMAG);
- elf.e_ident[EI_CLASS] = ELFCLASS32;
- elf.e_ident[EI_DATA] = ELFDATA2LSB;
- elf.e_ident[EI_VERSION] = EV_CURRENT;
- elf.e_ident[EI_OSABI] = ELF_OSABI;
- memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
-
- elf.e_type = ET_CORE;
- elf.e_machine = ELF_ARCH;
- elf.e_version = EV_CURRENT;
- elf.e_entry = 0;
- elf.e_phoff = sizeof(elf);
- elf.e_shoff = 0;
- elf.e_flags = 0;
- elf.e_ehsize = sizeof(elf);
- elf.e_phentsize = sizeof(struct elf_phdr);
- elf.e_phnum = segs+1; /* Include notes. */
- elf.e_shentsize = 0;
- elf.e_shnum = 0;
- elf.e_shstrndx = 0;
-
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- has_dumped = 1;
- current->flags |= PF_DUMPCORE;
-
- DUMP_WRITE(&elf, sizeof(elf));
- offset += sizeof(elf); /* Elf header. */
- offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers. */
-
- /* Set up the notes in similar form to SVR4 core dumps made
- * with info from their /proc.
- */
- memset(&psinfo, 0, sizeof(psinfo));
- memset(&prstatus, 0, sizeof(prstatus));
-
- notes[0].name = "CORE";
- notes[0].type = NT_PRSTATUS;
- notes[0].datasz = sizeof(prstatus);
- notes[0].data = &prstatus;
- prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
- prstatus.pr_sigpend = current->pending.signal.sig[0];
- prstatus.pr_sighold = current->blocked.sig[0];
- psinfo.pr_pid = prstatus.pr_pid = task_pid_vnr(current);
- psinfo.pr_ppid = prstatus.pr_ppid = task_pid_vnr(current->parent);
- psinfo.pr_pgrp = prstatus.pr_pgrp = task_pgrp_vnr(current);
- psinfo.pr_sid = prstatus.pr_sid = task_session_vnr(current);
- if (thread_group_leader(current)) {
- /*
- * This is the record for the group leader. Add in the
- * cumulative times of previous dead threads. This total
- * won't include the time of each live thread whose state
- * is included in the core dump. The final total reported
- * to our parent process when it calls wait4 will include
- * those sums as well as the little bit more time it takes
- * this and each other thread to finish dying after the
- * core dump synchronization phase.
- */
- jiffies_to_timeval(current->utime + current->signal->utime,
- &prstatus.pr_utime);
- jiffies_to_timeval(current->stime + current->signal->stime,
- &prstatus.pr_stime);
- } else {
- jiffies_to_timeval(current->utime, &prstatus.pr_utime);
- jiffies_to_timeval(current->stime, &prstatus.pr_stime);
- }
- jiffies_to_timeval(current->signal->cutime, &prstatus.pr_cutime);
- jiffies_to_timeval(current->signal->cstime, &prstatus.pr_cstime);
-
- if (sizeof(elf_gregset_t) != sizeof(struct pt_regs)) {
- printk("sizeof(elf_gregset_t) (%d) != sizeof(struct pt_regs) "
- "(%d)\n", sizeof(elf_gregset_t), sizeof(struct pt_regs));
- } else {
- *(struct pt_regs *)&prstatus.pr_reg = *regs;
- }
-
- notes[1].name = "CORE";
- notes[1].type = NT_PRPSINFO;
- notes[1].datasz = sizeof(psinfo);
- notes[1].data = &psinfo;
- i = current->state ? ffz(~current->state) + 1 : 0;
- psinfo.pr_state = i;
- psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
- psinfo.pr_zomb = psinfo.pr_sname == 'Z';
- psinfo.pr_nice = task_nice(current);
- psinfo.pr_flag = current->flags;
- psinfo.pr_uid = current->uid;
- psinfo.pr_gid = current->gid;
- {
- int i, len;
-
- set_fs(fs);
-
- len = current->mm->arg_end - current->mm->arg_start;
- len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len;
- (void) copy_from_user(&psinfo.pr_psargs,
- (const char __user *) current->mm->arg_start, len);
- for (i = 0; i < len; i++)
- if (psinfo.pr_psargs[i] == 0)
- psinfo.pr_psargs[i] = ' ';
- psinfo.pr_psargs[len] = 0;
-
- set_fs(KERNEL_DS);
- }
- strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
-
- /* Try to dump the FPU. */
- prstatus.pr_fpvalid = dump_fpu(regs, &fpu);
- if (!prstatus.pr_fpvalid) {
- numnote--;
- } else {
- notes[2].name = "CORE";
- notes[2].type = NT_PRFPREG;
- notes[2].datasz = sizeof(fpu);
- notes[2].data = &fpu;
- }
-
- /* Write notes phdr entry. */
- {
- struct elf_phdr phdr;
- int sz = 0;
-
- for (i = 0; i < numnote; i++)
- sz += notesize(&notes[i]);
-
- phdr.p_type = PT_NOTE;
- phdr.p_offset = offset;
- phdr.p_vaddr = 0;
- phdr.p_paddr = 0;
- phdr.p_filesz = sz;
- phdr.p_memsz = 0;
- phdr.p_flags = 0;
- phdr.p_align = 0;
-
- offset += phdr.p_filesz;
- DUMP_WRITE(&phdr, sizeof(phdr));
- }
-
- /* Page-align dumped data. */
- dataoff = offset = roundup(offset, PAGE_SIZE);
-
- /* Write program headers for segments dump. */
- for (vma = current->mm->mmap, i = 0;
- i < segs && vma != NULL; vma = vma->vm_next) {
- struct elf_phdr phdr;
- size_t sz;
-
- i++;
-
- sz = vma->vm_end - vma->vm_start;
-
- phdr.p_type = PT_LOAD;
- phdr.p_offset = offset;
- phdr.p_vaddr = vma->vm_start;
- phdr.p_paddr = 0;
- phdr.p_filesz = maydump(vma) ? sz : 0;
- phdr.p_memsz = sz;
- offset += phdr.p_filesz;
- phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
- if (vma->vm_flags & VM_WRITE)
- phdr.p_flags |= PF_W;
- if (vma->vm_flags & VM_EXEC)
- phdr.p_flags |= PF_X;
- phdr.p_align = PAGE_SIZE;
-
- DUMP_WRITE(&phdr, sizeof(phdr));
- }
-
- for (i = 0; i < numnote; i++)
- if (!writenote(&notes[i], file))
- goto end_coredump;
-
- set_fs(fs);
-
- DUMP_SEEK(dataoff);
-
- for (i = 0, vma = current->mm->mmap;
- i < segs && vma != NULL;
- vma = vma->vm_next) {
- unsigned long addr = vma->vm_start;
- unsigned long len = vma->vm_end - vma->vm_start;
-
- if (!maydump(vma))
- continue;
- i++;
- pr_debug("elf_core_dump: writing %08lx %lx\n", addr, len);
- DUMP_WRITE((void __user *)addr, len);
- }
-
- if ((off_t) file->f_pos != offset) {
- /* Sanity check. */
- printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
- (off_t) file->f_pos, offset);
- }
-
-end_coredump:
- set_fs(fs);
- return has_dumped;
-}
-
-static int __init init_irix_binfmt(void)
-{
- extern int init_inventory(void);
- extern asmlinkage unsigned long sys_call_table;
- extern asmlinkage unsigned long sys_call_table_irix5;
-
- init_inventory();
-
- /*
- * Copy the IRIX5 syscall table (8000 bytes) into the main syscall
- * table. The IRIX5 calls are located by an offset of 8000 bytes
- * from the beginning of the main table.
- */
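- /*
- * Illustrative arithmetic, assuming 4-byte table entries on a
- * 32-bit kernel: 8000 bytes is 2000 slots, so the IRIX5 entries
- * occupy syscall numbers 2000 and up.
- */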
- memcpy((void *) ((unsigned long) &sys_call_table + 8000),
- &sys_call_table_irix5, 8000);
-
- return register_binfmt(&irix_format);
-}
-
-static void __exit exit_irix_binfmt(void)
-{
- /*
- * Remove the Irix ELF loader.
- */
- unregister_binfmt(&irix_format);
-}
-
-module_init(init_irix_binfmt)
-module_exit(exit_irix_binfmt)
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
deleted file mode 100644
index cf2dcd3d6a9..00000000000
--- a/arch/mips/kernel/irixinv.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Support the inventory interface for IRIX binaries
- * This is invoked before the mm layer is working, so we do not
- * use the linked lists for the inventory yet.
- *
- * Miguel de Icaza, 1997.
- */
-#include <linux/mm.h>
-#include <asm/inventory.h>
-#include <asm/uaccess.h>
-
-#define MAX_INVENTORY 50
-int inventory_items = 0;
-
-static inventory_t inventory [MAX_INVENTORY];
-
-void add_to_inventory(int class, int type, int controller, int unit, int state)
-{
- inventory_t *ni = &inventory [inventory_items];
-
- if (inventory_items == MAX_INVENTORY)
- return;
-
- ni->inv_class = class;
- ni->inv_type = type;
- ni->inv_controller = controller;
- ni->inv_unit = unit;
- ni->inv_state = state;
- ni->inv_next = ni;
- inventory_items++;
-}
-
-int dump_inventory_to_user(void __user *userbuf, int size)
-{
- inventory_t *inv = &inventory [0];
- inventory_t __user *user = userbuf;
- int v;
-
- if (!access_ok(VERIFY_WRITE, userbuf, size))
- return -EFAULT;
-
- for (v = 0; v < inventory_items; v++){
- inv = &inventory [v];
- if (copy_to_user (user, inv, sizeof (inventory_t)))
- return -EFAULT;
- user++;
- }
- return inventory_items * sizeof(inventory_t);
-}
-
-int __init init_inventory(void)
-{
- /*
- * gross hack while we put the right bits all over the kernel
- * most likely this will not let just anyone run the X server
- * until we put the right values all over the place
- */
- add_to_inventory(10, 3, 0, 0, 16400);
- add_to_inventory(1, 1, 150, -1, 12);
- add_to_inventory(1, 3, 0, 0, 8976);
- add_to_inventory(1, 2, 0, 0, 8976);
- add_to_inventory(4, 8, 0, 0, 2);
- add_to_inventory(5, 5, 0, 0, 1);
- add_to_inventory(3, 3, 0, 0, 32768);
- add_to_inventory(3, 4, 0, 0, 32768);
- add_to_inventory(3, 8, 0, 0, 524288);
- add_to_inventory(3, 9, 0, 0, 64);
- add_to_inventory(3, 1, 0, 0, 67108864);
- add_to_inventory(12, 3, 0, 0, 16);
- add_to_inventory(8, 7, 17, 0, 16777472);
- add_to_inventory(8, 0, 0, 0, 1);
- add_to_inventory(2, 1, 0, 13, 2);
- add_to_inventory(2, 2, 0, 2, 0);
- add_to_inventory(2, 2, 0, 1, 0);
- add_to_inventory(7, 14, 0, 0, 6);
-
- return 0;
-}
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
deleted file mode 100644
index 2bde200d5ad..00000000000
--- a/arch/mips/kernel/irixioctl.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * irixioctl.c: A fucking mess...
- *
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/sockios.h>
-#include <linux/syscalls.h>
-#include <linux/tty.h>
-#include <linux/file.h>
-#include <linux/rcupdate.h>
-
-#include <asm/uaccess.h>
-#include <asm/ioctl.h>
-#include <asm/ioctls.h>
-
-#undef DEBUG_IOCTLS
-#undef DEBUG_MISSING_IOCTL
-
-struct irix_termios {
- tcflag_t c_iflag, c_oflag, c_cflag, c_lflag;
- cc_t c_cc[NCCS];
-};
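-
-/* Unlike Linux's struct termios, this IRIX layout has no c_line
- * member, which is why the TCGETS/TCSETS cases below translate the
- * structure field by field instead of copying it wholesale.
- */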
-
-extern void start_tty(struct tty_struct *tty);
-static struct tty_struct *get_tty(int fd)
-{
- struct file *filp;
- struct tty_struct *ttyp = NULL;
-
- rcu_read_lock();
- filp = fcheck(fd);
- if (filp && filp->private_data) {
- ttyp = (struct tty_struct *) filp->private_data;
-
- if (ttyp->magic != TTY_MAGIC)
- ttyp = NULL;
- }
- rcu_read_unlock();
- return ttyp;
-}
-
-static struct tty_struct *get_real_tty(struct tty_struct *tp)
-{
- if (tp->driver->type == TTY_DRIVER_TYPE_PTY &&
- tp->driver->subtype == PTY_TYPE_MASTER)
- return tp->link;
- else
- return tp;
-}
-
-asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
-{
- struct tty_struct *tp, *rtp;
- mm_segment_t old_fs;
- int i, error = 0;
-
-#ifdef DEBUG_IOCTLS
- printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd);
-#endif
- switch(cmd) {
- case 0x00005401:
-#ifdef DEBUG_IOCTLS
- printk("TCGETA, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TCGETA, arg);
- break;
-
- case 0x0000540d: {
- struct termios kt;
- struct irix_termios __user *it =
- (struct irix_termios __user *) arg;
-
-#ifdef DEBUG_IOCTLS
- printk("TCGETS, %08lx) ", arg);
-#endif
- if (!access_ok(VERIFY_WRITE, it, sizeof(*it))) {
- error = -EFAULT;
- break;
- }
- old_fs = get_fs(); set_fs(get_ds());
- error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
- set_fs(old_fs);
- if (error)
- break;
-
- error = __put_user(kt.c_iflag, &it->c_iflag);
- error |= __put_user(kt.c_oflag, &it->c_oflag);
- error |= __put_user(kt.c_cflag, &it->c_cflag);
- error |= __put_user(kt.c_lflag, &it->c_lflag);
-
- for (i = 0; i < NCCS; i++)
- error |= __put_user(kt.c_cc[i], &it->c_cc[i]);
- break;
- }
-
- case 0x0000540e: {
- struct termios kt;
- struct irix_termios *it = (struct irix_termios *) arg;
-
-#ifdef DEBUG_IOCTLS
- printk("TCSETS, %08lx) ", arg);
-#endif
- if (!access_ok(VERIFY_READ, it, sizeof(*it))) {
- error = -EFAULT;
- break;
- }
- old_fs = get_fs(); set_fs(get_ds());
- error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
- set_fs(old_fs);
- if (error)
- break;
-
- error = __get_user(kt.c_iflag, &it->c_iflag);
- error |= __get_user(kt.c_oflag, &it->c_oflag);
- error |= __get_user(kt.c_cflag, &it->c_cflag);
- error |= __get_user(kt.c_lflag, &it->c_lflag);
-
- for (i = 0; i < NCCS; i++)
- error |= __get_user(kt.c_cc[i], &it->c_cc[i]);
-
- if (error)
- break;
- old_fs = get_fs(); set_fs(get_ds());
- error = sys_ioctl(fd, TCSETS, (unsigned long) &kt);
- set_fs(old_fs);
- break;
- }
-
- case 0x0000540f:
-#ifdef DEBUG_IOCTLS
- printk("TCSETSW, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TCSETSW, arg);
- break;
-
- case 0x00005471:
-#ifdef DEBUG_IOCTLS
- printk("TIOCNOTTY, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TIOCNOTTY, arg);
- break;
-
- case 0x00007416:
-#ifdef DEBUG_IOCTLS
- printk("TIOCGSID, %08lx) ", arg);
-#endif
- tp = get_tty(fd);
- if(!tp) {
- error = -EINVAL;
- break;
- }
- rtp = get_real_tty(tp);
-#ifdef DEBUG_IOCTLS
- printk("rtp->session=%d ", rtp->session);
-#endif
- error = put_user(rtp->session, (unsigned long __user *) arg);
- break;
-
- case 0x746e:
- /* TIOCSTART, same effect as hitting ^Q */
-#ifdef DEBUG_IOCTLS
- printk("TIOCSTART, %08lx) ", arg);
-#endif
- tp = get_tty(fd);
- if(!tp) {
- error = -EINVAL;
- break;
- }
- rtp = get_real_tty(tp);
- start_tty(rtp);
- break;
-
- case 0x20006968:
-#ifdef DEBUG_IOCTLS
- printk("SIOCGETLABEL, %08lx) ", arg);
-#endif
- error = -ENOPKG;
- break;
-
- case 0x40047477:
-#ifdef DEBUG_IOCTLS
- printk("TIOCGPGRP, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TIOCGPGRP, arg);
-#ifdef DEBUG_IOCTLS
- printk("arg=%d ", *(int *)arg);
-#endif
- break;
-
- case 0x40087468:
-#ifdef DEBUG_IOCTLS
- printk("TIOCGWINSZ, %08lx) ", arg);
-#endif
- error = sys_ioctl(fd, TIOCGWINSZ, arg);
- break;
-
- case 0x8004667e:
- error = sys_ioctl(fd, FIONBIO, arg);
- break;
-
- case 0x80047476:
- error = sys_ioctl(fd, TIOCSPGRP, arg);
- break;
-
- case 0x8020690c:
- error = sys_ioctl(fd, SIOCSIFADDR, arg);
- break;
-
- case 0x80206910:
- error = sys_ioctl(fd, SIOCSIFFLAGS, arg);
- break;
-
- case 0xc0206911:
- error = sys_ioctl(fd, SIOCGIFFLAGS, arg);
- break;
-
- case 0xc020691b:
- error = sys_ioctl(fd, SIOCGIFMETRIC, arg);
- break;
-
- default: {
-#ifdef DEBUG_MISSING_IOCTL
- char *msg = "Unimplemented IOCTL cmd tell linux-mips@linux-mips.org\n";
-
-#ifdef DEBUG_IOCTLS
- printk("UNIMP_IOCTL, %08lx)\n", arg);
-#endif
- old_fs = get_fs(); set_fs(get_ds());
- sys_write(2, msg, strlen(msg));
- set_fs(old_fs);
- printk("[%s:%d] Does unimplemented IRIX ioctl cmd %08lx\n",
- current->comm, current->pid, cmd);
- do_exit(255);
-#else
- error = sys_ioctl(fd, cmd, arg);
-#endif
- }
-
- };
-#ifdef DEBUG_IOCTLS
- printk("error=%d\n", error);
-#endif
- return error;
-}
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
deleted file mode 100644
index 0215c805a59..00000000000
--- a/arch/mips/kernel/irixsig.c
+++ /dev/null
@@ -1,888 +0,0 @@
-/*
- * irixsig.c: WHEEE, IRIX signals! YOW, am I compatible or what?!?!
- *
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- * Copyright (C) 1997 - 2000 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000 Silicon Graphics, Inc.
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/smp.h>
-#include <linux/time.h>
-#include <linux/ptrace.h>
-#include <linux/resource.h>
-
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
-
-#undef DEBUG_SIG
-
-#define _S(nr) (1<<((nr)-1))
-
-#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-
-#define _IRIX_NSIG 128
-#define _IRIX_NSIG_BPW BITS_PER_LONG
-#define _IRIX_NSIG_WORDS (_IRIX_NSIG / _IRIX_NSIG_BPW)
-
-typedef struct {
- unsigned long sig[_IRIX_NSIG_WORDS];
-} irix_sigset_t;
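-
-/* With 32-bit longs this packs the 128 IRIX signals into 4 words; a
- * 64-bit kernel would use 2. The sigctx_irix5 dump below assumes the
- * 4-word layout when it prints sigset.sig[0..3].
- */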
-
-struct sigctx_irix5 {
- u32 rmask, cp0_status;
- u64 pc;
- u64 regs[32];
- u64 fpregs[32];
- u32 usedfp, fpcsr, fpeir, sstk_flags;
- u64 hi, lo;
- u64 cp0_cause, cp0_badvaddr, _unused0;
- irix_sigset_t sigset;
- u64 weird_fpu_thing;
- u64 _unused1[31];
-};
-
-#ifdef DEBUG_SIG
-/* Debugging */
-static inline void dump_irix5_sigctx(struct sigctx_irix5 *c)
-{
- int i;
-
- printk("misc: rmask[%08lx] status[%08lx] pc[%08lx]\n",
- (unsigned long) c->rmask,
- (unsigned long) c->cp0_status,
- (unsigned long) c->pc);
- printk("regs: ");
- for(i = 0; i < 16; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]);
- printk("\nregs: ");
- for(i = 16; i < 32; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->regs[i]);
- printk("\nfpregs: ");
- for(i = 0; i < 16; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]);
- printk("\nfpregs: ");
- for(i = 16; i < 32; i++)
- printk("[%d]<%08lx> ", i, (unsigned long) c->fpregs[i]);
- printk("misc: usedfp[%d] fpcsr[%08lx] fpeir[%08lx] stk_flgs[%08lx]\n",
- (int) c->usedfp, (unsigned long) c->fpcsr,
- (unsigned long) c->fpeir, (unsigned long) c->sstk_flags);
- printk("misc: hi[%08lx] lo[%08lx] cause[%08lx] badvaddr[%08lx]\n",
- (unsigned long) c->hi, (unsigned long) c->lo,
- (unsigned long) c->cp0_cause, (unsigned long) c->cp0_badvaddr);
- printk("misc: sigset<0>[%08lx] sigset<1>[%08lx] sigset<2>[%08lx] "
- "sigset<3>[%08lx]\n", (unsigned long) c->sigset.sig[0],
- (unsigned long) c->sigset.sig[1],
- (unsigned long) c->sigset.sig[2],
- (unsigned long) c->sigset.sig[3]);
-}
-#endif
-
-static int setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signr, sigset_t *oldmask)
-{
- struct sigctx_irix5 __user *ctx;
- unsigned long sp;
- int error, i;
-
- sp = regs->regs[29];
- sp -= sizeof(struct sigctx_irix5);
- sp &= ~(0xf);
- ctx = (struct sigctx_irix5 __user *) sp;
- if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
- goto segv_and_exit;
-
- error = __put_user(0, &ctx->weird_fpu_thing);
- error |= __put_user(~(0x00000001), &ctx->rmask);
- error |= __put_user(0, &ctx->regs[0]);
- for(i = 1; i < 32; i++)
- error |= __put_user((u64) regs->regs[i], &ctx->regs[i]);
-
- error |= __put_user((u64) regs->hi, &ctx->hi);
- error |= __put_user((u64) regs->lo, &ctx->lo);
- error |= __put_user((u64) regs->cp0_epc, &ctx->pc);
- error |= __put_user(!!used_math(), &ctx->usedfp);
- error |= __put_user((u64) regs->cp0_cause, &ctx->cp0_cause);
- error |= __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr);
-
- error |= __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */
-
- error |= __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t)) ? -EFAULT : 0;
-
- if (error)
- goto segv_and_exit;
-
-#ifdef DEBUG_SIG
- dump_irix5_sigctx(ctx);
-#endif
-
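- /* MIPS o32 argument registers: regs[4..7] hold $a0..$a3, regs[25]
- * is $t9 and regs[29] is $sp; the handler trampoline (sa_restorer)
- * is entered via $t9/EPC per the PIC calling convention.
- */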
- regs->regs[4] = (unsigned long) signr;
- regs->regs[5] = 0; /* XXX sigcode XXX */
- regs->regs[6] = regs->regs[29] = sp;
- regs->regs[7] = (unsigned long) ka->sa.sa_handler;
- regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer;
-
- return 1;
-
-segv_and_exit:
- force_sigsegv(signr, current);
- return 0;
-}
-
-static inline int
-setup_irix_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signr, sigset_t *oldmask, siginfo_t *info)
-{
- printk("Aiee: setup_irix_rt_frame wants to be written\n");
- do_exit(SIGSEGV);
-}
-
-static inline int handle_signal(unsigned long sig, siginfo_t *info,
- struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
-{
- int ret;
-
- switch(regs->regs[0]) {
- case ERESTARTNOHAND:
- regs->regs[2] = EINTR;
- break;
- case ERESTARTSYS:
- if(!(ka->sa.sa_flags & SA_RESTART)) {
- regs->regs[2] = EINTR;
- break;
- }
- /* fallthrough */
- case ERESTARTNOINTR: /* Userland will reload $v0. */
- regs->cp0_epc -= 8;
- }
-
- regs->regs[0] = 0; /* Don't deal with this again. */
-
- if (ka->sa.sa_flags & SA_SIGINFO)
- ret = setup_irix_rt_frame(ka, regs, sig, oldset, info);
- else
- ret = setup_irix_frame(ka, regs, sig, oldset);
-
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- return ret;
-}
-
-void do_irix_signal(struct pt_regs *regs)
-{
- struct k_sigaction ka;
- siginfo_t info;
- int signr;
- sigset_t *oldset;
-
- /*
- * We want the common case to go fast, which is why we may in certain
- * cases get here from kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
-
- return;
- }
-
- /*
- * Whose code doesn't conform to the restartable syscall convention
- * dies here!!! The li instruction, a single machine instruction,
- * must directly be followed by the syscall instruction.
- */
- if (regs->regs[0]) {
- if (regs->regs[2] == ERESTARTNOHAND ||
- regs->regs[2] == ERESTARTSYS ||
- regs->regs[2] == ERESTARTNOINTR) {
- regs->cp0_epc -= 8;
- }
- if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
- regs->regs[2] = __NR_restart_syscall;
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 4;
- }
- regs->regs[0] = 0; /* Don't deal with this again. */
- }
-
- /*
- * If there's no signal to deliver, we just put the saved sigmask
- * back
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
-}
-
-asmlinkage void
-irix_sigreturn(struct pt_regs *regs)
-{
- struct sigctx_irix5 __user *context, *magic;
- unsigned long umask, mask;
- u64 *fregs;
- u32 usedfp;
- int error, sig, i, base = 0;
- sigset_t blocked;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- if (regs->regs[2] == 1000)
- base = 1;
-
- context = (struct sigctx_irix5 __user *) regs->regs[base + 4];
- magic = (struct sigctx_irix5 __user *) regs->regs[base + 5];
- sig = (int) regs->regs[base + 6];
-#ifdef DEBUG_SIG
- printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
- current->comm, current->pid, context, magic, sig);
-#endif
- if (!context)
- context = magic;
- if (!access_ok(VERIFY_READ, context, sizeof(struct sigctx_irix5)))
- goto badframe;
-
-#ifdef DEBUG_SIG
- dump_irix5_sigctx(context);
-#endif
-
- error = __get_user(regs->cp0_epc, &context->pc);
- error |= __get_user(umask, &context->rmask);
-
- mask = 2;
- for (i = 1; i < 32; i++, mask <<= 1) {
- if (umask & mask)
- error |= __get_user(regs->regs[i], &context->regs[i]);
- }
- error |= __get_user(regs->hi, &context->hi);
- error |= __get_user(regs->lo, &context->lo);
-
- error |= __get_user(usedfp, &context->usedfp);
- if ((umask & 1) && usedfp) {
- fregs = (u64 *) &current->thread.fpu;
-
- for(i = 0; i < 32; i++)
- error |= __get_user(fregs[i], &context->fpregs[i]);
- error |= __get_user(current->thread.fpu.fcr31, &context->fpcsr);
- }
-
- /* XXX do sigstack crapola here... XXX */
-
- error |= __copy_from_user(&blocked, &context->sigset, sizeof(blocked)) ? -EFAULT : 0;
-
- if (error)
- goto badframe;
-
- sigdelsetmask(&blocked, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = blocked;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29,%0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-struct sigact_irix5 {
- int flags;
- void (*handler)(int);
- u32 sigset[4];
- int _unused0[2];
-};
-
-#define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility:
- set only the low 32 bit of the sigset. */
-
-#ifdef DEBUG_SIG
-static inline void dump_sigact_irix5(struct sigact_irix5 *p)
-{
- printk("<f[%d] hndlr[%08lx] msk[%08lx]>", p->flags,
- (unsigned long) p->handler,
- (unsigned long) p->sigset[0]);
-}
-#endif
-
-asmlinkage int
-irix_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact, void __user *trampoline)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
-#ifdef DEBUG_SIG
- printk(" (%d,%s,%s,%08lx) ", sig, (!new ? "0" : "NEW"),
- (!old ? "0" : "OLD"), trampoline);
- if(new) {
- dump_sigact_irix5(new); printk(" ");
- }
-#endif
- if (act) {
- sigset_t mask;
- int err;
-
- if (!access_ok(VERIFY_READ, act, sizeof(*act)))
- return -EFAULT;
- err = __get_user(new_ka.sa.sa_handler, &act->sa_handler);
- err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-
- err |= __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t)) ? -EFAULT : 0;
- if (err)
- return err;
-
- /*
- * Hmmm... methinks IRIX libc always passes a valid trampoline
- * value for all invocations of sigaction. Will have to
- * investigate. POSIX POSIX, die die die...
- */
- new_ka.sa_restorer = trampoline;
- }
-
-/* XXX Implement SIG_SETMASK32 for IRIX compatibility */
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- int err;
-
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
- return -EFAULT;
-
- err = __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
- err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- err |= __copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask,
- sizeof(sigset_t)) ? -EFAULT : 0;
- if (err)
- return -EFAULT;
- }
-
- return ret;
-}
-
-asmlinkage int irix_sigpending(irix_sigset_t __user *set)
-{
- return do_sigpending(set, sizeof(*set));
-}
-
-asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new,
- irix_sigset_t __user *old)
-{
- sigset_t oldbits, newbits;
-
- if (new) {
- if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- return -EFAULT;
- if (__copy_from_user(&newbits, new, sizeof(unsigned long)*4))
- return -EFAULT;
- sigdelsetmask(&newbits, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- oldbits = current->blocked;
-
- switch (how) {
- case 1: /* SIG_BLOCK */
- sigorsets(&newbits, &oldbits, &newbits);
- break;
-
- case 2: /* SIG_UNBLOCK */
- sigandsets(&newbits, &oldbits, &newbits);
- break;
-
- case 3: /* SIG_SETMASK */
- break;
-
- case 256: /* SIG_SETMASK32, see above */
- siginitset(&newbits, newbits.sig[0]);
- break;
-
- default:
- spin_unlock_irq(&current->sighand->siglock);
- return -EINVAL;
- }
- current->blocked = newbits;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
- if (old)
- return copy_to_user(old, &current->blocked,
- sizeof(unsigned long)*4) ? -EFAULT : 0;
-
- return 0;
-}
-
-asmlinkage int irix_sigsuspend(struct pt_regs *regs)
-{
- sigset_t newset;
- sigset_t __user *uset;
-
- uset = (sigset_t __user *) regs->regs[4];
- if (copy_from_user(&newset, uset, sizeof(sigset_t)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-/* hate hate hate... */
-struct irix5_siginfo {
- int sig, code, error;
- union {
- char unused[128 - (3 * 4)]; /* Safety net. */
- struct {
- int pid;
- union {
- int uid;
- struct {
- int utime, status, stime;
- } child;
- } procdata;
- } procinfo;
-
- unsigned long fault_addr;
-
- struct {
- int fd;
- long band;
- } fileinfo;
-
- unsigned long sigval;
- } stuff;
-};
-
-asmlinkage int irix_sigpoll_sys(unsigned long __user *set,
- struct irix5_siginfo __user *info, struct timespec __user *tp)
-{
- long expire = MAX_SCHEDULE_TIMEOUT;
- sigset_t kset;
- int i, sig, error, timeo = 0;
- struct timespec ktp;
-
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n",
- current->comm, current->pid, set, info, tp);
-#endif
-
- /* Must always specify the signal set. */
- if (!set)
- return -EINVAL;
-
- if (copy_from_user(&kset, set, sizeof(kset)))
- return -EFAULT;
-
- if (info && clear_user(info, sizeof(*info))) {
- error = -EFAULT;
- goto out;
- }
-
- if (tp) {
- if (copy_from_user(&ktp, tp, sizeof(*tp)))
- return -EFAULT;
-
- if (!ktp.tv_sec && !ktp.tv_nsec)
- return -EINVAL;
-
- expire = timespec_to_jiffies(&ktp) +
- (ktp.tv_sec || ktp.tv_nsec);
- }
-
- while(1) {
- long tmp = 0;
-
- expire = schedule_timeout_interruptible(expire);
-
- for (i=0; i < _IRIX_NSIG_WORDS; i++)
- tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
-
- if (tmp)
- break;
- if (!expire) {
- timeo = 1;
- break;
- }
- if (signal_pending(current))
- return -EINTR;
- }
- if (timeo)
- return -EAGAIN;
-
- for (sig = 1; sig <= 65 /* IRIX_NSIG */; sig++) {
- if (sigismember (&kset, sig))
- continue;
- if (sigismember (&current->pending.signal, sig)) {
- /* XXX need more than this... */
- if (info)
- return copy_to_user(&info->sig, &sig,
- sizeof(sig)) ? -EFAULT : 0;
- return 0;
- }
- }
-
- /* Should not get here, but do something sane if we do. */
- error = -EINTR;
-
-out:
- return error;
-}
-
-/* This is here because of irix5_siginfo definition. */
-#define IRIX_P_PID 0
-#define IRIX_P_PGID 2
-#define IRIX_P_ALL 7
-
-#define W_EXITED 1
-#define W_TRAPPED 2
-#define W_STOPPED 4
-#define W_CONT 8
-#define W_NOHANG 64
-
-#define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG)
-
-asmlinkage int irix_waitsys(int type, int upid,
- struct irix5_siginfo __user *info, int options,
- struct rusage __user *ru)
-{
- struct pid *pid = NULL;
- int flag, retval;
- DECLARE_WAITQUEUE(wait, current);
- struct task_struct *tsk;
- struct task_struct *p;
- struct list_head *_p;
-
- if (!info)
- return -EINVAL;
-
- if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
- return -EFAULT;
-
- if (ru)
- if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)))
- return -EFAULT;
-
- if (options & ~W_MASK)
- return -EINVAL;
-
- if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL)
- return -EINVAL;
-
- if (type != IRIX_P_ALL)
- pid = find_get_pid(upid);
- add_wait_queue(&current->signal->wait_chldexit, &wait);
-repeat:
- flag = 0;
- current->state = TASK_INTERRUPTIBLE;
- read_lock(&tasklist_lock);
- tsk = current;
- list_for_each(_p, &tsk->children) {
- p = list_entry(_p, struct task_struct, sibling);
- if ((type == IRIX_P_PID) && task_pid(p) != pid)
- continue;
- if ((type == IRIX_P_PGID) && task_pgrp(p) != pid)
- continue;
- if ((p->exit_signal != SIGCHLD))
- continue;
- flag = 1;
- switch (p->state) {
- case TASK_STOPPED:
- if (!p->exit_code)
- continue;
- if (!(options & (W_TRAPPED|W_STOPPED)) &&
- !(p->ptrace & PT_PTRACED))
- continue;
- read_unlock(&tasklist_lock);
-
- /* move to end of parent's list to avoid starvation */
- write_lock_irq(&tasklist_lock);
- remove_parent(p);
- add_parent(p);
- write_unlock_irq(&tasklist_lock);
- retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
- if (retval)
- goto end_waitsys;
-
- retval = __put_user(SIGCHLD, &info->sig);
- retval |= __put_user(0, &info->code);
- retval |= __put_user(task_pid_vnr(p), &info->stuff.procinfo.pid);
- retval |= __put_user((p->exit_code >> 8) & 0xff,
- &info->stuff.procinfo.procdata.child.status);
- retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime);
- retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime);
- if (retval)
- goto end_waitsys;
-
- p->exit_code = 0;
- goto end_waitsys;
-
- case EXIT_ZOMBIE:
- current->signal->cutime += p->utime + p->signal->cutime;
- current->signal->cstime += p->stime + p->signal->cstime;
- if (ru != NULL)
- getrusage(p, RUSAGE_BOTH, ru);
- retval = __put_user(SIGCHLD, &info->sig);
- retval |= __put_user(1, &info->code); /* CLD_EXITED */
- retval |= __put_user(task_pid_vnr(p), &info->stuff.procinfo.pid);
- retval |= __put_user((p->exit_code >> 8) & 0xff,
- &info->stuff.procinfo.procdata.child.status);
- retval |= __put_user(p->utime,
- &info->stuff.procinfo.procdata.child.utime);
- retval |= __put_user(p->stime,
- &info->stuff.procinfo.procdata.child.stime);
- if (retval)
- goto end_waitsys;
-
- if (p->real_parent != p->parent) {
- write_lock_irq(&tasklist_lock);
- remove_parent(p);
- p->parent = p->real_parent;
- add_parent(p);
- do_notify_parent(p, SIGCHLD);
- write_unlock_irq(&tasklist_lock);
- } else
- release_task(p);
- goto end_waitsys;
- default:
- continue;
- }
- tsk = next_thread(tsk);
- }
- read_unlock(&tasklist_lock);
- if (flag) {
- retval = 0;
- if (options & W_NOHANG)
- goto end_waitsys;
- retval = -ERESTARTSYS;
- if (signal_pending(current))
- goto end_waitsys;
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- goto repeat;
- }
- retval = -ECHILD;
-end_waitsys:
- current->state = TASK_RUNNING;
- remove_wait_queue(&current->signal->wait_chldexit, &wait);
- put_pid(pid);
-
- return retval;
-}
-
-struct irix5_context {
- u32 flags;
- u32 link;
- u32 sigmask[4];
- struct { u32 sp, size, flags; } stack;
- int regs[36];
- u32 fpregs[32];
- u32 fpcsr;
- u32 _unused0;
- u32 _unused1[47];
- u32 weird_graphics_thing;
-};
-
-asmlinkage int irix_getcontext(struct pt_regs *regs)
-{
- int error, i, base = 0;
- struct irix5_context __user *ctx;
- unsigned long flags;
-
- if (regs->regs[2] == 1000)
- base = 1;
- ctx = (struct irix5_context __user *) regs->regs[base + 4];
-
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_getcontext(%p)\n",
- current->comm, current->pid, ctx);
-#endif
-
- if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
- return -EFAULT;
-
- error = __put_user(current->thread.irix_oldctx, &ctx->link);
-
- error |= __copy_to_user(&ctx->sigmask, &current->blocked, sizeof(irix_sigset_t)) ? -EFAULT : 0;
-
- /* XXX Do sigstack stuff someday... */
- error |= __put_user(0, &ctx->stack.sp);
- error |= __put_user(0, &ctx->stack.size);
- error |= __put_user(0, &ctx->stack.flags);
-
- error |= __put_user(0, &ctx->weird_graphics_thing);
- error |= __put_user(0, &ctx->regs[0]);
- for (i = 1; i < 32; i++)
- error |= __put_user(regs->regs[i], &ctx->regs[i]);
- error |= __put_user(regs->lo, &ctx->regs[32]);
- error |= __put_user(regs->hi, &ctx->regs[33]);
- error |= __put_user(regs->cp0_cause, &ctx->regs[34]);
- error |= __put_user(regs->cp0_epc, &ctx->regs[35]);
-
- flags = 0x0f;
- if (!used_math()) {
- flags &= ~(0x08);
- } else {
- /* XXX wheee... */
- printk("Wheee, no code for saving IRIX FPU context yet.\n");
- }
- error |= __put_user(flags, &ctx->flags);
-
- return error;
-}
-
-asmlinkage void irix_setcontext(struct pt_regs *regs)
-{
- struct irix5_context __user *ctx;
- int err, base = 0;
- u32 flags;
-
- if (regs->regs[2] == 1000)
- base = 1;
- ctx = (struct irix5_context __user *) regs->regs[base + 4];
-
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_setcontext(%p)\n",
- current->comm, current->pid, ctx);
-#endif
-
- if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)))
- goto segv_and_exit;
-
- err = __get_user(flags, &ctx->flags);
- if (flags & 0x02) {
- /* XXX sigstack garbage, todo... */
- printk("Wheee, cannot do sigstack stuff in setcontext\n");
- }
-
- if (flags & 0x04) {
- int i;
-
- /* XXX extra control block stuff... todo... */
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &ctx->regs[i]);
- err |= __get_user(regs->lo, &ctx->regs[32]);
- err |= __get_user(regs->hi, &ctx->regs[33]);
- err |= __get_user(regs->cp0_epc, &ctx->regs[35]);
- }
-
- if (flags & 0x08)
- /* XXX fpu context, blah... */
- printk(KERN_ERR "Wheee, cannot restore FPU context yet...\n");
-
- err |= __get_user(current->thread.irix_oldctx, &ctx->link);
- if (err)
- goto segv_and_exit;
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29,%0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-segv_and_exit:
- force_sigsegv(SIGSEGV, current);
-}
-
-struct irix_sigstack {
- unsigned long sp;
- int status;
-};
-
-asmlinkage int irix_sigstack(struct irix_sigstack __user *new,
- struct irix_sigstack __user *old)
-{
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigstack(%p,%p)\n",
- current->comm, current->pid, new, old);
-#endif
- if (new) {
- if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- return -EFAULT;
- }
-
- if (old) {
- if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-struct irix_sigaltstack { unsigned long sp; int size; int status; };
-
-asmlinkage int irix_sigaltstack(struct irix_sigaltstack __user *new,
- struct irix_sigaltstack __user *old)
-{
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigaltstack(%p,%p)\n",
- current->comm, current->pid, new, old);
-#endif
- if (new)
- if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- return -EFAULT;
-
- if (old) {
- if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-struct irix_procset {
- int cmd, ltype, lid, rtype, rid;
-};
-
-asmlinkage int irix_sigsendset(struct irix_procset __user *pset, int sig)
-{
- if (!access_ok(VERIFY_READ, pset, sizeof(*pset)))
- return -EFAULT;
-#ifdef DEBUG_SIG
- printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n",
- current->comm, current->pid,
- pset->cmd, pset->ltype, pset->lid, pset->rtype, pset->rid,
- sig);
-#endif
- return -EINVAL;
-}
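
The removed irix_waitsys() and irix_getcontext() bodies above both lean on the same idiom: each __put_user() returns 0 on success or -EFAULT, and the results are OR-ed into a single accumulator so that any one failing copy makes the whole call fail. A minimal userspace sketch of that accumulation pattern, with put_field() as a hypothetical stand-in for __put_user():

#include <stdio.h>

/* Hypothetical stand-in for __put_user(): 0 on success, -1 on fault. */
static int put_field(int *dst, int val)
{
	if (!dst)
		return -1;
	*dst = val;
	return 0;
}

int main(void)
{
	int sig, code, pid;
	int err = 0;

	/* OR the results together, as the removed code did with retval |=:
	 * one failure poisons the final error value. */
	err |= put_field(&sig, 18);	/* SIGCHLD */
	err |= put_field(&code, 1);	/* CLD_EXITED */
	err |= put_field(&pid, 1234);

	printf("err = %d (sig=%d code=%d pid=%d)\n", err, sig, code, pid);
	return 0;
}
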
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644
index 00000000000..88e4c323382
--- /dev/null
+++ b/arch/mips/kernel/irq-gic.c
@@ -0,0 +1,380 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/bitmap.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/clocksource.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <linux/hardirq.h>
+#include <asm-generic/bitops/find.h>
+
+unsigned int gic_frequency;
+unsigned int gic_present;
+unsigned long _gic_base;
+unsigned int gic_irq_base;
+unsigned int gic_irq_flags[GIC_NUM_INTRS];
+
+/* The index into this array is the vector # of the interrupt. */
+struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS];
+
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static struct gic_pending_regs pending_regs[NR_CPUS];
+static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+ unsigned int hi, hi2, lo;
+
+ do {
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+ } while (hi2 != hi);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+}
+
+void gic_write_cpu_compare(cycle_t cnt, int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+ (int)(cnt >> 32));
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+ (int)(cnt & 0xffffffff));
+
+ local_irq_restore(flags);
+}
+
+cycle_t gic_read_compare(void)
+{
+ unsigned int hi, lo;
+
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+#endif
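
gic_read_count() above reads a 64-bit counter through two 32-bit registers, re-reading the high half until it is stable so that a carry between the two reads cannot produce a torn value. A minimal userspace sketch of the same technique, where the volatile globals stand in for the GIC counter registers:

#include <stdint.h>
#include <stdio.h>

/* Simulated 32-bit counter halves; in the driver these are GIC registers. */
static volatile uint32_t cnt_hi, cnt_lo;

/* Torn-read-safe 64-bit read: retry until the high half is stable. */
static uint64_t read_counter64(void)
{
	uint32_t hi, hi2, lo;

	do {
		hi  = cnt_hi;
		lo  = cnt_lo;
		hi2 = cnt_hi;
	} while (hi2 != hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	cnt_hi = 1;
	cnt_lo = 0x80000000u;
	printf("count = %llu\n", (unsigned long long)read_counter64());
	return 0;
}
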
+
+unsigned int gic_get_timer_pending(void)
+{
+ unsigned int vpe_pending;
+
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
+ GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
+ return (vpe_pending & GIC_VPE_PEND_TIMER_MSK);
+}
+
+void gic_bind_eic_interrupt(int irq, int set)
+{
+ /* Convert irq vector # to hw int # */
+ irq -= GIC_PIN_TO_VEC_OFFSET;
+
+ /* Set irq to use shadow set */
+ GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
+}
+
+void gic_send_ipi(unsigned int intr)
+{
+ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
+}
+
+static void gic_eic_irq_dispatch(void)
+{
+ unsigned int cause = read_c0_cause();
+ int irq;
+
+ irq = (cause & ST0_IM) >> STATUSB_IP2;
+ if (irq == 0)
+ irq = -1;
+
+ if (irq >= 0)
+ do_IRQ(gic_irq_base + irq);
+ else
+ spurious_interrupt();
+}
+
+static void __init vpe_local_setup(unsigned int numvpes)
+{
+ unsigned long timer_intr = GIC_INT_TMR;
+ unsigned long perf_intr = GIC_INT_PERFCTR;
+ unsigned int vpe_ctl;
+ int i;
+
+ if (cpu_has_veic) {
+ /*
+ * GIC timer interrupt -> CPU HW Int X (vector X+2) ->
+ * map to pin X+2-1 (since GIC adds 1)
+ */
+ timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+ /*
+ * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
+ * map to pin X+2-1 (since GIC adds 1)
+ */
+ perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+ }
+
+ /*
+ * Setup the default performance counter timer interrupts
+ * for all VPEs
+ */
+ for (i = 0; i < numvpes; i++) {
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+ /* Are Interrupts locally routable? */
+ GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
+ if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+ GIC_MAP_TO_PIN_MSK | timer_intr);
+ if (cpu_has_veic) {
+ set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
+ gic_eic_irq_dispatch);
+ gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
+ }
+
+ if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
+ GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+ GIC_MAP_TO_PIN_MSK | perf_intr);
+ if (cpu_has_veic) {
+ set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
+ gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
+ }
+ }
+}
+
+unsigned int gic_compare_int(void)
+{
+ unsigned int pending;
+
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+ if (pending & GIC_VPE_PEND_CMP_MSK)
+ return 1;
+ else
+ return 0;
+}
+
+unsigned int gic_get_int(void)
+{
+ unsigned int i;
+ unsigned long *pending, *intrmask, *pcpu_mask;
+ unsigned long *pending_abs, *intrmask_abs;
+
+ /* Get per-cpu bitmaps */
+ pending = pending_regs[smp_processor_id()].pending;
+ intrmask = intrmask_regs[smp_processor_id()].intrmask;
+ pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+ pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+ GIC_SH_PEND_31_0_OFS);
+ intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+ GIC_SH_MASK_31_0_OFS);
+
+ for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
+ GICREAD(*pending_abs, pending[i]);
+ GICREAD(*intrmask_abs, intrmask[i]);
+ pending_abs++;
+ intrmask_abs++;
+ }
+
+ bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
+ bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+
+ return find_first_bit(pending, GIC_NUM_INTRS);
+}
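
gic_get_int() above reduces three bitmaps (raw pending, global interrupt mask, per-CPU routing mask) with bitmap_and() and then takes the lowest set bit as the interrupt to service. A single-word userspace sketch of that reduction; the mask values are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned long pending  = 0x0000f040ul;	/* raw pending interrupts */
	unsigned long intrmask = 0x0000ff00ul;	/* globally enabled */
	unsigned long pcpu     = 0x0000f000ul;	/* routed to this CPU */
	unsigned long masked   = pending & intrmask & pcpu;
	int bit;

	/* Stand-in for find_first_bit() over the masked bitmap. */
	for (bit = 0; bit < 32 && !(masked & (1ul << bit)); bit++)
		;
	printf("first pending interrupt: %d\n", bit);	/* prints 12 */
	return 0;
}
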
+
+static void gic_mask_irq(struct irq_data *d)
+{
+ GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+ GIC_SET_INTR_MASK(d->irq - gic_irq_base);
+}
+
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(gic_lock);
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int irq = (d->irq - gic_irq_base);
+ cpumask_t tmp = CPU_MASK_NONE;
+ unsigned long flags;
+ int i;
+
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
+ if (cpus_empty(tmp))
+ return -1;
+
+ /* Assumption : cpumask refers to a single CPU */
+ spin_lock_irqsave(&gic_lock, flags);
+
+ /* Re-route this IRQ */
+ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+ /* Update the pcpu_masks */
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(irq, pcpu_masks[i].pcpu_mask);
+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+ cpumask_copy(d->affinity, cpumask);
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+#endif
+
+static struct irq_chip gic_irq_controller = {
+ .name = "MIPS GIC",
+ .irq_ack = gic_irq_ack,
+ .irq_mask = gic_mask_irq,
+ .irq_mask_ack = gic_mask_irq,
+ .irq_unmask = gic_unmask_irq,
+ .irq_eoi = gic_finish_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gic_set_affinity,
+#endif
+};
+
+static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+ unsigned int pin, unsigned int polarity, unsigned int trigtype,
+ unsigned int flags)
+{
+ struct gic_shared_intr_map *map_ptr;
+
+ /* Setup Intr to Pin mapping */
+ if (pin & GIC_MAP_TO_NMI_MSK) {
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+		/* FIXME: hack to route NMI to all CPUs */
+ for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+ GICWRITE(GIC_REG_ADDR(SHARED,
+ GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+ 0xffffffff);
+ }
+ } else {
+ GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
+ GIC_MAP_TO_PIN_MSK | pin);
+ /* Setup Intr to CPU mapping */
+ GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+ if (cpu_has_veic) {
+ set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET,
+ gic_eic_irq_dispatch);
+ map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET];
+ if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR)
+ BUG();
+ map_ptr->intr_list[map_ptr->num_shared_intr++] = intr;
+ }
+ }
+
+ /* Setup Intr Polarity */
+ GIC_SET_POLARITY(intr, polarity);
+
+ /* Setup Intr Trigger Type */
+ GIC_SET_TRIGGER(intr, trigtype);
+
+ /* Init Intr Masks */
+ GIC_CLR_INTR_MASK(intr);
+ /* Initialise per-cpu Interrupt software masks */
+ if (flags & GIC_FLAG_IPI)
+ set_bit(intr, pcpu_masks[cpu].pcpu_mask);
+ if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
+ GIC_SET_INTR_MASK(intr);
+ if (trigtype == GIC_TRIG_EDGE)
+ gic_irq_flags[intr] |= GIC_TRIG_EDGE;
+}
+
+static void __init gic_basic_init(int numintrs, int numvpes,
+ struct gic_intr_map *intrmap, int mapsize)
+{
+ unsigned int i, cpu;
+ unsigned int pin_offset = 0;
+
+ board_bind_eic_interrupt = &gic_bind_eic_interrupt;
+
+ /* Setup defaults */
+ for (i = 0; i < numintrs; i++) {
+ GIC_SET_POLARITY(i, GIC_POL_POS);
+ GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
+ GIC_CLR_INTR_MASK(i);
+ if (i < GIC_NUM_INTRS) {
+ gic_irq_flags[i] = 0;
+ gic_shared_intr_map[i].num_shared_intr = 0;
+ gic_shared_intr_map[i].local_intr_mask = 0;
+ }
+ }
+
+ /*
+ * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
+ * one because the GIC will add one (since 0=no intr).
+ */
+ if (cpu_has_veic)
+ pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+
+ /* Setup specifics */
+ for (i = 0; i < mapsize; i++) {
+ cpu = intrmap[i].cpunum;
+ if (cpu == GIC_UNUSED)
+ continue;
+ if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
+ continue;
+ gic_setup_intr(i,
+ intrmap[i].cpunum,
+ intrmap[i].pin + pin_offset,
+ intrmap[i].polarity,
+ intrmap[i].trigtype,
+ intrmap[i].flags);
+ }
+
+ vpe_local_setup(numvpes);
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size,
+ struct gic_intr_map *intr_map, unsigned int intr_map_size,
+ unsigned int irqbase)
+{
+ unsigned int gicconfig;
+ int numvpes, numintrs;
+
+ _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
+ gic_addrspace_size);
+ gic_irq_base = irqbase;
+
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
+ numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+ GIC_SH_CONFIG_NUMINTRS_SHF;
+ numintrs = ((numintrs + 1) * 8);
+
+ numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+ GIC_SH_CONFIG_NUMVPES_SHF;
+ numvpes = numvpes + 1;
+
+ gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
+
+ gic_platform_init(numintrs, &gic_irq_controller);
+}
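
gic_init() above decodes the controller geometry from GIC_SH_CONFIG: the interrupt count is stored as (n/8 - 1) and the VPE count as (n - 1). A small sketch of that decoding with hypothetical field positions; the real shift/mask constants live in asm/gic.h:

#include <stdio.h>

/* Illustrative field layout, not the actual GIC_SH_CONFIG encoding. */
#define NUMINTRS_SHF	16
#define NUMINTRS_MSK	(0xffu << NUMINTRS_SHF)
#define NUMVPES_MSK	0xffu

int main(void)
{
	unsigned int gicconfig = (0x07u << NUMINTRS_SHF) | 0x03u;
	unsigned int numintrs = (gicconfig & NUMINTRS_MSK) >> NUMINTRS_SHF;
	unsigned int numvpes  = gicconfig & NUMVPES_MSK;

	numintrs = (numintrs + 1) * 8;	/* 0x07 -> 64 interrupts */
	numvpes  = numvpes + 1;		/* 0x03 -> 4 VPEs */

	printf("%u interrupts, %u VPEs\n", numintrs, numvpes);
	return 0;
}
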
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 1b81b131f43..44a1f792e39 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx IRQ routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,68 +25,68 @@
#include <asm/gt64120.h>
-#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
-static DEFINE_SPINLOCK(gt641xx_irq_lock);
+static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
-static void ack_gt641xx_irq(unsigned int irq)
+static void ack_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 cause;
- spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
cause = GT_READ(GT_INTRCAUSE_OFS);
- cause &= ~GT641XX_IRQ_TO_BIT(irq);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRCAUSE_OFS, cause);
- spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
-static void mask_gt641xx_irq(unsigned int irq)
+static void mask_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 mask;
- spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
- mask &= ~GT641XX_IRQ_TO_BIT(irq);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
- spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
-static void mask_ack_gt641xx_irq(unsigned int irq)
+static void mask_ack_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 cause, mask;
- spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
- mask &= ~GT641XX_IRQ_TO_BIT(irq);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
cause = GT_READ(GT_INTRCAUSE_OFS);
- cause &= ~GT641XX_IRQ_TO_BIT(irq);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRCAUSE_OFS, cause);
- spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
-static void unmask_gt641xx_irq(unsigned int irq)
+static void unmask_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 mask;
- spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
- mask |= GT641XX_IRQ_TO_BIT(irq);
+ mask |= GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
- spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static struct irq_chip gt641xx_irq_chip = {
.name = "GT641xx",
- .ack = ack_gt641xx_irq,
- .mask = mask_gt641xx_irq,
- .mask_ack = mask_ack_gt641xx_irq,
- .unmask = unmask_gt641xx_irq,
+ .irq_ack = ack_gt641xx_irq,
+ .irq_mask = mask_gt641xx_irq,
+ .irq_mask_ack = mask_ack_gt641xx_irq,
+ .irq_unmask = unmask_gt641xx_irq,
};
void gt641xx_irq_dispatch(void)
@@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void)
* bit31: logical or of bits[25:1].
*/
for (i = 1; i < 30; i++)
- set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
- &gt641xx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
+ &gt641xx_irq_chip, handle_level_irq);
}
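
The gt641xx callbacks above all follow the same shape: compute the interrupt's bit with GT641XX_IRQ_TO_BIT() and read-modify-write the cause or mask register under gt641xx_irq_lock. A sketch of just the bit arithmetic, with register access and locking omitted:

#include <stdio.h>

#define IRQ_BASE	8
#define IRQ_TO_BIT(irq)	(1U << ((irq) - IRQ_BASE))

int main(void)
{
	unsigned int cause = 0xffffffffu;

	/* Ack IRQ 10 by clearing its cause bit, as ack_gt641xx_irq() does
	 * (there the update is protected by the raw spinlock). */
	cause &= ~IRQ_TO_BIT(10);
	printf("cause = 0x%08x\n", cause);	/* bit 2 cleared */
	return 0;
}
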
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4edc7e451d9..a734b2c2f9e 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -9,7 +9,6 @@
*
* Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
*/
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -17,6 +16,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/msc01_ic.h>
+#include <asm/traps.h>
static unsigned long _icctrl_msc;
#define MSC01_IC_REG_BASE _icctrl_msc
@@ -27,8 +27,10 @@ static unsigned long _icctrl_msc;
static unsigned int irq_base;
/* mask off an interrupt */
-static inline void mask_msc_irq(unsigned int irq)
+static inline void mask_msc_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
else
@@ -36,8 +38,10 @@ static inline void mask_msc_irq(unsigned int irq)
}
/* unmask an interrupt */
-static inline void unmask_msc_irq(unsigned int irq)
+static inline void unmask_msc_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
if (irq < (irq_base + 32))
MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
else
@@ -47,21 +51,21 @@ static inline void unmask_msc_irq(unsigned int irq)
/*
* Masks and ACKs an IRQ
*/
-static void level_mask_and_ack_msc_irq(unsigned int irq)
+static void level_mask_and_ack_msc_irq(struct irq_data *d)
{
- mask_msc_irq(irq);
+ mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
- /* This actually needs to be a call into platform code */
- smtc_im_ack_irq(irq);
}
/*
* Masks and ACKs an IRQ
*/
-static void edge_mask_and_ack_msc_irq(unsigned int irq)
+static void edge_mask_and_ack_msc_irq(struct irq_data *d)
{
- mask_msc_irq(irq);
+ unsigned int irq = d->irq;
+
+ mask_msc_irq(d);
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
@@ -70,16 +74,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
}
- smtc_im_ack_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_msc_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- unmask_msc_irq(irq);
}
/*
@@ -87,7 +81,7 @@ static void end_msc_irq(unsigned int irq)
*/
void ll_msc_irq(void)
{
- unsigned int irq;
+ unsigned int irq;
/* read the interrupt vector register */
MSCIC_READ(MSC01_IC_VEC, irq);
@@ -98,38 +92,33 @@ void ll_msc_irq(void)
}
}
-void
-msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
+static void msc_bind_eic_interrupt(int irq, int set)
{
MSCIC_WRITE(MSC01_IC_RAMW,
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
-struct irq_chip msc_levelirq_type = {
+static struct irq_chip msc_levelirq_type = {
.name = "SOC-it-Level",
- .ack = level_mask_and_ack_msc_irq,
- .mask = mask_msc_irq,
- .mask_ack = level_mask_and_ack_msc_irq,
- .unmask = unmask_msc_irq,
- .eoi = unmask_msc_irq,
- .end = end_msc_irq,
+ .irq_ack = level_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = level_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
};
-struct irq_chip msc_edgeirq_type = {
+static struct irq_chip msc_edgeirq_type = {
.name = "SOC-it-Edge",
- .ack = edge_mask_and_ack_msc_irq,
- .mask = mask_msc_irq,
- .mask_ack = edge_mask_and_ack_msc_irq,
- .unmask = unmask_msc_irq,
- .eoi = unmask_msc_irq,
- .end = end_msc_irq,
+ .irq_ack = edge_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = edge_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
};
void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
{
- extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
-
_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
/* Reset interrupt controller - initialises all registers to 0 */
@@ -137,19 +126,25 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
- for (; nirq >= 0; nirq--, imp++) {
+ for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
- set_irq_chip(irqbase+n, &msc_edgeirq_type);
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_edgeirq_type,
+ handle_edge_irq,
+ "edge");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
- set_irq_chip(irqbase+n, &msc_levelirq_type);
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_levelirq_type,
+ handle_level_irq,
+ "level");
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
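
init_msc_irqs() above walks a board-supplied table and registers an edge or level chip per entry. Note the loop-bound change from "nirq >= 0" to "nirq > 0", which fixes an off-by-one that walked one entry past the table. A minimal sketch of the table-driven dispatch, with illustrative names:

#include <stdio.h>

enum irq_type { IRQ_EDGE, IRQ_LEVEL };

struct irqmap {
	int irq;
	enum irq_type type;
};

int main(void)
{
	static const struct irqmap map[] = {
		{ 0, IRQ_LEVEL }, { 1, IRQ_EDGE }, { 2, IRQ_LEVEL },
	};
	const struct irqmap *imp = map;
	int nirq = 3;

	/* With ">= 0" this would iterate four times over a 3-entry table. */
	for (; nirq > 0; nirq--, imp++)
		printf("irq %d -> %s handler\n", imp->irq,
		       imp->type == IRQ_EDGE ? "edge" : "level");
	return 0;
}
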
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index fb50cc78b28..26f4e4c9db1 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2003 Ralf Baechle
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -11,29 +11,29 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
-#include <asm/system.h>
-static inline void unmask_rm7k_irq(unsigned int irq)
+static inline void unmask_rm7k_irq(struct irq_data *d)
{
- set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
+ set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
-static inline void mask_rm7k_irq(unsigned int irq)
+static inline void mask_rm7k_irq(struct irq_data *d)
{
- clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
+ clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
static struct irq_chip rm7k_irq_controller = {
.name = "RM7000",
- .ack = mask_rm7k_irq,
- .mask = mask_rm7k_irq,
- .mask_ack = mask_rm7k_irq,
- .unmask = unmask_rm7k_irq,
- .eoi = unmask_rm7k_irq
+ .irq_ack = mask_rm7k_irq,
+ .irq_mask = mask_rm7k_irq,
+ .irq_mask_ack = mask_rm7k_irq,
+ .irq_unmask = unmask_rm7k_irq,
+ .irq_eoi = unmask_rm7k_irq
};
void __init rm7k_cpu_irq_init(void)
@@ -44,6 +44,6 @@ void __init rm7k_cpu_irq_init(void)
clear_c0_intcontrol(0x00000f00); /* Mask all */
for (i = base; i < base + 4; i++)
- set_irq_chip_and_handler(i, &rm7k_irq_controller,
+ irq_set_chip_and_handler(i, &rm7k_irq_controller,
handle_percpu_irq);
}
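
The RM7000 chip above masks and unmasks its four extended interrupts through IntControl bits 8..11, hence the 0x100 base shift. The arithmetic, worked out in a small sketch:

#include <stdio.h>

#define RM7K_IRQ_BASE	16

int main(void)
{
	unsigned int intcontrol = 0;

	/* unmask_rm7k_irq(): irq 18 maps to IC bit 10 (0x100 << 2). */
	intcontrol |= 0x100 << (18 - RM7K_IRQ_BASE);
	printf("intcontrol = 0x%04x\n", intcontrol);	/* 0x0400 */

	/* mask_rm7k_irq(): clear the same bit again. */
	intcontrol &= ~(0x100 << (18 - RM7K_IRQ_BASE));
	printf("intcontrol = 0x%04x\n", intcontrol);	/* 0x0000 */
	return 0;
}
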
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
deleted file mode 100644
index ed9febe63d7..00000000000
--- a/arch/mips/kernel/irq-rm9000.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Handler for RM9000 extended interrupts. These are a non-standard
- * feature so we handle them separately from standard interrupts.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
-
-static inline void unmask_rm9k_irq(unsigned int irq)
-{
- set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
-}
-
-static inline void mask_rm9k_irq(unsigned int irq)
-{
- clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
-}
-
-static inline void rm9k_cpu_irq_enable(unsigned int irq)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- unmask_rm9k_irq(irq);
- local_irq_restore(flags);
-}
-
-/*
- * Performance counter interrupts are global on all processors.
- */
-static void local_rm9k_perfcounter_irq_startup(void *args)
-{
- unsigned int irq = (unsigned int) args;
-
- rm9k_cpu_irq_enable(irq);
-}
-
-static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
-{
- on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1);
-
- return 0;
-}
-
-static void local_rm9k_perfcounter_irq_shutdown(void *args)
-{
- unsigned int irq = (unsigned int) args;
- unsigned long flags;
-
- local_irq_save(flags);
- mask_rm9k_irq(irq);
- local_irq_restore(flags);
-}
-
-static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
-{
- on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
-}
-
-static struct irq_chip rm9k_irq_controller = {
- .name = "RM9000",
- .ack = mask_rm9k_irq,
- .mask = mask_rm9k_irq,
- .mask_ack = mask_rm9k_irq,
- .unmask = unmask_rm9k_irq,
- .eoi = unmask_rm9k_irq
-};
-
-static struct irq_chip rm9k_perfcounter_irq = {
- .name = "RM9000",
- .startup = rm9k_perfcounter_irq_startup,
- .shutdown = rm9k_perfcounter_irq_shutdown,
- .ack = mask_rm9k_irq,
- .mask = mask_rm9k_irq,
- .mask_ack = mask_rm9k_irq,
- .unmask = unmask_rm9k_irq,
-};
-
-unsigned int rm9000_perfcount_irq;
-
-EXPORT_SYMBOL(rm9000_perfcount_irq);
-
-void __init rm9k_cpu_irq_init(void)
-{
- int base = RM9K_CPU_IRQ_BASE;
- int i;
-
- clear_c0_intcontrol(0x0000f000); /* Mask all */
-
- for (i = base; i < base + 4; i++)
- set_irq_chip_and_handler(i, &rm9k_irq_controller,
- handle_level_irq);
-
- rm9000_perfcount_irq = base + 1;
- set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
- handle_percpu_irq);
-}
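
The deleted rm9000 code is a reminder of one subtlety: its performance-counter interrupt was global, so the startup hook ran on_each_cpu() to unmask the line locally on every processor. A trivial stand-in for that fan-out, with the per-CPU callback reduced to a print:

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the per-CPU callback passed to on_each_cpu(). */
static void local_perfcounter_unmask(int cpu, unsigned int irq)
{
	printf("cpu%d: unmask irq %u\n", cpu, irq);
}

int main(void)
{
	unsigned int perf_irq = 13;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		local_perfcounter_unmask(cpu, perf_irq);
	return 0;
}
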
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index e3309ff9ece..d2bfbc2e899 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -13,19 +13,22 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
-#include <linux/module.h>
#include <linux/proc_fs.h>
-#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/ftrace.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
+#ifdef CONFIG_KGDB
+int kgdb_early_setup;
+#endif
+
static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
int allocate_irqno(void)
@@ -44,10 +47,8 @@ again:
return irq;
}
-EXPORT_SYMBOL_GPL(allocate_irqno);
-
/*
- * Allocate the 16 legacy interrupts for i8259 devices. This happens early
+ * Allocate the 16 legacy interrupts for i8259 devices. This happens early
* in the kernel initialization so treating allocation failure as BUG() is
* ok.
*/
@@ -61,67 +62,25 @@ void __init alloc_legacy_irqno(void)
void free_irqno(unsigned int irq)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(irq, irq_map);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
-EXPORT_SYMBOL_GPL(free_irqno);
-
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
void ack_bad_irq(unsigned int irq)
{
- smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_printf(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ", j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto skip;
- seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
- seq_printf(p, "%10u ", kstat_irqs(i));
-#else
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
- seq_printf(p, " %14s", irq_desc[i].chip->name);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
-
- seq_putc(p, '\n');
-skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
- seq_putc(p, '\n');
- seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
- }
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
@@ -130,33 +89,59 @@ asmlinkage void spurious_interrupt(void)
atomic_inc(&irq_err_count);
}
-#ifdef CONFIG_KGDB
-extern void breakpoint(void);
-extern void set_debug_traps(void);
-
-static int kgdb_flag = 1;
-static int __init nokgdb(char *str)
-{
- kgdb_flag = 0;
- return 1;
-}
-__setup("nokgdb", nokgdb);
-#endif
-
void __init init_IRQ(void)
{
int i;
+#ifdef CONFIG_KGDB
+ if (kgdb_early_setup)
+ return;
+#endif
+
for (i = 0; i < NR_IRQS; i++)
- set_irq_noprobe(i);
+ irq_set_noprobe(i);
arch_init_irq();
#ifdef CONFIG_KGDB
- if (kgdb_flag) {
- printk("Wait for gdb client connection ...\n");
- set_debug_traps();
- breakpoint();
+ if (!kgdb_early_setup)
+ kgdb_early_setup = 1;
+#endif
+}
+
+#ifdef DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
}
+}
+#else
+static inline void check_stack_overflow(void) {}
#endif
+
+
+/*
+ * do_IRQ handles all normal device IRQs (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+void __irq_entry do_IRQ(unsigned int irq)
+{
+ irq_enter();
+ check_stack_overflow();
+ generic_handle_irq(irq);
+ irq_exit();
}
+
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 0ee2567b780..e498f2b3646 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -3,13 +3,13 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
*
* This file define the irq handler for MIPS CPU interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -30,48 +30,45 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
-#include <asm/system.h>
-static inline void unmask_mips_irq(unsigned int irq)
+static inline void unmask_mips_irq(struct irq_data *d)
{
- set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+ set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_enable_hazard();
}
-static inline void mask_mips_irq(unsigned int irq)
+static inline void mask_mips_irq(struct irq_data *d)
{
- clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+ clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_disable_hazard();
}
static struct irq_chip mips_cpu_irq_controller = {
.name = "MIPS",
- .ack = mask_mips_irq,
- .mask = mask_mips_irq,
- .mask_ack = mask_mips_irq,
- .unmask = unmask_mips_irq,
- .eoi = unmask_mips_irq,
+ .irq_ack = mask_mips_irq,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mask_mips_irq,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
};
/*
* Basically the same as above but taking care of all the MT stuff
*/
-#define unmask_mips_mt_irq unmask_mips_irq
-#define mask_mips_mt_irq mask_mips_irq
-
-static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+ clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
- unmask_mips_mt_irq(irq);
-
+ unmask_mips_irq(d);
return 0;
}
@@ -79,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
* While we ack the interrupt interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for mips_cpu_irq_end.
*/
-static void mips_mt_cpu_irq_ack(unsigned int irq)
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+ clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
evpe(vpflags);
- mask_mips_mt_irq(irq);
+ mask_mips_irq(d);
}
static struct irq_chip mips_mt_cpu_irq_controller = {
.name = "MIPS",
- .startup = mips_mt_cpu_irq_startup,
- .ack = mips_mt_cpu_irq_ack,
- .mask = mask_mips_mt_irq,
- .mask_ack = mips_mt_cpu_irq_ack,
- .unmask = unmask_mips_mt_irq,
- .eoi = unmask_mips_mt_irq,
+ .irq_startup = mips_mt_cpu_irq_startup,
+ .irq_ack = mips_mt_cpu_irq_ack,
+ .irq_mask = mask_mips_irq,
+ .irq_mask_ack = mips_mt_cpu_irq_ack,
+ .irq_unmask = unmask_mips_irq,
+ .irq_eoi = unmask_mips_irq,
};
void __init mips_cpu_irq_init(void)
@@ -106,15 +103,55 @@ void __init mips_cpu_irq_init(void)
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP);
- /*
- * Only MT is using the software interrupts currently, so we just
- * leave them uninitialized for other processors.
- */
- if (cpu_has_mipsmt)
- for (i = irq_base; i < irq_base + 2; i++)
- set_irq_chip(i, &mips_mt_cpu_irq_controller);
+ /* Software interrupts are used for MT/CMT IPI */
+ for (i = irq_base; i < irq_base + 2; i++)
+ irq_set_chip_and_handler(i, cpu_has_mipsmt ?
+ &mips_mt_cpu_irq_controller :
+ &mips_cpu_irq_controller,
+ handle_percpu_irq);
for (i = irq_base + 2; i < irq_base + 8; i++)
- set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+ irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
handle_percpu_irq);
}
+
+#ifdef CONFIG_IRQ_DOMAIN
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ static struct irq_chip *chip;
+
+ if (hw < 2 && cpu_has_mipsmt) {
+ /* Software interrupts are used for MT/CMT IPI */
+ chip = &mips_mt_cpu_irq_controller;
+ } else {
+ chip = &mips_cpu_irq_controller;
+ }
+
+ irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+ .map = mips_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int __init mips_cpu_intc_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain;
+
+ /* Mask interrupts. */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+ domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain for MIPS CPU");
+
+ return 0;
+}
+#endif /* CONFIG_IRQ_DOMAIN */
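
mips_cpu_intc_map() above picks a chip per hardware interrupt number: hwirqs 0 and 1 are the software interrupts used for MT/CMT IPIs and get the MT chip when MT is present. The selection logic, reduced to a sketch:

#include <stdio.h>

static const char *pick_chip(unsigned int hw, int has_mt)
{
	/* Software interrupts (hwirq 0 and 1) need the MT chip on MT cores. */
	return (hw < 2 && has_mt) ? "mips_mt_cpu_irq_controller"
				  : "mips_cpu_irq_controller";
}

int main(void)
{
	unsigned int hw;

	for (hw = 0; hw < 8; hw++)
		printf("hwirq %u -> %s\n", hw, pick_chip(hw, 1));
	return 0;
}
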
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index a4d1462c27f..ab00e490482 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -1,14 +1,12 @@
/*
- * linux/arch/mips/kernel/irq_txx9.c
- *
* Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
- * linux/arch/mips/tx4927/common/tx4927_irq.c,
- * linux/arch/mips/tx4938/common/irq.c
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- * source@mvista.com
+ * ahennessy@mvista.com
+ * source@mvista.com
* Copyright (C) 2000-2001 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -18,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
+#include <linux/irq.h>
#include <asm/txx9irq.h>
struct txx9_irc_reg {
@@ -64,9 +63,9 @@ static struct {
unsigned char mode;
} txx9irq[TXx9_MAX_IR] __read_mostly;
-static void txx9_irq_unmask(unsigned int irq)
+static void txx9_irq_unmask(struct irq_data *d)
{
- unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
@@ -80,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq)
#endif
}
-static inline void txx9_irq_mask(unsigned int irq)
+static inline void txx9_irq_mask(struct irq_data *d)
{
- unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
@@ -100,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq)
#endif
}
-static void txx9_irq_mask_ack(unsigned int irq)
+static void txx9_irq_mask_ack(struct irq_data *d)
{
- unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
- txx9_irq_mask(irq);
+ txx9_irq_mask(d);
/* clear edge detection */
if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
__raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
}
-static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
u32 cr;
u32 __iomem *crp;
int ofs;
@@ -123,7 +122,7 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
- case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
default:
return -EINVAL;
@@ -140,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
static struct irq_chip txx9_irq_chip = {
.name = "TXX9",
- .ack = txx9_irq_mask_ack,
- .mask = txx9_irq_mask,
- .mask_ack = txx9_irq_mask_ack,
- .unmask = txx9_irq_unmask,
- .set_type = txx9_irq_set_type,
+ .irq_ack = txx9_irq_mask_ack,
+ .irq_mask = txx9_irq_mask,
+ .irq_mask_ack = txx9_irq_mask_ack,
+ .irq_unmask = txx9_irq_unmask,
+ .irq_set_type = txx9_irq_set_type,
};
void __init txx9_irq_init(unsigned long baseaddr)
@@ -155,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr)
for (i = 0; i < TXx9_MAX_IR; i++) {
txx9irq[i].level = 4; /* middle level */
txx9irq[i].mode = TXx9_IRCR_LOW;
- set_irq_chip_and_handler(TXX9_IRQ_BASE + i,
- &txx9_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
+ handle_level_irq);
}
/* mask all IRC interrupts */
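
The TXx9 callbacks above pack two 8-bit interrupt-level fields into each 32-bit ILR word, which is where the "(irq_nr % 16) / 2" word index and "(irq_nr & 1) * 8" bit offset come from. Reproducing the index math for the first few interrupt numbers:

#include <stdio.h>

int main(void)
{
	unsigned int irq_nr;

	for (irq_nr = 0; irq_nr < 4; irq_nr++) {
		unsigned int word = (irq_nr % 16) / 2;
		unsigned int ofs  = irq_nr / 16 * 16 + (irq_nr & 1) * 8;

		printf("irq %u -> ilr[%u], bit offset %u\n",
		       irq_nr, word, ofs);
	}
	return 0;	/* irq 0 -> ilr[0]/0, irq 1 -> ilr[0]/8, ... */
}
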
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
new file mode 100644
index 00000000000..6001610cfe5
--- /dev/null
+++ b/arch/mips/kernel/jump_label.c
@@ -0,0 +1,54 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+#define J_RANGE_MASK ((1ul << 28) - 1)
+
+void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ union mips_instruction insn;
+ union mips_instruction *insn_p =
+ (union mips_instruction *)(unsigned long)e->code;
+
+ /* Jump only works within a 256MB aligned region. */
+ BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK));
+
+ /* Target must have 4 byte alignment. */
+ BUG_ON((e->target & 3) != 0);
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn.j_format.opcode = j_op;
+ insn.j_format.target = (e->target & J_RANGE_MASK) >> 2;
+ } else {
+ insn.word = 0; /* nop */
+ }
+
+ get_online_cpus();
+ mutex_lock(&text_mutex);
+ *insn_p = insn;
+
+ flush_icache_range((unsigned long)insn_p,
+ (unsigned long)insn_p + sizeof(*insn_p));
+
+ mutex_unlock(&text_mutex);
+ put_online_cpus();
+}
+
+#endif /* HAVE_JUMP_LABEL */
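
arch_jump_label_transform() above patches in a MIPS j instruction, whose 26-bit target field holds bits 27..2 of the destination; the jump inherits the top four PC bits, so code and target must share a 256MB segment, which is what the two BUG_ON()s enforce. A sketch of the range check and field encoding, with illustrative addresses:

#include <stdio.h>

#define J_RANGE_MASK ((1ul << 28) - 1)

int main(void)
{
	unsigned long code   = 0x80123450ul;	/* patched instruction */
	unsigned long target = 0x80234560ul;	/* jump destination */

	/* Both addresses must share the same 256MB segment. */
	if ((target & ~J_RANGE_MASK) != (code & ~J_RANGE_MASK)) {
		printf("out of j range\n");
		return 1;
	}
	printf("j target field = 0x%07lx\n", (target & J_RANGE_MASK) >> 2);
	return 0;
}
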
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
new file mode 100644
index 00000000000..7afcc2f22c0
--- /dev/null
+++ b/arch/mips/kernel/kgdb.c
@@ -0,0 +1,409 @@
+/*
+ * Originally written by Glenn Engel, Lake Stevens Instrument Division
+ *
+ * Contributed by HP Systems
+ *
+ * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
+ * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
+ *
+ * Copyright (C) 1995 Andreas Busse
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright (C) 2004-2005 MontaVista Software Inc.
+ * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
+ *
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h> /* for linux pt_regs struct */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/inst.h>
+#include <asm/fpu.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/uaccess.h>
+
+static struct hard_trap_info {
+ unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ { 6, SIGBUS }, /* instruction bus error */
+ { 7, SIGBUS }, /* data bus error */
+ { 9, SIGTRAP }, /* break */
+/* { 11, SIGILL }, */ /* CPU unusable */
+ { 12, SIGFPE }, /* overflow */
+ { 13, SIGTRAP }, /* trap */
+ { 14, SIGSEGV }, /* virtual instruction cache coherency */
+ { 15, SIGFPE }, /* floating point exception */
+ { 23, SIGSEGV }, /* watch */
+ { 31, SIGSEGV }, /* virtual data cache coherency */
+ { 0, 0} /* Must be last */
+};
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
+ { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
+ { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
+ { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
+ { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fsr", GDB_SIZEOF_REG, 0 },
+ { "fir", GDB_SIZEOF_REG, 0 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ return 0;
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy((void *)&current->thread.fpu.fcr31, mem,
+ dbg_reg_def[regno].size);
+ goto out_save;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ goto out_save;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
+ dbg_reg_def[regno].size);
+out_save:
+ restore_fp(current);
+ }
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ /* First 38 registers */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ goto out;
+ save_fp(current);
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy(mem, (void *)&current->thread.fpu.fcr31,
+ dbg_reg_def[regno].size);
+ goto out;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ memset(mem, 0, dbg_reg_def[regno].size);
+ goto out;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
+ dbg_reg_def[regno].size);
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__(
+ ".globl breakinst\n\t"
+ ".set\tnoreorder\n\t"
+ "nop\n"
+ "breakinst:\tbreak\n\t"
+ "nop\n\t"
+ ".set\treorder");
+}
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+
+ kgdb_nmicallback(raw_smp_processor_id(), NULL);
+
+ set_fs(old_fs);
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
+static int compute_signal(int tt)
+{
+ struct hard_trap_info *ht;
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+ if (ht->tt == tt)
+ return ht->signo;
+
+ return SIGHUP; /* default for things we don't know about */
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ int reg;
+ struct thread_info *ti = task_thread_info(p);
+ unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+ struct pt_regs *regs = (struct pt_regs *)ksp - 1;
+#if (KGDB_GDB_REG_SIZE == 32)
+ u32 *ptr = (u32 *)gdb_regs;
+#else
+ u64 *ptr = (u64 *)gdb_regs;
+#endif
+
+ for (reg = 0; reg < 16; reg++)
+ *(ptr++) = regs->regs[reg];
+
+ /* S0 - S7 */
+ for (reg = 16; reg < 24; reg++)
+ *(ptr++) = regs->regs[reg];
+
+ for (reg = 24; reg < 28; reg++)
+ *(ptr++) = 0;
+
+ /* GP, SP, FP, RA */
+ for (reg = 28; reg < 32; reg++)
+ *(ptr++) = regs->regs[reg];
+
+ *(ptr++) = regs->cp0_status;
+ *(ptr++) = regs->lo;
+ *(ptr++) = regs->hi;
+ *(ptr++) = regs->cp0_badvaddr;
+ *(ptr++) = regs->cp0_cause;
+ *(ptr++) = regs->cp0_epc;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
+/*
+ * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+ * then try to fall into the debugger
+ */
+static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+ mm_segment_t old_fs;
+
+#ifdef CONFIG_KPROBES
+ /*
+ * Return immediately if the kprobes fault notifier has set
+ * DIE_PAGE_FAULT.
+ */
+ if (cmd == DIE_PAGE_FAULT)
+ return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ /* Kernel mode. Set correct address limit */
+ old_fs = get_fs();
+ set_fs(get_ds());
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+ set_fs(old_fs);
+ return NOTIFY_DONE;
+ }
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
+ regs->cp0_epc += 4;
+
+ /* In SMP mode, __flush_cache_all does IPI */
+ local_irq_enable();
+ __flush_cache_all();
+
+ set_fs(old_fs);
+ return NOTIFY_STOP;
+}
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_mips_notify,
+};
+
+/*
+ * Handle the 'c' command
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ char *ptr;
+ unsigned long address;
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ /* handle the optional parameter */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &address))
+ regs->cp0_epc = address;
+
+ return 0;
+ }
+
+ return -1;
+}
+
+struct kgdb_arch arch_kgdb_ops;
+
+/*
+ * We use kgdb_early_setup so that functions we need to call now don't
+ * cause trouble when called again later.
+ */
+int kgdb_arch_init(void)
+{
+ union mips_instruction insn = {
+ .r_format = {
+ .opcode = spec_op,
+ .func = break_op,
+ }
+ };
+ memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
+
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
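
compute_signal() above translates a MIPS trap code into the signal reported to gdb via a sentinel-terminated table, defaulting to SIGHUP for anything unknown. The lookup, as a self-contained sketch with a shortened table:

#include <stdio.h>
#include <signal.h>

static const struct { int tt, signo; } traps[] = {
	{  9, SIGTRAP },	/* break */
	{ 12, SIGFPE },		/* overflow */
	{  0, 0 }		/* sentinel: must be last */
};

static int compute_signal(int tt)
{
	int i;

	for (i = 0; traps[i].tt && traps[i].signo; i++)
		if (traps[i].tt == tt)
			return traps[i].signo;
	return SIGHUP;	/* default for traps we don't know about */
}

int main(void)
{
	printf("trap 9 -> signal %d\n", compute_signal(9));
	printf("trap 3 -> signal %d\n", compute_signal(3));
	return 0;
}
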
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
new file mode 100644
index 00000000000..1f8187ab099
--- /dev/null
+++ b/arch/mips/kernel/kprobes.c
@@ -0,0 +1,679 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/inst.h>
+
+static const union mips_instruction breakpoint_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_BP,
+ .func = break_op
+ }
+};
+
+static const union mips_instruction breakpoint2_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_SSTEPBP,
+ .func = break_op
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int __kprobes insn_has_delayslot(union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+
+	/*
+	 * This group contains:
+	 * jr and jalr, which are in r_format.
+	 */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jr_op:
+ case jalr_op:
+ break;
+ default:
+ goto insn_ok;
+ }
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op:
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+
+ /*
+ * These are the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ default:
+ break;
+ }
+insn_ok:
+ return 0;
+}
+
+/*
+ * insn_has_ll_or_sc() checks whether an instruction is an ll or sc.
+ * Putting a breakpoint on top of an atomic ll/sc pair is a bad idea,
+ * so we refuse kprobe insertion for such instructions. We cannot do
+ * much about a breakpoint placed in the middle of an ll/sc pair; it
+ * is up to the user to avoid those places.
+ */
+static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
+{
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
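
To make the comment above concrete, here is the canonical MIPS ll/sc retry
loop (an atomic-increment sketch, not taken from this patch). Any exception
between the ll and the sc, including a kprobe's break, clears the CPU's
LLbit, so the sc always fails, the loop retries into the same probe, and no
progress is ever made:

	1:	ll	t0, 0(a0)	# load-linked: begins the atomic sequence, sets LLbit
		addiu	t0, t0, 1
		sc	t0, 0(a0)	# store-conditional: writes 0 to t0 if LLbit was cleared
		beqz	t0, 1b		# retry on failure
		 nop			# branch delay slot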
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ union mips_instruction insn;
+ union mips_instruction prev_insn;
+ int ret = 0;
+
+ insn = p->addr[0];
+
+ if (insn_has_ll_or_sc(insn)) {
+		pr_notice("Kprobes for ll and sc instructions are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((probe_kernel_read(&prev_insn, p->addr - 1,
+ sizeof(mips_instruction)) == 0) &&
+ insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch delayslot are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* insn: must be on special executable page on mips. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break trap instruction at
+ * index one.
+ *
+	 * On MIPS, if the instruction at the probed address is a branch,
+	 * we need to execute the instruction in its branch delay slot (BD)
+	 * at the time of the probe hit. Since MIPS also lacks hardware
+	 * single-stepping, the BD instruction cannot be executed in-line;
+	 * it is executed on the SSOL slot using a normal breakpoint
+	 * instruction in the next slot.
+	 * So, read the instruction and save it for later execution.
+ */
+ if (insn_has_delayslot(insn))
+ memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
+ else
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+
+ p->ainsn.insn[1] = breakpoint2_insn;
+ p->opcode = *p->addr;
+
+out:
+ return ret;
+}
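
The slot layout described in the comment above, in schematic form (this
recaps only what arch_prepare_kprobe() here and prepare_singlestep() below
actually do):

	p->ainsn.insn[0]: instruction to single-step out of line (the original
	                  instruction, or the delay-slot instruction when the
	                  original is a branch)
	p->ainsn.insn[1]: break BRK_KPROBE_SSTEPBP

prepare_singlestep() points cp0_epc at &p->ainsn.insn[0]; hitting insn[1]
then raises DIE_SSTEPBP, and post_kprobe_handler()/resume_execution() move
epc back to the saved address (or to kcb->target_epc for a branch).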
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+ kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+ kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+ kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+ kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+ kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+/**
+ * evaluate_branch_instruction -
+ *
+ * Evaluate the branch instruction at the probed address during the probe
+ * hit. The result of the evaluation is the updated epc. The instruction
+ * in the delay slot is then single-stepped (using a normal breakpoint)
+ * on the SSOL slot.
+ *
+ * The result is also saved in the kprobe control block for later use,
+ * in case we need to execute the delay-slot instruction. The latter is
+ * not needed for a NOP in the delay slot or for branch-likely
+ * instructions when the branch is taken; for those cases we set the
+ * SKIP_DELAYSLOT flag in the kprobe control block.
+ */
+static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ union mips_instruction insn = p->opcode;
+ long epc;
+ int ret = 0;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ if (p->ainsn.insn->word == 0)
+ kcb->flags |= SKIP_DELAYSLOT;
+ else
+ kcb->flags &= ~SKIP_DELAYSLOT;
+
+ ret = __compute_return_epc_for_insn(regs, insn);
+ if (ret < 0)
+ return ret;
+
+ if (ret == BRANCH_LIKELY_TAKEN)
+ kcb->flags |= SKIP_DELAYSLOT;
+
+ kcb->target_epc = regs->cp0_epc;
+
+ return 0;
+
+unaligned:
+ pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS, current);
+ return -EFAULT;
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ int ret = 0;
+
+ regs->cp0_status &= ~ST0_IE;
+
+ /* single step inline if the instruction is a break */
+ if (p->opcode.word == breakpoint_insn.word ||
+ p->opcode.word == breakpoint2_insn.word)
+ regs->cp0_epc = (unsigned long)p->addr;
+ else if (insn_has_delayslot(p->opcode)) {
+ ret = evaluate_branch_instruction(p, regs, kcb);
+ if (ret < 0) {
+ pr_notice("Kprobes: Error in evaluating branch\n");
+ return;
+ }
+ }
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap. In the case of branch instructions, the target
+ * epc is restored.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (insn_has_delayslot(p->opcode))
+ regs->cp0_epc = kcb->target_epc;
+ else {
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+ }
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->cp0_status &= ~ST0_IE;
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+ goto no_kprobe;
+ }
+ /*
+ * We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_REENTER;
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ resume_execution(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ }
+ return 1;
+ } else {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
+ goto no_kprobe;
+ }
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs))
+ goto ss_probe;
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+ }
+
+ss_probe:
+ prepare_singlestep(p, regs, kcb);
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ resume_execution(p, regs, kcb);
+ preempt_enable_no_resched();
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->cp0_status |= kcb->kprobe_old_SR;
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+	struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEPBP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+
+ if (kprobe_running()
+ && kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_sp = regs->regs[29];
+
+ memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+ regs->cp0_epc = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+ /* Assembler quirk necessitates this '0,code' business. */
+ asm volatile(
+ "break 0,%0\n\t"
+ ".globl jprobe_return_end\n"
+ "jprobe_return_end:\n"
+ : : "n" (BRK_KPROBE_BP) : "memory");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (regs->cp0_epc >= (unsigned long)jprobe_return &&
+ regs->cp0_epc <= (unsigned long)jprobe_return_end) {
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+ MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+ preempt_enable_no_resched();
+
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe causes the
+ * handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile(
+ ".set push\n\t"
+ /* Keep the assembler from reordering and placing JR here. */
+ ".set noreorder\n\t"
+ "nop\n\t"
+ ".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n\t"
+ ".set pop"
+ : : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path have
+	 * return probes installed on them, and/or more than one return
+	 * probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ instruction_pointer(regs) = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
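
A concrete reading of the "multiple instances" comment above (an illustrative
scenario, not part of this patch): if two return probes are registered on the
same function f() and f() is entered once, arch_prepare_kretprobe() runs
twice, so the task's hash bucket holds, head first:

	ri1.ret_addr = &kretprobe_trampoline	(ra had already been redirected
						 when the second probe saved it)
	ri0.ret_addr = real return address in f()'s caller

The loop above fires ri1's handler, sees the trampoline address and keeps
walking; ri0 then supplies the genuine return address, and the loop breaks.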
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
deleted file mode 100644
index 998c4efcce8..00000000000
--- a/arch/mips/kernel/kspd.c
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/unistd.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/syscalls.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-
-#include <asm/vpe.h>
-#include <asm/rtlx.h>
-#include <asm/kspd.h>
-
-static struct workqueue_struct *workqueue = NULL;
-static struct work_struct work;
-
-extern unsigned long cpu_khz;
-
-struct mtsp_syscall {
- int cmd;
- unsigned char abi;
- unsigned char size;
-};
-
-struct mtsp_syscall_ret {
- int retval;
- int errno;
-};
-
-struct mtsp_syscall_generic {
- int arg0;
- int arg1;
- int arg2;
- int arg3;
- int arg4;
- int arg5;
- int arg6;
-};
-
-static struct list_head kspd_notifylist;
-static int sp_stopping = 0;
-
-/* these should match with those in the SDE kit */
-#define MTSP_SYSCALL_BASE 0
-#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0)
-#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1)
-#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2)
-#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3)
-#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4)
-#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5)
-#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6)
-#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
-#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
-#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
-#define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10)
-
-#define MTSP_O_RDONLY 0x0000
-#define MTSP_O_WRONLY 0x0001
-#define MTSP_O_RDWR 0x0002
-#define MTSP_O_NONBLOCK 0x0004
-#define MTSP_O_APPEND 0x0008
-#define MTSP_O_SHLOCK 0x0010
-#define MTSP_O_EXLOCK 0x0020
-#define MTSP_O_ASYNC 0x0040
-#define MTSP_O_FSYNC O_SYNC
-#define MTSP_O_NOFOLLOW 0x0100
-#define MTSP_O_SYNC 0x0080
-#define MTSP_O_CREAT 0x0200
-#define MTSP_O_TRUNC 0x0400
-#define MTSP_O_EXCL 0x0800
-#define MTSP_O_BINARY 0x8000
-
-extern int tclimit;
-
-struct apsp_table {
- int sp;
- int ap;
-};
-
-/* we might want to do the mode flags too */
-struct apsp_table open_flags_table[] = {
- { MTSP_O_RDWR, O_RDWR },
- { MTSP_O_WRONLY, O_WRONLY },
- { MTSP_O_CREAT, O_CREAT },
- { MTSP_O_TRUNC, O_TRUNC },
- { MTSP_O_NONBLOCK, O_NONBLOCK },
- { MTSP_O_APPEND, O_APPEND },
- { MTSP_O_NOFOLLOW, O_NOFOLLOW }
-};
-
-struct apsp_table syscall_command_table[] = {
- { MTSP_SYSCALL_OPEN, __NR_open },
- { MTSP_SYSCALL_CLOSE, __NR_close },
- { MTSP_SYSCALL_READ, __NR_read },
- { MTSP_SYSCALL_WRITE, __NR_write },
- { MTSP_SYSCALL_LSEEK32, __NR_lseek },
- { MTSP_SYSCALL_IOCTL, __NR_ioctl }
-};
-
-static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
-{
- register long int _num __asm__("$2") = num;
- register long int _arg0 __asm__("$4") = arg0;
- register long int _arg1 __asm__("$5") = arg1;
- register long int _arg2 __asm__("$6") = arg2;
- register long int _arg3 __asm__("$7") = arg3;
-
- mm_segment_t old_fs;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- __asm__ __volatile__ (
- " syscall \n"
- : "=r" (_num), "=r" (_arg3)
- : "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3));
-
- set_fs(old_fs);
-
- /* $a3 is error flag */
- if (_arg3)
- return -_num;
-
- return _num;
-}
-
-static int translate_syscall_command(int cmd)
-{
- int i;
- int ret = -1;
-
- for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) {
- if ((cmd == syscall_command_table[i].sp))
- return syscall_command_table[i].ap;
- }
-
- return ret;
-}
-
-static unsigned int translate_open_flags(int flags)
-{
- int i;
- unsigned int ret = 0;
-
- for (i = 0; i < ARRAY_SIZE(open_flags_table); i++) {
- if( (flags & open_flags_table[i].sp) ) {
- ret |= open_flags_table[i].ap;
- }
- }
-
- return ret;
-}
-
-
-static void sp_setfsuidgid( uid_t uid, gid_t gid)
-{
- current->fsuid = uid;
- current->fsgid = gid;
-
- key_fsuid_changed(current);
- key_fsgid_changed(current);
-}
-
-/*
- * Expects a request to be on the sysio channel. Reads it. Decides whether
- * its a linux syscall and runs it, or whatever. Puts the return code back
- * into the request and sends the whole thing back.
- */
-void sp_work_handle_request(void)
-{
- struct mtsp_syscall sc;
- struct mtsp_syscall_generic generic;
- struct mtsp_syscall_ret ret;
- struct kspd_notifications *n;
- unsigned long written;
- mm_segment_t old_fs;
- struct timeval tv;
- struct timezone tz;
- int cmd;
-
- char *vcwd;
- int size;
-
- ret.retval = -1;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) {
- set_fs(old_fs);
- printk(KERN_ERR "Expected request but nothing to read\n");
- return;
- }
-
- size = sc.size;
-
- if (size) {
- if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) {
- set_fs(old_fs);
- printk(KERN_ERR "Expected request but nothing to read\n");
- return;
- }
- }
-
- /* Run the syscall at the privilege of the user who loaded the
- SP program */
-
- if (vpe_getuid(tclimit))
- sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
-
- switch (sc.cmd) {
- /* needs the flags argument translating from SDE kit to
- linux */
- case MTSP_SYSCALL_PIPEFREQ:
- ret.retval = cpu_khz * 1000;
- ret.errno = 0;
- break;
-
- case MTSP_SYSCALL_GETTOD:
- memset(&tz, 0, sizeof(tz));
- if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
- (int)&tz, 0, 0)) == 0)
- ret.retval = tv.tv_sec;
- break;
-
- case MTSP_SYSCALL_EXIT:
- list_for_each_entry(n, &kspd_notifylist, list)
- n->kspd_sp_exit(tclimit);
- sp_stopping = 1;
-
- printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
- generic.arg0);
- break;
-
- case MTSP_SYSCALL_OPEN:
- generic.arg1 = translate_open_flags(generic.arg1);
-
- vcwd = vpe_getcwd(tclimit);
-
- /* change to the cwd of the process that loaded the SP program */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- sys_chdir(vcwd);
- set_fs(old_fs);
-
- sc.cmd = __NR_open;
-
- /* fall through */
-
- default:
- if ((sc.cmd >= __NR_Linux) &&
- (sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) )
- cmd = sc.cmd;
- else
- cmd = translate_syscall_command(sc.cmd);
-
- if (cmd >= 0) {
- ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
- generic.arg2, generic.arg3);
- } else
- printk(KERN_WARNING
- "KSPD: Unknown SP syscall number %d\n", sc.cmd);
- break;
- } /* switch */
-
- if (vpe_getuid(tclimit))
- sp_setfsuidgid( 0, 0);
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret));
- set_fs(old_fs);
- if (written < sizeof(ret))
- printk("KSPD: sp_work_handle_request failed to send to SP\n");
-}
-
-static void sp_cleanup(void)
-{
- struct files_struct *files = current->files;
- int i, j;
- struct fdtable *fdt;
-
- j = 0;
-
- /*
- * It is safe to dereference the fd table without RCU or
- * ->file_lock
- */
- fdt = files_fdtable(files);
- for (;;) {
- unsigned long set;
- i = j * __NFDBITS;
- if (i >= fdt->max_fds)
- break;
- set = fdt->open_fds->fds_bits[j++];
- while (set) {
- if (set & 1) {
- struct file * file = xchg(&fdt->fd[i], NULL);
- if (file)
- filp_close(file, files);
- }
- i++;
- set >>= 1;
- }
- }
-}
-
-static int channel_open = 0;
-
-/* the work handler */
-static void sp_work(struct work_struct *unused)
-{
- if (!channel_open) {
- if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
- printk("KSPD: unable to open sp channel\n");
- sp_stopping = 1;
- } else {
- channel_open++;
- printk(KERN_DEBUG "KSPD: SP channel opened\n");
- }
- } else {
- /* wait for some data, allow it to sleep */
- rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1);
-
- /* Check we haven't been woken because we are stopping */
- if (!sp_stopping)
- sp_work_handle_request();
- }
-
- if (!sp_stopping)
- queue_work(workqueue, &work);
- else
- sp_cleanup();
-}
-
-static void startwork(int vpe)
-{
- sp_stopping = channel_open = 0;
-
- if (workqueue == NULL) {
- if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) {
- printk(KERN_ERR "unable to start kspd\n");
- return;
- }
-
- INIT_WORK(&work, sp_work);
- }
-
- queue_work(workqueue, &work);
-}
-
-static void stopwork(int vpe)
-{
- sp_stopping = 1;
-
- printk(KERN_DEBUG "KSPD: SP stopping\n");
-}
-
-void kspd_notify(struct kspd_notifications *notify)
-{
- list_add(&notify->list, &kspd_notifylist);
-}
-
-static struct vpe_notifications notify;
-static int kspd_module_init(void)
-{
- INIT_LIST_HEAD(&kspd_notifylist);
-
- notify.start = startwork;
- notify.stop = stopwork;
- vpe_notify(tclimit, &notify);
-
- return 0;
-}
-
-static void kspd_module_exit(void)
-{
-
-}
-
-module_init(kspd_module_init);
-module_exit(kspd_module_exit);
-
-MODULE_DESCRIPTION("MIPS KSPD");
-MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 65af3cc90ab..0b29646bcee 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -3,21 +3,17 @@
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson (ulfc@engr.sgi.com)
- * sys32_execve from ia64/ia32 code, Feb 2000, Kanoj Sarcar (kanoj@sgi.com)
*/
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
-#include <linux/smp_lock.h>
#include <linux/highuid.h>
-#include <linux/dirent.h>
#include <linux/resource.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/times.h>
#include <linux/poll.h>
-#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/shm.h>
@@ -36,6 +32,7 @@
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ipc.h>
+#include <linux/slab.h>
#include <net/sock.h>
#include <net/scm.h>
@@ -64,109 +61,22 @@
#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
-/*
- * Revalidate the inode. This is required for proper NFS attribute caching.
- */
-
-int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
-{
- struct compat_stat tmp;
-
- if (!new_valid_dev(stat->dev) || !new_valid_dev(stat->rdev))
- return -EOVERFLOW;
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.st_dev = new_encode_dev(stat->dev);
- tmp.st_ino = stat->ino;
- if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
- return -EOVERFLOW;
- tmp.st_mode = stat->mode;
- tmp.st_nlink = stat->nlink;
- SET_UID(tmp.st_uid, stat->uid);
- SET_GID(tmp.st_gid, stat->gid);
- tmp.st_rdev = new_encode_dev(stat->rdev);
- tmp.st_size = stat->size;
- tmp.st_atime = stat->atime.tv_sec;
- tmp.st_mtime = stat->mtime.tv_sec;
- tmp.st_ctime = stat->ctime.tv_sec;
-#ifdef STAT_HAVE_NSEC
- tmp.st_atime_nsec = stat->atime.tv_nsec;
- tmp.st_mtime_nsec = stat->mtime.tv_nsec;
- tmp.st_ctime_nsec = stat->ctime.tv_nsec;
-#endif
- tmp.st_blocks = stat->blocks;
- tmp.st_blksize = stat->blksize;
- return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
-}
-
-asmlinkage unsigned long
-sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
+SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
{
- struct file * file = NULL;
unsigned long error;
error = -EINVAL;
if (pgoff & (~PAGE_MASK >> 12))
goto out;
- pgoff >>= PAGE_SHIFT-12;
-
- if (!(flags & MAP_ANONYMOUS)) {
- error = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
- }
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-
-out:
- return error;
-}
-
-
-asmlinkage int sys_truncate64(const char __user *path, unsigned int high,
- unsigned int low)
-{
- if ((int)high < 0)
- return -EINVAL;
- return sys_truncate(path, ((long) high << 32) | low);
-}
-
-asmlinkage int sys_ftruncate64(unsigned int fd, unsigned int high,
- unsigned int low)
-{
- if ((int)high < 0)
- return -EINVAL;
- return sys_ftruncate(fd, ((long) high << 32) | low);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys32_execve(nabi_no_regargs struct pt_regs regs)
-{
- int error;
- char * filename;
-
- filename = getname(compat_ptr(regs.regs[4]));
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = compat_do_execve(filename, compat_ptr(regs.regs[5]),
- compat_ptr(regs.regs[6]), &regs);
- putname(filename);
-
+ error = sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT-12));
out:
return error;
}
-#define RLIM_INFINITY32 0x7fffffff
+#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
@@ -174,354 +84,57 @@ struct rlimit32 {
int rlim_max;
};
-asmlinkage long sys32_truncate64(const char __user * path,
- unsigned long __dummy, int a2, int a3)
+SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
+ unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
return sys_truncate(path, merge_64(a2, a3));
}
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
- int a2, int a3)
+SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
+ unsigned long, a2, unsigned long, a3)
{
return sys_ftruncate(fd, merge_64(a2, a3));
}
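
These wrappers all undo the same O32 convention: a 64-bit argument arrives in
an aligned pair of 32-bit registers, with a dummy argument inserted for
alignment, and merge_64() (defined near the top of this file; in the variant
visible above the second operand supplies the high word, and the other
endianness swaps the roles) glues the halves back together. A small host-side
sketch of the arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	/* Same shape as the merge_64() variant visible above. */
	#define merge_64(r1, r2) \
		((((uint64_t)(r2) & 0xffffffffUL) << 32) + ((uint64_t)(r1) & 0xffffffffUL))

	int main(void)
	{
		/* sys_32_truncate64(path, __dummy, a2, a3) with a 6 GiB length:
		 * 0x180000000 is split across the register pair as below. */
		uint32_t a2 = 0x80000000u, a3 = 0x1u;

		printf("0x%llx\n", (unsigned long long)merge_64(a2, a3)); /* 0x180000000 */
		return 0;
	}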
-static inline long
-get_tv32(struct timeval *o, struct compat_timeval __user *i)
-{
- return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
- (__get_user(o->tv_sec, &i->tv_sec) |
- __get_user(o->tv_usec, &i->tv_usec)));
-}
-
-static inline long
-put_tv32(struct compat_timeval __user *o, struct timeval *i)
-{
- return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
- (__put_user(i->tv_sec, &o->tv_sec) |
- __put_user(i->tv_usec, &o->tv_usec)));
-}
-
-extern struct timezone sys_tz;
-
-asmlinkage int
-sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (put_tv32(tv, &ktv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
- return -EFAULT;
- }
- return 0;
-}
-
-static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
-{
- long usec;
-
- if (!access_ok(VERIFY_READ, i, sizeof(*i)))
- return -EFAULT;
- if (__get_user(o->tv_sec, &i->tv_sec))
- return -EFAULT;
- if (__get_user(usec, &i->tv_usec))
- return -EFAULT;
- o->tv_nsec = usec * 1000;
- return 0;
-}
-
-asmlinkage int
-sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
-{
- struct timespec kts;
- struct timezone ktz;
-
- if (tv) {
- if (get_ts32(&kts, tv))
- return -EFAULT;
- }
- if (tz) {
- if (copy_from_user(&ktz, tz, sizeof(ktz)))
- return -EFAULT;
- }
-
- return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
-}
-
-asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
- unsigned int offset_low, loff_t __user * result,
- unsigned int origin)
+SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
+ unsigned int, offset_low, loff_t __user *, result,
+ unsigned int, origin)
{
return sys_llseek(fd, offset_high, offset_low, result, origin);
}
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
- non-seekable files. */
+ non-seekable files. */
-asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf,
- size_t count, u32 unused, u64 a4, u64 a5)
+SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
+ unsigned long, unused, unsigned long, a4, unsigned long, a5)
{
return sys_pread64(fd, buf, count, merge_64(a4, a5));
}
-asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf,
- size_t count, u32 unused, u64 a4, u64 a5)
+SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
+ size_t, count, u32, unused, u64, a4, u64, a5)
{
return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
}
-asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval)
+SYSCALL_DEFINE1(32_personality, unsigned long, personality)
{
- struct timespec t;
+ unsigned int p = personality & 0xffffffff;
int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
- set_fs(old_fs);
- if (put_user (t.tv_sec, &interval->tv_sec) ||
- __put_user(t.tv_nsec, &interval->tv_nsec))
- return -EFAULT;
- return ret;
-}
-#ifdef CONFIG_SYSVIPC
-
-asmlinkage long
-sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
-{
- int version, err;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
- case SEMOP:
- /* struct sembuf is the same on 32 and 64bit :)) */
- err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
- break;
- case SEMTIMEDOP:
- err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
- compat_ptr(fifth));
- break;
- case SEMGET:
- err = sys_semget(first, second, third);
- break;
- case SEMCTL:
- err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
- break;
- case MSGSND:
- err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
- break;
- case MSGRCV:
- err = compat_sys_msgrcv(first, second, fifth, third,
- version, compat_ptr(ptr));
- break;
- case MSGGET:
- err = sys_msgget((key_t) first, second);
- break;
- case MSGCTL:
- err = compat_sys_msgctl(first, second, compat_ptr(ptr));
- break;
- case SHMAT:
- err = compat_sys_shmat(first, second, third, version,
- compat_ptr(ptr));
- break;
- case SHMDT:
- err = sys_shmdt(compat_ptr(ptr));
- break;
- case SHMGET:
- err = sys_shmget(first, (unsigned)second, third);
- break;
- case SHMCTL:
- err = compat_sys_shmctl(first, second, compat_ptr(ptr));
- break;
- default:
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-#else
-
-asmlinkage long
-sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_SYSVIPC */
-
-#ifdef CONFIG_MIPS32_N32
-asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
-{
- /* compat_sys_semctl expects a pointer to union semun */
- u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
- if (put_user(arg, uptr))
- return -EFAULT;
- return compat_sys_semctl(semid, semnum, cmd, uptr);
-}
-
-asmlinkage long sysn32_msgsnd(int msqid, u32 msgp, unsigned msgsz, int msgflg)
-{
- return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp));
-}
-
-asmlinkage long sysn32_msgrcv(int msqid, u32 msgp, size_t msgsz, int msgtyp,
- int msgflg)
-{
- return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64,
- compat_ptr(msgp));
-}
-#endif
-
-struct sysctl_args32
-{
- compat_caddr_t name;
- int nlen;
- compat_caddr_t oldval;
- compat_caddr_t oldlenp;
- compat_caddr_t newval;
- compat_size_t newlen;
- unsigned int __unused[4];
-};
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-
-asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
-{
- struct sysctl_args32 tmp;
- int error;
- size_t oldlen;
- size_t __user *oldlenp = NULL;
- unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7;
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- if (tmp.oldval && tmp.oldlenp) {
- /* Duh, this is ugly and might not work if sysctl_args
- is in read-only memory, but do_sysctl does indirectly
- a lot of uaccess in both directions and we'd have to
- basically copy the whole sysctl.c here, and
- glibc's __sysctl uses rw memory for the structure
- anyway. */
- if (get_user(oldlen, (u32 __user *)A(tmp.oldlenp)) ||
- put_user(oldlen, (size_t __user *)addr))
- return -EFAULT;
- oldlenp = (size_t __user *)addr;
- }
-
- lock_kernel();
- error = do_sysctl((int __user *)A(tmp.name), tmp.nlen, (void __user *)A(tmp.oldval),
- oldlenp, (void __user *)A(tmp.newval), tmp.newlen);
- unlock_kernel();
- if (oldlenp) {
- if (!error) {
- if (get_user(oldlen, (size_t __user *)addr) ||
- put_user(oldlen, (u32 __user *)A(tmp.oldlenp)))
- error = -EFAULT;
- }
- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
- }
- return error;
-}
-
-#endif /* CONFIG_SYSCTL_SYSCALL */
-
-asmlinkage long sys32_newuname(struct new_utsname __user * name)
-{
- int ret = 0;
-
- down_read(&uts_sem);
- if (copy_to_user(name, utsname(), sizeof *name))
- ret = -EFAULT;
- up_read(&uts_sem);
-
- if (current->personality == PER_LINUX32 && !ret)
- if (copy_to_user(name->machine, "mips\0\0\0", 8))
- ret = -EFAULT;
-
- return ret;
-}
-
-asmlinkage int sys32_personality(unsigned long personality)
-{
- int ret;
- personality &= 0xffffffff;
if (personality(current->personality) == PER_LINUX32 &&
- personality == PER_LINUX)
- personality = PER_LINUX32;
- ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
- return ret;
-}
-
-/* ustat compatibility */
-struct ustat32 {
- compat_daddr_t f_tfree;
- compat_ino_t f_tinode;
- char f_fname[6];
- char f_fpack[6];
-};
-
-extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
-
-asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
-{
- int err;
- struct ustat tmp;
- struct ustat32 tmp32;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- err = sys_ustat(dev, (struct ustat __user *)&tmp);
- set_fs(old_fs);
-
- if (err)
- goto out;
-
- memset(&tmp32, 0, sizeof(struct ustat32));
- tmp32.f_tfree = tmp.f_tfree;
- tmp32.f_tinode = tmp.f_tinode;
-
- err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
-
-out:
- return err;
-}
-
-asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
- s32 count)
-{
- mm_segment_t old_fs = get_fs();
- int ret;
- off_t of;
-
- if (offset && get_user(of, offset))
- return -EFAULT;
-
- set_fs(KERNEL_DS);
- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
- set_fs(old_fs);
-
- if (offset && put_user(of, offset))
- return -EFAULT;
-
+ personality(p) == PER_LINUX)
+ p = (p & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(p);
+ if (ret != -1 && personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
- size_t count)
+ size_t count)
{
return sys_readahead(fd, merge_64(a2, a3), count);
}
@@ -550,26 +163,5 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
- merge_64(len_a4, len_a5));
-}
-
-save_static_function(sys32_clone);
-static int noinline __used
-_sys32_clone(nabi_no_regargs struct pt_regs regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs.regs[4];
- newsp = regs.regs[5];
- if (!newsp)
- newsp = regs.regs[29];
- parent_tidptr = (int __user *) regs.regs[6];
-
- /* Use __dummy4 instead of getting it off the stack, so that
- syscall() works. */
- child_tidptr = (int __user *) __dummy4;
- return do_fork(clone_flags, newsp, &regs, 0,
- parent_tidptr, child_tidptr);
+ merge_64(len_a4, len_a5));
}
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 85beb9b0b2d..992e18474da 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -5,7 +5,7 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-
+#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
@@ -19,9 +19,19 @@ extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+#ifdef CONFIG_SMP
+void (*relocated_kexec_smp_wait) (void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
int
machine_kexec_prepare(struct kimage *kimage)
{
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
return 0;
}
@@ -33,14 +43,20 @@ machine_kexec_cleanup(struct kimage *kimage)
void
machine_shutdown(void)
{
+ if (_machine_kexec_shutdown)
+ _machine_kexec_shutdown();
}
void
machine_crash_shutdown(struct pt_regs *regs)
{
+ if (_machine_crash_shutdown)
+ _machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
}
-typedef void (*noretfun_t)(void) __attribute__((noreturn));
+typedef void (*noretfun_t)(void) __noreturn;
void
machine_kexec(struct kimage *image)
@@ -52,7 +68,9 @@ machine_kexec(struct kimage *image)
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
- kexec_start_address = image->start;
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
+
kexec_indirection_page =
(unsigned long) phys_to_virt(image->head & PAGE_MASK);
@@ -63,7 +81,7 @@ machine_kexec(struct kimage *image)
* The generic kexec code builds a page list with physical
* addresses. they are directly accessible through KSEG0 (or
* CKSEG0 or XPHYS if on 64bit system), hence the
- * pys_to_virt() call.
+ * phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
@@ -81,5 +99,12 @@ machine_kexec(struct kimage *image)
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
__flush_cache_all();
+#ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+ (void *)(kexec_smp_wait - relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
((noretfun_t) reboot_code_buffer)();
}
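
For orientation, the indirection list walked in machine_kexec() above encodes
a flag in the low bits of each physical address; these IND_* values come from
include/linux/kexec.h:

	IND_DESTINATION	0x1	entry names the next destination page
	IND_INDIRECTION	0x2	entry points at the next page of list entries
	IND_DONE	0x4	end of the list
	IND_SOURCE	0x8	entry names a page to copy to the destination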
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 00000000000..539b6294b61
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,202 @@
+/*
+ * MIPS specific _mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Copyright (C) 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/ftrace.h>
+
+ .text
+ .set noreorder
+ .set noat
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_SUBU sp, PT_SIZE
+ PTR_S ra, PT_R31(sp)
+ PTR_S AT, PT_R1(sp)
+ PTR_S a0, PT_R4(sp)
+ PTR_S a1, PT_R5(sp)
+ PTR_S a2, PT_R6(sp)
+ PTR_S a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_S a4, PT_R8(sp)
+ PTR_S a5, PT_R9(sp)
+ PTR_S a6, PT_R10(sp)
+ PTR_S a7, PT_R11(sp)
+#endif
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ PTR_L ra, PT_R31(sp)
+ PTR_L AT, PT_R1(sp)
+ PTR_L a0, PT_R4(sp)
+ PTR_L a1, PT_R5(sp)
+ PTR_L a2, PT_R6(sp)
+ PTR_L a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_L a4, PT_R8(sp)
+ PTR_L a5, PT_R9(sp)
+ PTR_L a6, PT_R10(sp)
+ PTR_L a7, PT_R11(sp)
+#endif
+ PTR_ADDIU sp, PT_SIZE
+ .endm
+
+ .macro RETURN_BACK
+ jr ra
+ move ra, AT
+ .endm
+
+/*
+ * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
+ * the location of the parent's return address.
+ */
+#define MCOUNT_RA_ADDRESS_REG $12
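
For context, a 32-bit gcc building with -pg and -mmcount-ra-address emits
roughly the following at each function entry. This is a hedged sketch (exact
output varies by gcc version and ABI, and the spill-slot offset is made up),
but it shows the three conventions the code below relies on: the parent's
return address is handed over in $at (read back as AT), the call site
pre-adjusts the stack so the 32-bit stub undoes it with "addiu sp,sp,8", and
$12 carries the address of the ra save slot for non-leaf functions (zero for
leaves, matching the bnez test in ftrace_graph_caller):

	.set	noreorder
	move	$1, $31			# $at = parent's return address
	addiu	$12, $sp, 28		# non-leaf only: &(ra spill slot), offset illustrative
	jal	_mcount
	 addiu	$sp, $sp, -8		# delay slot; undone by _mcount on 32-bit
	.set	reorder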
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+NESTED(ftrace_caller, PT_SIZE, ra)
+ .globl _mcount
+_mcount:
+ b ftrace_stub
+#ifdef CONFIG_32BIT
+ addiu sp,sp,8
+#else
+ nop
+#endif
+
+ /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
+ lw t1, function_trace_stop
+ bnez t1, ftrace_stub
+ nop
+
+ MCOUNT_SAVE_REGS
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
+#endif
+
+ PTR_SUBU a0, ra, 8 /* arg1: self address */
+ .globl ftrace_call
+ftrace_call:
+ nop /* a placeholder for the call to a real tracing function */
+ move a1, AT /* arg2: parent's return address */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ nop
+ nop
+#endif
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+NESTED(_mcount, PT_SIZE, ra)
+ lw t1, function_trace_stop
+ bnez t1, ftrace_stub
+ nop
+ PTR_LA t1, ftrace_stub
+ PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
+ bne t1, t2, static_trace
+ nop
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_L t3, ftrace_graph_return
+ bne t1, t3, ftrace_graph_caller
+ nop
+ PTR_LA t1, ftrace_graph_entry_stub
+ PTR_L t3, ftrace_graph_entry
+ bne t1, t3, ftrace_graph_caller
+ nop
+#endif
+ b ftrace_stub
+ nop
+
+static_trace:
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(_mcount)
+
+#endif /* ! CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+NESTED(ftrace_graph_caller, PT_SIZE, ra)
+#ifndef CONFIG_DYNAMIC_FTRACE
+ MCOUNT_SAVE_REGS
+#endif
+
+ /* arg1: Get the location of the parent's return address */
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a0, PT_R12(sp)
+#else
+ move a0, MCOUNT_RA_ADDRESS_REG
+#endif
+ bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
+ nop
+#endif
+ PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */
+1:
+
+ /* arg2: Get self return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a1, PT_R31(sp)
+#else
+ move a1, ra
+#endif
+
+ /* arg3: Get frame pointer of current stack */
+#ifdef CONFIG_64BIT
+ PTR_LA a2, PT_SIZE(sp)
+#else
+ PTR_LA a2, (PT_SIZE+8)(sp)
+#endif
+
+ jal prepare_ftrace_return
+ nop
+ MCOUNT_RESTORE_REGS
+ RETURN_BACK
+ END(ftrace_graph_caller)
+
+ .align 2
+ .globl return_to_handler
+return_to_handler:
+ PTR_SUBU sp, PT_SIZE
+ PTR_S v0, PT_R2(sp)
+
+ jal ftrace_return_to_handler
+ PTR_S v1, PT_R3(sp)
+
+ /* restore the real parent address: v0 -> ra */
+ move ra, v0
+
+ PTR_L v0, PT_R2(sp)
+ PTR_L v1, PT_R3(sp)
+ jr ra
+ PTR_ADDIU sp, PT_SIZE
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ .set at
+ .set reorder
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 00000000000..f76f7a08412
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_cm_base;
+void __iomem *mips_cm_l2sync_base;
+
+phys_t __mips_cm_phys_base(void)
+{
+ u32 config3 = read_c0_config3();
+ u32 cmgcr;
+
+ /* Check the CMGCRBase register is implemented */
+ if (!(config3 & MIPS_CONF3_CMGCR))
+ return 0;
+
+ /* Read the address from CMGCRBase */
+ cmgcr = read_c0_cmgcrbase();
+ return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
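
A worked example of the shift above (a typical default, not mandated by the
architecture): CMGCRBase holds the 36-bit GCR physical base right-shifted by
4, so a masked register value of 0x01fbf800 yields

	0x01fbf800 << (36 - 32) = 0x1fbf8000

which is the conventional GCR base on Malta-style boards.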
+
+phys_t mips_cm_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_t __mips_cm_l2sync_phys_base(void)
+{
+ u32 base_reg;
+
+ /*
+	 * If the L2-only sync region is already enabled then leave it at its
+	 * current location.
+ */
+ base_reg = read_gcr_l2_only_sync_base();
+ if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
+ return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
+
+ /* Default to following the CM */
+ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_t mips_cm_l2sync_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+ unsigned major_rev;
+ phys_t addr;
+
+ /* L2-only sync was introduced with CM major revision 6 */
+ major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
+ CM_GCR_REV_MAJOR_SHF;
+ if (major_rev < 6)
+ return;
+
+ /* Find a location for the L2 sync region */
+ addr = mips_cm_l2sync_phys_base();
+ BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
+ if (!addr)
+ return;
+
+ /* Set the region base address & enable it */
+ write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
+
+ /* Map the region */
+ mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+ phys_t addr;
+ u32 base_reg;
+
+ addr = mips_cm_phys_base();
+ BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
+ if (!addr)
+ return -ENODEV;
+
+ mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+ if (!mips_cm_base)
+ return -ENXIO;
+
+ /* sanity check that we're looking at a CM */
+ base_reg = read_gcr_base();
+ if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
+ pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+ (unsigned long)addr);
+ mips_cm_base = NULL;
+ return -ENODEV;
+ }
+
+ /* set default target to memory */
+ base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
+ base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
+ write_gcr_base(base_reg);
+
+ /* disable CM regions */
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+
+ /* probe for an L2-only sync region */
+ mips_cm_probe_l2sync();
+
+ return 0;
+}
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 00000000000..ba473608a34
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+
+void __iomem *mips_cpc_base;
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
+phys_t __weak mips_cpc_phys_base(void)
+{
+ u32 cpc_base;
+
+ if (!mips_cm_present())
+ return 0;
+
+ if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
+ return 0;
+
+ /* If the CPC is already enabled, leave it so */
+ cpc_base = read_gcr_cpc_base();
+ if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
+ return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
+
+ /* Otherwise, give it the default address & enable it */
+ cpc_base = mips_cpc_default_phys_base();
+ write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
+ return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+ phys_t addr;
+ unsigned cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cpc_core_lock, cpu));
+
+ addr = mips_cpc_phys_base();
+ if (!addr)
+ return -ENODEV;
+
+ mips_cpc_base = ioremap_nocache(addr, 0x8000);
+ if (!mips_cpc_base)
+ return -ENXIO;
+
+ return 0;
+}
+
+void mips_cpc_lock_other(unsigned int core)
+{
+ unsigned curr_core;
+ preempt_disable();
+ curr_core = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+ unsigned curr_core = current_cpu_data.core;
+ spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ preempt_enable();
+}
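
Typical usage of the lock/unlock pair above, as a hedged sketch: the names
write_cpc_co_cmd() and CPC_Cx_CMD_PWRUP are assumed from asm/mips-cpc.h of
this era, where the "co" accessors address the core-other register block
selected by mips_cpc_lock_other().

	/* Power up another core via its core-other CPC registers. */
	mips_cpc_lock_other(core);
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
	mips_cpc_unlock_other();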
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740..362bb3707e6 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -1,8 +1,9 @@
/*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/cpu.h>
+#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -18,7 +19,7 @@
cpumask_t mt_fpu_cpumask;
static int fpaff_threshold = -1;
-unsigned long mt_fpemul_threshold = 0;
+unsigned long mt_fpemul_threshold;
/*
* Replacement functions for the sys_sched_setaffinity() and
@@ -26,12 +27,12 @@ unsigned long mt_fpemul_threshold = 0;
* FPU affinity with the user's requested processor affinity.
* This code is 98% identical with the sys_sched_setaffinity()
* and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
+ * updated when kernel/sched/core.c changes.
*/
/*
* find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * used in sys_sched_set/getaffinity() in kernel/sched/core.c, so
* cloned here.
*/
static inline struct task_struct *find_process_by_pid(pid_t pid)
@@ -39,6 +40,21 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
return pid ? find_task_by_vpid(pid) : current;
}
+/*
+ * check the target process has a UID that matches the current process's
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+ rcu_read_unlock();
+ return match;
+}
/*
* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
@@ -46,11 +62,10 @@ static inline struct task_struct *find_process_by_pid(pid_t pid)
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
- cpumask_t new_mask;
- cpumask_t effective_mask;
- int retval;
- struct task_struct *p;
+ cpumask_var_t cpus_allowed, new_mask, effective_mask;
struct thread_info *ti;
+ struct task_struct *p;
+ int retval;
if (len < sizeof(new_mask))
return -EINVAL;
@@ -59,52 +74,74 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
return -EFAULT;
get_online_cpus();
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_process_by_pid(pid);
if (!p) {
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
put_online_cpus();
return -ESRCH;
}
- /*
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and drop tasklist_lock before invoking
- * set_cpus_allowed.
- */
+ /* Prevent p going away */
get_task_struct(p);
+ rcu_read_unlock();
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+ if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_new_mask;
+ }
retval = -EPERM;
- if ((current->euid != p->euid) && (current->euid != p->uid) &&
- !capable(CAP_SYS_NICE)) {
- read_unlock(&tasklist_lock);
+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
goto out_unlock;
- }
- retval = security_task_setscheduler(p, 0, NULL);
+ retval = security_task_setscheduler(p);
if (retval)
goto out_unlock;
/* Record new user-specified CPU set for future reference */
- p->thread.user_cpus_allowed = new_mask;
-
- /* Unlock the task list */
- read_unlock(&tasklist_lock);
+ cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
+ again:
/* Compute new global allowed CPU set if necessary */
ti = task_thread_info(p);
if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
- cpus_intersects(new_mask, mt_fpu_cpumask)) {
- cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
- retval = set_cpus_allowed(p, effective_mask);
+ cpus_intersects(*new_mask, mt_fpu_cpumask)) {
+ cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+ retval = set_cpus_allowed_ptr(p, effective_mask);
} else {
+ cpumask_copy(effective_mask, new_mask);
clear_ti_thread_flag(ti, TIF_FPUBOUND);
- retval = set_cpus_allowed(p, new_mask);
+ retval = set_cpus_allowed_ptr(p, new_mask);
}
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(effective_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+ }
out_unlock:
+ free_cpumask_var(effective_mask);
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+out_put_task:
put_task_struct(p);
put_online_cpus();
return retval;
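
The again: retry above defends against a concurrent cpuset update: after applying the mask, the cpuset's allowed set is re-read, and if the applied mask is no longer a subset the whole computation restarts clamped to the cpuset. Restructured as a loop, the same logic reads (a sketch, not the patch's code; the FPU clamping that derives effective_mask from new_mask is elided):

	for (;;) {
		retval = set_cpus_allowed_ptr(p, effective_mask);
		if (retval)
			break;
		cpuset_cpus_allowed(p, cpus_allowed);	/* re-read cpuset */
		if (cpumask_subset(effective_mask, cpus_allowed))
			break;				/* no race: done */
		/* a concurrent cpuset update won; clamp and retry */
		cpumask_copy(new_mask, cpus_allowed);
	}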
@@ -136,7 +173,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+ cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
out_unlock:
read_unlock(&tasklist_lock);
@@ -159,7 +196,7 @@ __setup("fpaff=", fpaff_thresh);
/*
* FPU Use Factor empirically derived from experiments on 34K
*/
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000
static __init int mt_fp_affinity_init(void)
{
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 640fb0cc6e3..88b1ef5f868 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -1,20 +1,18 @@
/*
- * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
+#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
@@ -59,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
int tc;
unsigned long haltval;
unsigned long tcstatval;
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_soft_dump(void);
-#endif /* CONFIG_MIPT_MT_SMTC */
local_irq_save(flags);
vpflags = dvpe();
@@ -84,9 +79,9 @@ void mips_mt_regdump(unsigned long mvpctl)
read_vpe_c0_vpeconf0());
printk(" VPE%d.Status : %08lx\n",
i, read_vpe_c0_status());
- printk(" VPE%d.EPC : %08lx ",
- i, read_vpe_c0_epc());
- print_symbol("%s\n", read_vpe_c0_epc());
+ printk(" VPE%d.EPC : %08lx %pS\n",
+ i, read_vpe_c0_epc(),
+ (void *) read_vpe_c0_epc());
printk(" VPE%d.Cause : %08lx\n",
i, read_vpe_c0_cause());
printk(" VPE%d.Config7 : %08lx\n",
@@ -111,25 +106,22 @@ void mips_mt_regdump(unsigned long mvpctl)
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
- printk(" TCRestart : %08lx ", read_tc_c0_tcrestart());
- print_symbol("%s\n", read_tc_c0_tcrestart());
+ printk(" TCRestart : %08lx %pS\n",
+ read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
write_tc_c0_tchalt(0);
}
-#ifdef CONFIG_MIPS_MT_SMTC
- smtc_soft_dump();
-#endif /* CONFIG_MIPT_MT_SMTC */
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
}
-static int mt_opt_norps = 0;
+static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
-static int mt_opt_forceconfig7 = 0;
+static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
static int __init rps_disable(char *s)
@@ -162,8 +154,8 @@ static int __init config7_set(char *str)
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
-int mt_protiflush = 0;
-int mt_protdflush = 0;
+int mt_protiflush;
+int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
@@ -195,7 +187,7 @@ static int __init ndflush(char *s)
}
__setup("ndflush=", ndflush);
-static unsigned int itc_base = 0;
+static unsigned int itc_base;
static int __init set_itc_base(char *str)
{
@@ -211,7 +203,7 @@ void mips_mt_set_cpuoptions(void)
unsigned int nconfig7 = oconfig7;
if (mt_opt_norps) {
- printk("\"norps\" option deprectated: use \"rpsctl=\"\n");
+ printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
}
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
@@ -297,21 +289,11 @@ void mips_mt_set_cpuoptions(void)
void mt_cflush_lockdown(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_cflush_lockdown(void);
-
- smtc_cflush_lockdown();
-#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
- void smtc_cflush_release(void);
-
- smtc_cflush_release();
-#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 225755d0c1f..2607c3a4ff7 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -5,22 +5,31 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
*/
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/checksum.h>
-#include <asm/pgtable.h>
+#include <linux/mm.h>
#include <asm/uaccess.h>
+#include <asm/ftrace.h>
extern void *__bzero(void *__s, size_t __count);
+extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+ const char *__from, long __len);
+extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
+ long __len);
extern long __strncpy_from_user_nocheck_asm(char *__to,
- const char *__from, long __len);
+ const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
- long __len);
+ long __len);
+extern long __strlen_kernel_nocheck_asm(const char *s);
+extern long __strlen_kernel_asm(const char *s);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
+extern long __strnlen_kernel_nocheck_asm(const char *s);
+extern long __strnlen_kernel_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);
@@ -31,23 +40,45 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(kernel_thread);
+/*
+ * Functions that operate on entire pages. Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__copy_user_inatomic);
+#ifdef CONFIG_EVA
+EXPORT_SYMBOL(__copy_from_user_eva);
+EXPORT_SYMBOL(__copy_in_user_eva);
+EXPORT_SYMBOL(__copy_to_user_eva);
+EXPORT_SYMBOL(__copy_user_inatomic_eva);
+#endif
EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_asm);
EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strlen_kernel_asm);
EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
+EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strnlen_kernel_asm);
EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL(__strnlen_user_asm);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_user);
+EXPORT_SYMBOL(__csum_partial_copy_kernel);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(invalid_pte_table);
+#ifdef CONFIG_FUNCTION_TRACER
+/* _mcount is defined in arch/mips/kernel/mcount.S */
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/mips/kernel/mips_machine.c b/arch/mips/kernel/mips_machine.c
new file mode 100644
index 00000000000..87609752969
--- /dev/null
+++ b/arch/mips/kernel/mips_machine.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <asm/mips_machine.h>
+#include <asm/prom.h>
+
+static struct mips_machine *mips_machine __initdata;
+
+#define for_each_machine(mach) \
+ for ((mach) = (struct mips_machine *)&__mips_machines_start; \
+ (mach) && \
+ (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
+ (mach)++)
+
+__init int mips_machtype_setup(char *id)
+{
+ struct mips_machine *mach;
+
+ for_each_machine(mach) {
+ if (mach->mach_id == NULL)
+ continue;
+
+ if (strcmp(mach->mach_id, id) == 0) {
+ mips_machtype = mach->mach_type;
+ return 0;
+ }
+ }
+
+ pr_err("MIPS: no machine found for id '%s', supported machines:\n", id);
+ pr_err("%-24s %s\n", "id", "name");
+ for_each_machine(mach)
+ pr_err("%-24s %s\n", mach->mach_id, mach->mach_name);
+
+ return 1;
+}
+
+__setup("machtype=", mips_machtype_setup);
+
+__init void mips_machine_setup(void)
+{
+ struct mips_machine *mach;
+
+ for_each_machine(mach) {
+ if (mips_machtype == mach->mach_type) {
+ mips_machine = mach;
+ break;
+ }
+ }
+
+ if (!mips_machine)
+ return;
+
+ mips_set_machine_name(mips_machine->mach_name);
+
+ if (mips_machine->mach_setup)
+ mips_machine->mach_setup();
+}
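
Boards consume this registry by placing a struct mips_machine into the linker section that for_each_machine() walks. A hypothetical registration, assuming the MIPS_MACHINE helper macro from asm/mips_machine.h (MACH_MYBOARD and my_board_setup are made-up names):

	static void __init my_board_setup(void)
	{
		/* hypothetical board-specific initialisation */
	}

	MIPS_MACHINE(MACH_MYBOARD,		/* assumed machtype constant */
		     "vendor,my-board",		/* matched against machtype= */
		     "Vendor MyBoard",		/* human-readable name */
		     my_board_setup);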
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
new file mode 100644
index 00000000000..2b70723071c
--- /dev/null
+++ b/arch/mips/kernel/module-rela.c
@@ -0,0 +1,145 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2005 Thiemo Seufer
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/moduleloader.h>
+
+extern int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v);
+
+static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+
+ return 0;
+}
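
A worked example of the alignment and segment checks (made-up addresses): a relocation at location 0x00400100 targeting v = 0x00410010 passes both tests, since v is word-aligned and both addresses share bits 31:28, i.e. the jump stays inside one 256 MB segment; the instruction's low 26 bits then receive v >> 2:

	u32 insn = 0x0c000000;		/* jal with an empty target field */
	Elf_Addr v = 0x00410010;	/* word-aligned: v % 4 == 0 */

	insn = (insn & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
	/* insn == 0x0c104004: target field holds 0x00410010 >> 2 */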
+
+static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+
+ return 0;
+}
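
The + 0x8000 in the HI16 handler pre-compensates for the CPU sign-extending the LO16 immediate at run time: rounding the high half up whenever the low half will be treated as negative makes the pair round-trip exactly. A small sketch of the identity (plain C, not patch code):

	u32 v = 0x1234ffff;			/* any 32-bit value */
	u16 hi = ((u64)v + 0x8000) >> 16;	/* 0x1235: rounded up */
	s16 lo = v & 0xffff;			/* 0xffff reads back as -1 */

	/* the hardware combination (hi << 16) + sign_extend(lo) == v */
	BUG_ON((u32)((hi << 16) + lo) != v);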
+
+static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *(Elf_Addr *)location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_higher_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_highest_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
+}
+
+static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
+ Elf_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32_rela,
+ [R_MIPS_26] = apply_r_mips_26_rela,
+ [R_MIPS_HI16] = apply_r_mips_hi16_rela,
+ [R_MIPS_LO16] = apply_r_mips_lo16_rela,
+ [R_MIPS_64] = apply_r_mips_64_rela,
+ [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
+ [R_MIPS_HIGHEST] = apply_r_mips_highest_rela
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf_Sym *sym;
+ u32 *location;
+ unsigned int i;
+ Elf_Addr v;
+ int res;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_MIPS_R_SYM(rel[i]);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ v = sym->st_value + rel[i].r_addend;
+
+ res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index e7ed0ac4853..2a52568dbcd 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -22,13 +22,16 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/jump_label.h>
+
#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
@@ -37,47 +40,19 @@ struct mips_hi16 {
Elf_Addr value;
};
-static struct mips_hi16 *mips_hi16_list;
-
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
-void *module_alloc(unsigned long size)
-{
#ifdef MODULE_START
- struct vm_struct *area;
-
- size = PAGE_ALIGN(size);
- if (!size)
- return NULL;
-
- area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
- if (!area)
- return NULL;
-
- return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
-#else
- if (size == 0)
- return NULL;
- return vmalloc(size);
-#endif
-}
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
- char *secstrings, struct module *mod)
+void *module_alloc(unsigned long size)
{
- return 0;
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+ __builtin_return_address(0));
}
+#endif
-static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
return 0;
}
@@ -89,40 +64,14 @@ static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
}
-static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = v;
-
- return 0;
-}
-
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
- printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
- return -ENOEXEC;
- }
-
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
+ pr_err("module %s: dangerous R_MIPS_26 REL relocation\n",
me->name);
return -ENOEXEC;
}
- *location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
-
- return 0;
-}
-
-static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- if (v % 4) {
- printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
- return -ENOEXEC;
- }
-
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
printk(KERN_ERR
"module %s: relocation overflow\n",
@@ -130,7 +79,8 @@ static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
return -ENOEXEC;
}
- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+ *location = (*location & ~0x03ffffff) |
+ ((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -150,32 +100,34 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
n->addr = (Elf_Addr *)location;
n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
return 0;
}
-static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
+static void free_relocation_chain(struct mips_hi16 *l)
{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ struct mips_hi16 *next;
- return 0;
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
}
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
+ struct mips_hi16 *l;
Elf_Addr val, vallo;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
@@ -210,11 +162,11 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
l = next;
}
- mips_hi16_list = NULL;
+ me->arch.r_mips_hi16_list = NULL;
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -223,41 +175,12 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
out_danger:
- printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
-
- return -ENOEXEC;
-}
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
-static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *location = (*location & 0xffff0000) | (v & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
-{
- *(Elf_Addr *)location = v;
+ pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
- return 0;
-}
-
-static int apply_r_mips_higher_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_highest_rela(struct module *me, u32 *location,
- Elf_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
-
- return 0;
+ return -ENOEXEC;
}
static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
@@ -269,18 +192,6 @@ static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
[R_MIPS_LO16] = apply_r_mips_lo16_rel
};
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32_rela,
- [R_MIPS_26] = apply_r_mips_26_rela,
- [R_MIPS_HI16] = apply_r_mips_hi16_rela,
- [R_MIPS_LO16] = apply_r_mips_lo16_rela,
- [R_MIPS_64] = apply_r_mips_64_rela,
- [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
- [R_MIPS_HIGHEST] = apply_r_mips_highest_rela
-};
-
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
@@ -295,6 +206,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -302,7 +214,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
- if (!sym->st_value) {
+ if (IS_ERR_VALUE(sym->st_value)) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
@@ -318,44 +230,17 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
return res;
}
- return 0;
-}
-
-int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
- unsigned int symindex, unsigned int relsec,
- struct module *me)
-{
- Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- Elf_Sym *sym;
- u32 *location;
- unsigned int i;
- Elf_Addr v;
- int res;
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /* This is the symbol it is referring to */
- sym = (Elf_Sym *)sechdrs[symindex].sh_addr
- + ELF_MIPS_R_SYM(rel[i]);
- if (!sym->st_value) {
- /* Ignore unresolved weak symbol */
- if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
- continue;
- printk(KERN_WARNING "%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
- return -ENOENT;
- }
-
- v = sym->st_value + rel[i].r_addend;
+ /*
+ * Normally the hi16 list should be deallocated at this point. A
+ * malformed binary however could contain a series of R_MIPS_HI16
+ * relocations not followed by a R_MIPS_LO16 relocation. In that
+ * case, free up the list and return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
- res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
- if (res)
- return res;
+ return -ENOEXEC;
}
return 0;
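
The HI16 queue exists because, in REL format, the carry out of the sign-extended low half feeds back into the high half, so a HI16 instruction cannot be patched until its matching LO16 supplies vallo. Moving the queue from a single static list into the module also means two modules being loaded concurrently can no longer corrupt each other's pending relocations; the state now lives in the module itself, roughly (an assumed shape, the real definition is in asm/module.h):

	struct mod_arch_specific {
		struct list_head dbe_list;		/* bus-error tables */
		struct mips_hi16 *r_mips_hi16_list;	/* pending HI16s */
	};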
@@ -377,7 +262,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
spin_unlock_irqrestore(&dbe_lock, flags);
/* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
+ we cannot unload the module, hence no refcnt needed. */
return e;
}
@@ -389,6 +274,9 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ /* Make jump label nops. */
+ jump_label_apply_nops(me);
+
INIT_LIST_HEAD(&me->arch.dbe_list);
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
new file mode 100644
index 00000000000..f6547680c81
--- /dev/null
+++ b/arch/mips/kernel/octeon_switch.S
@@ -0,0 +1,519 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+
+#define USE_ALTERNATE_RESUME_IMPL 1
+ .set push
+ .set arch=mips64r2
+#include "r4k_switch.S"
+ .set pop
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti, int usedfpu)
+ */
+ .align 7
+ LEAF(resume)
+ .set arch=octeon
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+ /*
+ * check if we need to save FPU registers
+ */
+ PTR_L t3, TASK_THREAD_INFO(a0)
+ LONG_L t0, TI_FLAGS(t3)
+ li t1, _TIF_USEDFPU
+ and t2, t0, t1
+ beqz t2, 1f
+ nor t1, zero, t1
+
+ and t0, t0, t1
+ LONG_S t0, TI_FLAGS(t3)
+
+ /*
+ * clear saved user stack CU1 bit
+ */
+ LONG_L t0, ST_OFF(t3)
+ li t1, ~ST0_CU1
+ and t0, t0, t1
+ LONG_S t0, ST_OFF(t3)
+
+ .set push
+ .set arch=mips64r2
+ fpu_save_double a0 t0 t1 # c0_status passed in t0
+ # clobbers t1
+ .set pop
+1:
+
+ /* check if we need to save COP2 registers */
+ PTR_L t2, TASK_THREAD_INFO(a0)
+ LONG_L t0, ST_OFF(t2)
+ bbit0 t0, 30, 1f
+
+ /* Disable COP2 in the stored process state */
+ li t1, ST0_CU2
+ xor t0, t1
+ LONG_S t0, ST_OFF(t2)
+
+ /* Enable COP2 so we can save it */
+ mfc0 t0, CP0_STATUS
+ or t0, t1
+ mtc0 t0, CP0_STATUS
+
+ /* Save COP2 */
+ daddu a0, THREAD_CP2
+ jal octeon_cop2_save
+ dsubu a0, THREAD_CP2
+
+ /* Disable COP2 now that we are done */
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU2
+ xor t0, t1
+ mtc0 t0, CP0_STATUS
+
+1:
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ /* Check if we need to store CVMSEG state */
+ mfc0 t0, $11,7 /* CvmMemCtl */
+ bbit0 t0, 6, 3f /* Is user access enabled? */
+
+ /* Store the CVMSEG state */
+ /* Extract the size of CVMSEG */
+ andi t0, 0x3f
+ /* Multiply * (cache line size/sizeof(long)/2) */
+ sll t0, 7-LONGLOG-1
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ synciobdma
+2:
+ .set noreorder
+ LONG_L t8, 0(t1) /* Load from CVMSEG */
+ subu t0, 1 /* Decrement loop var */
+ LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */
+ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
+ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
+ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
+ bnez t0, 2b /* Loop until we've copied it all */
+ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+ .set reorder
+
+ /* Disable access to CVMSEG */
+ mfc0 t0, $11,7 /* CvmMemCtl */
+ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
+ mtc0 t0, $11,7 /* CvmMemCtl */
+#endif
+3:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
+ set_saved_sp t0, t1, t2
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
+
+/*
+ * void octeon_cop2_save(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ LEAF(octeon_cop2_save)
+
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ /* Save the COP2 CRC state */
+ dmfc2 t0, 0x0201
+ dmfc2 t1, 0x0202
+ dmfc2 t2, 0x0200
+ sd t0, OCTEON_CP2_CRC_IV(a0)
+ sd t1, OCTEON_CP2_CRC_LENGTH(a0)
+ sd t2, OCTEON_CP2_CRC_POLY(a0)
+ /* Skip next instructions if CvmCtl[NODFA_CP2] set */
+ bbit1 t9, 28, 1f
+
+ /* Save the LLM state */
+ dmfc2 t0, 0x0402
+ dmfc2 t1, 0x040A
+ sd t0, OCTEON_CP2_LLM_DAT(a0)
+ sd t1, OCTEON_CP2_LLM_DAT+8(a0)
+
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+
+ /* Save the COP2 crypto state */
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
+ sd t0, OCTEON_CP2_3DES_IV(a0)
+ dmfc2 t0, 0x0088
+ sd t1, OCTEON_CP2_3DES_KEY(a0)
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ sd t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmfc2 t2, 0x0102
+ sd t3, OCTEON_CP2_3DES_KEY+16(a0)
+ dmfc2 t3, 0x0103
+ sd t0, OCTEON_CP2_3DES_RESULT(a0)
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
+ sd t2, OCTEON_CP2_AES_IV(a0)
+ dmfc2 t2, 0x0106
+ sd t3, OCTEON_CP2_AES_IV+8(a0)
+ dmfc2 t3, 0x0107
+ sd t0, OCTEON_CP2_AES_KEY(a0)
+ dmfc2 t0, 0x0110
+ sd t1, OCTEON_CP2_AES_KEY+8(a0)
+ dmfc2 t1, 0x0100
+ sd t2, OCTEON_CP2_AES_KEY+16(a0)
+ dmfc2 t2, 0x0101
+ sd t3, OCTEON_CP2_AES_KEY+24(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ sd t0, OCTEON_CP2_AES_KEYLEN(a0)
+ li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ sd t1, OCTEON_CP2_AES_RESULT(a0)
+ sd t2, OCTEON_CP2_AES_RESULT+8(a0)
+ /* Skip to the Pass1 version of the remainder of the COP2 state */
+ beq t3, t0, 2f
+
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ dmfc2 t1, 0x0240
+ dmfc2 t2, 0x0241
+ dmfc2 t3, 0x0242
+ dmfc2 t0, 0x0243
+ sd t1, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t1, 0x0244
+ sd t2, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t2, 0x0245
+ sd t3, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t3, 0x0246
+ sd t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t0, 0x0247
+ sd t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t1, 0x0248
+ sd t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t2, 0x0249
+ sd t3, OCTEON_CP2_HSH_DATW+48(a0)
+ dmfc2 t3, 0x024A
+ sd t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmfc2 t0, 0x024B
+ sd t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmfc2 t1, 0x024C
+ sd t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmfc2 t2, 0x024D
+ sd t3, OCTEON_CP2_HSH_DATW+80(a0)
+ dmfc2 t3, 0x024E
+ sd t0, OCTEON_CP2_HSH_DATW+88(a0)
+ dmfc2 t0, 0x0250
+ sd t1, OCTEON_CP2_HSH_DATW+96(a0)
+ dmfc2 t1, 0x0251
+ sd t2, OCTEON_CP2_HSH_DATW+104(a0)
+ dmfc2 t2, 0x0252
+ sd t3, OCTEON_CP2_HSH_DATW+112(a0)
+ dmfc2 t3, 0x0253
+ sd t0, OCTEON_CP2_HSH_IVW(a0)
+ dmfc2 t0, 0x0254
+ sd t1, OCTEON_CP2_HSH_IVW+8(a0)
+ dmfc2 t1, 0x0255
+ sd t2, OCTEON_CP2_HSH_IVW+16(a0)
+ dmfc2 t2, 0x0256
+ sd t3, OCTEON_CP2_HSH_IVW+24(a0)
+ dmfc2 t3, 0x0257
+ sd t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmfc2 t0, 0x0258
+ sd t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmfc2 t1, 0x0259
+ sd t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmfc2 t2, 0x025E
+ sd t3, OCTEON_CP2_HSH_IVW+56(a0)
+ dmfc2 t3, 0x025A
+ sd t0, OCTEON_CP2_GFM_MULT(a0)
+ dmfc2 t0, 0x025B
+ sd t1, OCTEON_CP2_GFM_MULT+8(a0)
+ sd t2, OCTEON_CP2_GFM_POLY(a0)
+ sd t3, OCTEON_CP2_GFM_RESULT(a0)
+ sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
+ jr ra
+
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+ dmfc2 t3, 0x0040
+ dmfc2 t0, 0x0041
+ dmfc2 t1, 0x0042
+ dmfc2 t2, 0x0043
+ sd t3, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t3, 0x0044
+ sd t0, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t0, 0x0045
+ sd t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t1, 0x0046
+ sd t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t2, 0x0048
+ sd t3, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t3, 0x0049
+ sd t0, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t0, 0x004A
+ sd t1, OCTEON_CP2_HSH_DATW+48(a0)
+ sd t2, OCTEON_CP2_HSH_IVW(a0)
+ sd t3, OCTEON_CP2_HSH_IVW+8(a0)
+ sd t0, OCTEON_CP2_HSH_IVW+16(a0)
+
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+ jr ra
+ END(octeon_cop2_save)
+
+/*
+ * void octeon_cop2_restore(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_restore)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ pref 4, 256(a0)
+ ld t0, OCTEON_CP2_CRC_IV(a0)
+ pref 4, 384(a0)
+ ld t1, OCTEON_CP2_CRC_LENGTH(a0)
+ ld t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Restore the COP2 CRC state */
+ dmtc2 t0, 0x0201
+ dmtc2 t1, 0x1202
+ bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
+ dmtc2 t2, 0x4200
+
+ /* Restore the LLM state */
+ ld t0, OCTEON_CP2_LLM_DAT(a0)
+ ld t1, OCTEON_CP2_LLM_DAT+8(a0)
+ dmtc2 t0, 0x0402
+ dmtc2 t1, 0x040A
+
+2:
+ bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */
+ nop
+
+ /* Restore the COP2 crypto state common to pass 1 and pass 2 */
+ ld t0, OCTEON_CP2_3DES_IV(a0)
+ ld t1, OCTEON_CP2_3DES_KEY(a0)
+ ld t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmtc2 t0, 0x0084
+ ld t0, OCTEON_CP2_3DES_KEY+16(a0)
+ dmtc2 t1, 0x0080
+ ld t1, OCTEON_CP2_3DES_RESULT(a0)
+ dmtc2 t2, 0x0081
+ ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
+ dmtc2 t0, 0x0082
+ ld t0, OCTEON_CP2_AES_IV(a0)
+ dmtc2 t1, 0x0098
+ ld t1, OCTEON_CP2_AES_IV+8(a0)
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ ld t2, OCTEON_CP2_AES_KEY(a0)
+ dmtc2 t0, 0x0102
+ ld t0, OCTEON_CP2_AES_KEY+8(a0)
+ dmtc2 t1, 0x0103
+ ld t1, OCTEON_CP2_AES_KEY+16(a0)
+ dmtc2 t2, 0x0104
+ ld t2, OCTEON_CP2_AES_KEY+24(a0)
+ dmtc2 t0, 0x0105
+ ld t0, OCTEON_CP2_AES_KEYLEN(a0)
+ dmtc2 t1, 0x0106
+ ld t1, OCTEON_CP2_AES_RESULT(a0)
+ dmtc2 t2, 0x0107
+ ld t2, OCTEON_CP2_AES_RESULT+8(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ dmtc2 t0, 0x0110
+ li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ dmtc2 t1, 0x0100
+ bne t0, t3, 3f /* Skip the next stuff for non-pass1 */
+ dmtc2 t2, 0x0101
+
+ /* this code is specific for pass 1 */
+ ld t0, OCTEON_CP2_HSH_DATW(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t2, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t0, 0x0040
+ ld t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t1, 0x0041
+ ld t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t2, 0x0042
+ ld t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t0, 0x0043
+ ld t0, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t1, 0x0044
+ ld t1, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t2, 0x0045
+ ld t2, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t0, 0x0046
+ ld t0, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t1, 0x0048
+ dmtc2 t2, 0x0049
+ b done_restore /* unconditional branch */
+ dmtc2 t0, 0x004A
+
+3: /* this is post-pass1 code */
+ ld t2, OCTEON_CP2_HSH_DATW(a0)
+ ld t0, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t2, 0x0240
+ ld t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t0, 0x0241
+ ld t0, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t1, 0x0242
+ ld t1, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t2, 0x0243
+ ld t2, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t0, 0x0244
+ ld t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmtc2 t1, 0x0245
+ ld t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmtc2 t2, 0x0246
+ ld t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmtc2 t0, 0x0247
+ ld t0, OCTEON_CP2_HSH_DATW+80(a0)
+ dmtc2 t1, 0x0248
+ ld t1, OCTEON_CP2_HSH_DATW+88(a0)
+ dmtc2 t2, 0x0249
+ ld t2, OCTEON_CP2_HSH_DATW+96(a0)
+ dmtc2 t0, 0x024A
+ ld t0, OCTEON_CP2_HSH_DATW+104(a0)
+ dmtc2 t1, 0x024B
+ ld t1, OCTEON_CP2_HSH_DATW+112(a0)
+ dmtc2 t2, 0x024C
+ ld t2, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t0, 0x024D
+ ld t0, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t1, 0x024E
+ ld t1, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t2, 0x0250
+ ld t2, OCTEON_CP2_HSH_IVW+24(a0)
+ dmtc2 t0, 0x0251
+ ld t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmtc2 t1, 0x0252
+ ld t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmtc2 t2, 0x0253
+ ld t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmtc2 t0, 0x0254
+ ld t0, OCTEON_CP2_HSH_IVW+56(a0)
+ dmtc2 t1, 0x0255
+ ld t1, OCTEON_CP2_GFM_MULT(a0)
+ dmtc2 t2, 0x0256
+ ld t2, OCTEON_CP2_GFM_MULT+8(a0)
+ dmtc2 t0, 0x0257
+ ld t0, OCTEON_CP2_GFM_POLY(a0)
+ dmtc2 t1, 0x0258
+ ld t1, OCTEON_CP2_GFM_RESULT(a0)
+ dmtc2 t2, 0x0259
+ ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
+ dmtc2 t0, 0x025E
+ dmtc2 t1, 0x025A
+ dmtc2 t2, 0x025B
+
+done_restore:
+ jr ra
+ nop
+ END(octeon_cop2_restore)
+ .set pop
+
+/*
+ * void octeon_mult_save()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
+ * safely modify k0 and k1.
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_save)
+ dmfc0 k0, $9,7 /* CvmCtl register. */
+ bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */
+ nop
+
+ /* Save the multiplier state */
+ v3mulu k0, $0, $0
+ v3mulu k1, $0, $0
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
+ ori k1, $0, 1
+ v3mulu k1, k1, $0
+ sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ v3mulu k1, $0, $0
+ sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
+ jr ra
+ sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
+
+1: /* Resume here if CvmCtl[NOMUL] */
+ jr ra
+ END(octeon_mult_save)
+ .set pop
+
+/*
+ * void octeon_mult_restore()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_restore)
+ dmfc0 k1, $9,7 /* CvmCtl register. */
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
+ bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */
+ /* Normally falls through, so no time wasted here */
+ nop
+
+ /* Restore the multiplier state */
+ ld k1, PT_MTP+16(sp) /* P2 */
+ MTM0 v0 /* MPL0 */
+ ld v0, PT_MTP+8(sp) /* P1 */
+ MTM1 v1 /* MPL1 */
+ ld v1, PT_MTP(sp) /* P0 */
+ MTM2 k0 /* MPL2 */
+ MTP2 k1 /* P2 */
+ MTP1 v0 /* P1 */
+ jr ra
+ MTP0 v1 /* P0 */
+
+1: /* Resume here if CvmCtl[NOMUL] */
+ jr ra
+ nop
+ END(octeon_mult_restore)
+ .set pop
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 00000000000..c1cf9c6c3f7
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,69 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/stacktrace.h>
+
+/* Callchain handling code. */
+
+/*
+ * Leave userspace callchain empty for now. When we find a way to trace
+ * the user stack callchains, we will add it here.
+ */
+
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ perf_callchain_store(entry, addr);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ }
+ }
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ perf_callchain_store(entry, pc);
+ if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 00000000000..4f2d9dece7a
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1710 @@
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Copyright (C) 2011 Cavium Networks, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+#define MIPS_MAX_HWEVENTS 4
+#define MIPS_TCS_PER_COUNTER 2
+#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+ * MIPS CPUs vary in performance counters. They use this differently,
+ * and even may not use it.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+ * CNTR_EVEN indicates the indexes of the counters to be used are
+ * even numbers.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+#ifdef CONFIG_MIPS_MT_SMP
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+#else
+ #define T
+ #define V
+ #define P
+#endif
+};
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ u64 max_period;
+ u64 valid_count;
+ u64 overflow;
+ const char *name;
+ int irq;
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static struct mips_pmu mipspmu;
+
+#define M_CONFIG1_PC (1 << 4)
+
+#define M_PERFCTL_EXL (1 << 0)
+#define M_PERFCTL_KERNEL (1 << 1)
+#define M_PERFCTL_SUPERVISOR (1 << 2)
+#define M_PERFCTL_USER (1 << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
+#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+
+#ifdef CONFIG_CPU_BMIPS5000
+#define M_PERFCTL_MT_EN(filter) 0
+#else /* !CONFIG_CPU_BMIPS5000 */
+#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
+#endif /* CONFIG_CPU_BMIPS5000 */
+
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1 << 30)
+#define M_PERFCTL_MORE (1 << 31)
+#define M_PERFCTL_TC (1 << 30)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
+ M_PERFCTL_KERNEL | \
+ M_PERFCTL_USER | \
+ M_PERFCTL_SUPERVISOR | \
+ M_PERFCTL_INTERRUPT_ENABLE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+#define M_PERFCTL_EVENT_MASK 0xfe0
+
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+static int cpu_has_mipsmt_pertccounters;
+
+static DEFINE_RWLOCK(pmuint_rwlock);
+
+#if defined(CONFIG_CPU_BMIPS5000)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
+#else
+/*
+ * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
+ * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
+ */
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : smp_processor_id())
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+
+static unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
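
Since the physical counter bank is shared between the two VPEs of an MT core, the per-CPU counter count is the total halved whenever more than one CPU is possible:

	/* e.g. a dual-VPE 34K exposing 4 physical counters: */
	unsigned int per_vpe = counters_total_to_per_cpu(4);	/* == 2 */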
+
+#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+#define vpe_id() 0
+
+#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+
+static void resume_local_counters(void);
+static void pause_local_counters(void);
+static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
+static int mipsxx_pmu_handle_shared_irq(void);
+
+static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
+{
+ if (vpe_id() == 1)
+ idx = (idx + 2) & 3;
+ return idx;
+}
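
The swizzle rotates VPE 1's logical counter indices onto the other half of the shared bank: (idx + 2) & 3 maps 0→2, 1→3, 2→0, 3→1. So, running on VPE 1:

	/* logical counter 0 lands on physical counter 2: */
	u64 count = mipsxx_pmu_read_counter(0);	/* reads c0_perfcntr2 */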
+
+static u64 mipsxx_pmu_read_counter(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ /*
+ * The counters are unsigned; we must cast to truncate
+ * off the high bits.
+ */
+ return (u32)read_c0_perfcntr0();
+ case 1:
+ return (u32)read_c0_perfcntr1();
+ case 2:
+ return (u32)read_c0_perfcntr2();
+ case 3:
+ return (u32)read_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfcntr0_64();
+ case 1:
+ return read_c0_perfcntr1_64();
+ case 2:
+ return read_c0_perfcntr2_64();
+ case 3:
+ return read_c0_perfcntr3_64();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0(val);
+ return;
+ case 1:
+ write_c0_perfcntr1(val);
+ return;
+ case 2:
+ write_c0_perfcntr2(val);
+ return;
+ case 3:
+ write_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0_64(val);
+ return;
+ case 1:
+ write_c0_perfcntr1_64(val);
+ return;
+ case 2:
+ write_c0_perfcntr2_64(val);
+ return;
+ case 3:
+ write_c0_perfcntr3_64(val);
+ return;
+ }
+}
+
+static unsigned int mipsxx_pmu_read_control(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfctrl0();
+ case 1:
+ return read_c0_perfctrl1();
+ case 2:
+ return read_c0_perfctrl2();
+ case 3:
+ return read_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfctrl0(val);
+ return;
+ case 1:
+ write_c0_perfctrl1(val);
+ return;
+ case 2:
+ write_c0_perfctrl2(val);
+ return;
+ case 3:
+ write_c0_perfctrl3(val);
+ return;
+ }
+}
+
+static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+
+ /*
+ * We only need to care about the counter mask. The range has
+ * already been checked.
+ */
+ unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu.num_counters - 1; i >= 0; i--) {
+ /*
+ * Note that some MIPS perf events can be counted by both
+ * even and odd counters, whereas many others can only be
+ * counted by even _or_ odd counters. This introduces an
+ * issue: when an event of the former kind occupies the
+ * counter that an event of the latter kind needs, counter
+ * allocation for the latter event fails. If the counters
+ * could be swapped dynamically, both events could be
+ * satisfied, but we leave that issue alone for now.
+ */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure interrupt enabled. */
+ M_PERFCTL_INTERRUPT_ENABLE;
+ if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | M_PERFCTL_TC;
+
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+}
+
+static void mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+static int mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 left = local64_read(&hwc->period_left);
+ u64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely((left + period) & (1ULL << 63))) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ } else if (unlikely((left + period) <= period)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > mipspmu.max_period) {
+ left = mipspmu.max_period;
+ local64_set(&hwc->period_left, left);
+ }
+
+ local64_set(&hwc->prev_count, mipspmu.overflow - left);
+
+ mipspmu.write_counter(idx, mipspmu.overflow - left);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
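
The preload makes the counter reach the overflow condition after exactly `left` more events. A numeric sketch, assuming the 32-bit counter configuration where mipspmu.overflow is 1ULL << 31 (the actual values are chosen at PMU init):

	u64 left = 1000;
	mipspmu.write_counter(idx, (1ULL << 31) - left);
	/* bit 31 -- the overflow condition -- is reached after
	 * exactly 1000 further events */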
+
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ u64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = mipspmu.read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ mipsxx_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ mipsxx_pmu_disable_event(hwc->idx);
+ barrier();
+ mipspmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+ /* Look for a free counter for this event. */
+ idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipsxx_pmu_disable_event(idx);
+ cpuc->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ mipspmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ mipspmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static void mipspmu_enable(struct pmu *pmu)
+{
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC. The control registers
+ * cannot be accessed directly across CPUs, so global control needs
+ * cross-CPU calls. on_each_cpu() could help, but we cannot guarantee
+ * that this function is called with interrupts enabled. So here we
+ * pause the local counters, then take a rwlock and leave the counters
+ * on other CPUs alone. If a counter interrupt is raised while we hold
+ * the write lock, that CPU simply pauses its local counters and spins
+ * in the handler. We also know we won't be migrated to another CPU
+ * after pausing the local counters and before grabbing the lock.
+ */
+static void mipspmu_disable(struct pmu *pmu)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_lock(&pmuint_rwlock);
+#endif
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu.irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
+ IRQF_PERCPU | IRQF_NOBALANCING,
+ "mips_perf_pmu", NULL);
+ if (err) {
+ pr_warning("Unable to request IRQ%d for MIPS "
+ "performance counters!\n", mipspmu.irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_pmu_handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warning("The platform hasn't properly defined its "
+ "interrupt controller.\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu.irq >= 0)
+ free_irq(mipspmu.irq, NULL);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; each
+ * has its own specific low-level init routine.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu.num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (event->cpu >= nr_cpumask_bits ||
+ (event->cpu >= 0 && !cpu_online(event->cpu)))
+ return -ENODEV;
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+ .pmu_enable = mipspmu_enable,
+ .pmu_disable = mipspmu_disable,
+ .event_init = mipspmu_event_init,
+ .add = mipspmu_add,
+ .del = mipspmu_del,
+ .start = mipspmu_start,
+ .stop = mipspmu_stop,
+ .read = mipspmu_read,
+};
+
+static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id.
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+#endif
+}
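
Concretely: a branch event with event_id 0x02, countable on even counters only, with per-thread range T (0), encodes under CONFIG_MIPS_MT_SMP to 0x00555502, and mipsxx_pmu_alloc_counter() recovers the counter mask by shifting the middle bits back out:

	/* encode: (T << 24) | (CNTR_EVEN & 0xffff00) | 0x02 */
	unsigned int config = 0x00555502;

	/* decode, as in mipsxx_pmu_alloc_counter(): */
	unsigned long cntr_mask = (config >> 8) & 0xffff;	/* 0x5555 */
	/* with 4 counters, bits 0 and 2 are set: even counters only */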
+
+static const struct mips_perf_event *mipspmu_map_general_event(int idx)
+{
+
+ if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+ return &(*mipspmu.general_event_map)[idx];
+}
+
+static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu.cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+}
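+
+/*
+ * Example (informal): the config packs type/op/result into successive
+ * bytes, so config 0x10000 decodes to cache_type 0 (L1D), cache_op 0
+ * (READ) and cache_result 1 (MISS), i.e. L1 data cache read misses.
+ */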
+
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ return -EINVAL;
+ }
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, data, regs))
+ mipsxx_pmu_disable_event(idx);
+}
+
+static int __n_counters(void)
+{
+ if (!(read_c0_config1() & M_CONFIG1_PC))
+ return 0;
+ if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
+ return 1;
+ if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
+ return 2;
+ if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
+ return 3;
+
+ return 4;
+}
+
+static int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+
+ /* Cases deliberately fall through to clear every active counter. */
+ switch (counters) {
+ case 4:
+ mipsxx_pmu_write_control(3, 0);
+ mipspmu.write_counter(3, 0);
+ /* fall through */
+ case 3:
+ mipsxx_pmu_write_control(2, 0);
+ mipspmu.write_counter(2, 0);
+ /* fall through */
+ case 2:
+ mipsxx_pmu_write_control(1, 0);
+ mipspmu.write_counter(1, 0);
+ /* fall through */
+ case 1:
+ mipsxx_pmu_write_control(0, 0);
+ mipspmu.write_counter(0, 0);
+ }
+}
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
+ [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
+};
+
+static const struct mips_perf_event bmips5000_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
+};
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+},
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+},
+};
+
+/* BMIPS5000 */
+static const struct mips_perf_event bmips5000_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+static const struct mips_perf_event octeon_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ /*
+ * Only general DTLB misses are counted, so use the same event for
+ * read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
+ },
+},
+};
+
+static const struct mips_perf_event xlp_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
+ [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
+ [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
+ [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
+ },
+},
+[C(DTLB)] = {
+ /*
+ * Only general DTLB misses are counted, so use the same event for
+ * read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x25, CNTR_ALL },
+ },
+},
+};
+
+#ifdef CONFIG_MIPS_MT_SMP
+static void check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->cpu >= 0) {
+ if (pev->range > V) {
+ /*
+ * The user selected an event that is processor
+ * wide, while expecting it to be VPE wide.
+ */
+ hwc->config_base |= M_TC_EN_ALL;
+ } else {
+ /*
+ * FIXME: cpu_data[event->cpu].vpe_id reports 0
+ * for both CPUs.
+ */
+ hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
+ hwc->config_base |= M_TC_EN_VPE;
+ }
+ } else
+ hwc->config_base |= M_TC_EN_ALL;
+}
+#else
+static void check_and_calc_range(struct perf_event *event,
+ const struct mips_perf_event *pev)
+{
+}
+#endif
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+ /* Returning MIPS event descriptor for generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu.map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+ * We allow maximum flexibility in how each individual counter shared
+ * by a single CPU operates (the mode exclusion and the range).
+ */
+ hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
+
+ /* Calculate range bits and validate it. */
+ if (num_possible_cpus() > 1)
+ check_and_calc_range(event, pev);
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= M_PERFCTL_USER;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= M_PERFCTL_KERNEL;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= M_PERFCTL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= M_PERFCTL_SUPERVISOR;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event can belong to another cpu. We do not assign a local
+ * counter for it for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = mipspmu.max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event)
+ err = validate_group(event);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do {
+ ctr--;
+ cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ } while (ctr > 0);
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+
+ do {
+ ctr--;
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+ } while (ctr > 0);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu.num_counters;
+ u64 counter;
+ int handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
+ return handled;
+ /*
+ * First we pause the local counters, so that when we are locked
+ * here, the counters are all paused. When it gets locked due to
+ * perf_disable(), the timer interrupt handler will be delayed.
+ *
+ * See also mipsxx_pmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, 0);
+
+ switch (counters) {
+#define HANDLE_COUNTER(n) \
+ case n + 1: \
+ if (test_bit(n, cpuc->used_mask)) { \
+ counter = mipspmu.read_counter(n); \
+ if (counter & mipspmu.overflow) { \
+ handle_associated_event(cpuc, n, &data, regs); \
+ handled = IRQ_HANDLED; \
+ } \
+ }
+ HANDLE_COUNTER(3)
+ HANDLE_COUNTER(2)
+ HANDLE_COUNTER(1)
+ HANDLE_COUNTER(0)
+ }
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not an NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ irq_work_run();
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+ return handled;
+}
+
+static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+/* 24K */
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
+ (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
+ (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
+/* BMIPS5000 */
+#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/*
+ * Users can specify raw events 0-255, where 0-127 select events for the
+ * even counters and 128-255 select events for the odd counters; bit 7
+ * thus indicates the counter parity. For example, to count Event Num 15
+ * on the odd counters (referring to the user manual), add 128 to 15 for
+ * the event config, i.e. use 143 (0x8F).
+ */
+static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ raw_event.event_id = base_id;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+ * This is actually doing nothing. Non-multithreading
+ * CPUs will not check and calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ case CPU_1074K:
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_PROAPTIV:
+ if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_1004K:
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_INTERAPTIV:
+ if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_BMIPS5000:
+ if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ }
+
+ return &raw_event;
+}
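+
+/*
+ * Usage sketch (assuming the standard perf tool raw-event syntax): the
+ * 0x8F example above would be requested as "perf stat -e r8f", which
+ * arrives here as attr.config == 0x8f with bit 7 selecting the odd
+ * counters.
+ */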
+
+static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = base_id;
+
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
+ if (base_id > 0x42)
+ return ERR_PTR(-EOPNOTSUPP);
+ } else {
+ if (base_id > 0x3a)
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ switch (base_id) {
+ case 0x00:
+ case 0x0f:
+ case 0x1e:
+ case 0x1f:
+ case 0x2f:
+ case 0x34:
+ case 0x3b ... 0x3f:
+ return ERR_PTR(-EOPNOTSUPP);
+ default:
+ break;
+ }
+
+ return &raw_event;
+}
+
+static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
+{
+ unsigned int raw_id = config & 0xff;
+
+ /* Only 1-63 are defined */
+ if ((raw_id < 0x01) || (raw_id > 0x3f))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = raw_id;
+
+ return &raw_event;
+}
+
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq;
+ int counter_bits;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+#ifdef MSC01E_INT_BASE
+ if (cpu_has_veic) {
+ /*
+ * Using platform specific interrupt controller defines.
+ */
+ irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+ } else {
+#endif
+ if ((cp0_perfcount_irq >= 0) &&
+ (cp0_compare_irq != cp0_perfcount_irq))
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+#ifdef MSC01E_INT_BASE
+ }
+#endif
+
+ mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipspmu.name = "mips/24K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_34K:
+ mipspmu.name = "mips/34K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_74K:
+ mipspmu.name = "mips/74K";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_PROAPTIV:
+ mipspmu.name = "mips/proAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_1004K:
+ mipspmu.name = "mips/1004K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_1074K:
+ mipspmu.name = "mips/1074K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_INTERAPTIV:
+ mipspmu.name = "mips/interAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_LOONGSON1:
+ mipspmu.name = "mips/loongson1";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ mipspmu.name = "octeon";
+ mipspmu.general_event_map = &octeon_event_map;
+ mipspmu.cache_event_map = &octeon_cache_map;
+ mipspmu.map_raw_event = octeon_pmu_map_raw_event;
+ break;
+ case CPU_BMIPS5000:
+ mipspmu.name = "BMIPS5000";
+ mipspmu.general_event_map = &bmips5000_event_map;
+ mipspmu.cache_event_map = &bmips5000_cache_map;
+ break;
+ case CPU_XLP:
+ mipspmu.name = "xlp";
+ mipspmu.general_event_map = &xlp_event_map;
+ mipspmu.cache_event_map = &xlp_cache_map;
+ mipspmu.map_raw_event = xlp_pmu_map_raw_event;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or support for this CPU is not yet "
+ "implemented.\n");
+ return -ENODEV;
+ }
+
+ mipspmu.num_counters = counters;
+ mipspmu.irq = irq;
+
+ if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
+ mipspmu.max_period = (1ULL << 63) - 1;
+ mipspmu.valid_count = (1ULL << 63) - 1;
+ mipspmu.overflow = 1ULL << 63;
+ mipspmu.read_counter = mipsxx_pmu_read_counter_64;
+ mipspmu.write_counter = mipsxx_pmu_write_counter_64;
+ counter_bits = 64;
+ } else {
+ mipspmu.max_period = (1ULL << 31) - 1;
+ mipspmu.valid_count = (1ULL << 31) - 1;
+ mipspmu.overflow = 1ULL << 31;
+ mipspmu.read_counter = mipsxx_pmu_read_counter;
+ mipspmu.write_counter = mipsxx_pmu_write_counter;
+ counter_bits = 32;
+ }
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ pr_cont("%s PMU enabled, %d %d-bit counters available to each "
+ "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
+ irq < 0 ? " (shared with timer interrupt)" : "");
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
new file mode 100644
index 00000000000..c4c2069d3a2
--- /dev/null
+++ b/arch/mips/kernel/pm-cps.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/idle.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+#include <asm/pm-cps.h>
+#include <asm/smp-cps.h>
+#include <asm/uasm.h>
+
+/*
+ * cps_nc_entry_fn - type of a generated non-coherent state entry function
+ * @online: the count of online coupled VPEs
+ * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
+ *
+ * The code entering & exiting non-coherent states is generated at runtime
+ * using uasm, in order to ensure that the compiler cannot insert a stray
+ * memory access at an unfortunate time and to allow the generation of
+ * optimal core-specific code, particularly for cache routines. If
+ * coupled_coherence is non-zero and this is the entry function for the
+ * CPS_PM_NC_WAIT state, it returns the number of VPEs that were in the
+ * wait state at the point this VPE left it. It returns garbage if
+ * coupled_coherence is zero or if this is not the entry function for
+ * CPS_PM_NC_WAIT.
+ */
+typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
+
+/*
+ * The entry point of the generated non-coherent idle state entry/exit
+ * functions. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
+ nc_asm_enter);
+
+/* Bitmap indicating which states are supported by the system */
+DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
+
+/*
+ * Indicates the number of coupled VPEs ready to operate in a non-coherent
+ * state. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
+static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
+
+/* Indicates online CPUs coupled with the current CPU */
+static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
+
+/*
+ * Used to synchronize entry to deep idle states. Actually per-core rather
+ * than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
+
+/* Saved CPU state across the CPS_PM_POWER_GATED state */
+DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
+
+/* A somewhat arbitrary number of labels & relocs for uasm */
+static struct uasm_label labels[32] __initdata;
+static struct uasm_reloc relocs[32] __initdata;
+
+/* CPU-dependent sync types */
+static unsigned stype_intervention;
+static unsigned stype_memory;
+static unsigned stype_ordering;
+
+enum mips_reg {
+ zero, at, v0, v1, a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9, k0, k1, gp, sp, fp, ra,
+};
+
+bool cps_pm_support_state(enum cps_pm_state state)
+{
+ return test_bit(state, state_support);
+}
+
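+/*
+ * Informal sketch of the rendezvous below: each VPE increments the
+ * counter once on entry and spins until all 'online' VPEs have arrived,
+ * then increments it a second time on the way out. The last VPE to
+ * leave (seeing the count reach online * 2) resets the counter to zero,
+ * which releases any VPEs still spinning in the exit phase.
+ */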
+static void coupled_barrier(atomic_t *a, unsigned online)
+{
+ /*
+ * This function is effectively the same as
+ * cpuidle_coupled_parallel_barrier, which can't be used here since
+ * there's no cpuidle device.
+ */
+
+ if (!coupled_coherence)
+ return;
+
+ smp_mb__before_atomic();
+ atomic_inc(a);
+
+ while (atomic_read(a) < online)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == online * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > online)
+ cpu_relax();
+}
+
+int cps_pm_enter_state(enum cps_pm_state state)
+{
+ unsigned cpu = smp_processor_id();
+ unsigned core = current_cpu_data.core;
+ unsigned online, left;
+ cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
+ u32 *core_ready_count, *nc_core_ready_count;
+ void *nc_addr;
+ cps_nc_entry_fn entry;
+ struct core_boot_config *core_cfg;
+ struct vpe_boot_config *vpe_cfg;
+
+ /* Check that there is an entry function for this state */
+ entry = per_cpu(nc_asm_enter, core)[state];
+ if (!entry)
+ return -EINVAL;
+
+ /* Calculate which coupled CPUs (VPEs) are online */
+#ifdef CONFIG_MIPS_MT
+ if (cpu_online(cpu)) {
+ cpumask_and(coupled_mask, cpu_online_mask,
+ &cpu_sibling_map[cpu]);
+ online = cpumask_weight(coupled_mask);
+ cpumask_clear_cpu(cpu, coupled_mask);
+ } else
+#endif
+ {
+ cpumask_clear(coupled_mask);
+ online = 1;
+ }
+
+ /* Setup the VPE to run mips_cps_pm_restore when started again */
+ if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ core_cfg = &mips_cps_core_bootcfg[core];
+ vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id];
+ vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
+ vpe_cfg->gp = (unsigned long)current_thread_info();
+ vpe_cfg->sp = 0;
+ }
+
+ /* Indicate that this CPU might not be coherent */
+ cpumask_clear_cpu(cpu, &cpu_coherent_mask);
+ smp_mb__after_atomic();
+
+ /* Create a non-coherent mapping of the core ready_count */
+ core_ready_count = per_cpu(ready_count, core);
+ nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
+ (unsigned long)core_ready_count);
+ nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+ ACCESS_ONCE(*nc_core_ready_count) = 0;
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+ left = entry(online, nc_core_ready_count);
+
+ /* Remove the non-coherent mapping of ready_count */
+ kunmap_noncoherent();
+
+ /* Indicate that this CPU is definitely coherent */
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+
+ /*
+ * If this VPE is the first to leave the non-coherent wait state then
+ * it needs to wake up any coupled VPEs still running their wait
+ * instruction so that they return to cpuidle, which can then complete
+ * coordination between the coupled VPEs & provide the governor with
+ * a chance to reflect on the length of time the VPEs were in the
+ * idle state.
+ */
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
+ arch_send_call_function_ipi_mask(coupled_mask);
+
+ return 0;
+}
+
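+/*
+ * Rough shape of the loop generated below (a sketch, not literal uasm
+ * output):
+ *
+ *	la	t0, CKSEG0
+ *	la	t1, CKSEG0 + cache_size
+ * 1:	cache	op, 0(t0)
+ *	...	32 unrolled cache ops, one cache line apart
+ *	addiu	t0, t0, 32 * linesz
+ *	bne	t0, t1, 1b
+ *	 nop
+ */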
+static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cache_desc *cache,
+ unsigned op, int lbl)
+{
+ unsigned cache_size = cache->ways << cache->waybit;
+ unsigned i;
+ const unsigned unroll_lines = 32;
+
+ /* If the cache isn't present this function has it easy */
+ if (cache->flags & MIPS_CACHE_NOT_PRESENT)
+ return;
+
+ /* Load base address */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Calculate end address */
+ if (cache_size < 0x8000)
+ uasm_i_addiu(pp, t1, t0, cache_size);
+ else
+ UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
+
+ /* Start of cache op loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Generate the cache ops */
+ for (i = 0; i < unroll_lines; i++)
+ uasm_i_cache(pp, op, i * cache->linesz, t0);
+
+ /* Update the base address */
+ uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+
+ /* Loop if we haven't reached the end address yet */
+ uasm_il_bne(pp, pr, t0, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cpuinfo_mips *cpu_info,
+ int lbl)
+{
+ unsigned i, fsb_size = 8;
+ unsigned num_loads = (fsb_size * 3) / 2;
+ unsigned line_stride = 2;
+ unsigned line_size = cpu_info->dcache.linesz;
+ unsigned perf_counter, perf_event;
+ unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
+
+ /*
+ * Determine whether this CPU requires an FSB flush, and if so which
+ * performance counter/event reflect stalls due to a full FSB.
+ */
+ switch (__get_cpu_type(cpu_info->cputype)) {
+ case CPU_INTERAPTIV:
+ perf_counter = 1;
+ perf_event = 51;
+ break;
+
+ case CPU_PROAPTIV:
+ /* Newer proAptiv cores don't require this workaround */
+ if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
+ return 0;
+
+ /* On older ones it's unavailable */
+ return -1;
+
+ /* CPUs which do not require the workaround */
+ case CPU_P5600:
+ return 0;
+
+ default:
+ WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
+ return -1;
+ }
+
+ /*
+ * Ensure that the fill/store buffer (FSB) is not holding the results
+ * of a prefetch, since if it is then the CPC sequencer may become
+ * stuck in the D3 (ClrBus) state whilst entering a low power state.
+ */
+
+ /* Preserve perf counter setup */
+ uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Setup perf counter to count FSB full pipeline stalls */
+ uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
+ uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ /* Base address for loads */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Start of clear loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Perform some loads to fill the FSB */
+ for (i = 0; i < num_loads; i++)
+ uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
+
+ /*
+ * Invalidate the new D-cache entries so that the cache will need
+ * refilling (via the FSB) if the loop is executed again.
+ */
+ for (i = 0; i < num_loads; i++) {
+ uasm_i_cache(pp, Hit_Invalidate_D,
+ i * line_size * line_stride, t0);
+ uasm_i_cache(pp, Hit_Writeback_Inv_SD,
+ i * line_size * line_stride, t0);
+ }
+
+ /* Completion barrier */
+ uasm_i_sync(pp, stype_memory);
+ uasm_i_ehb(pp);
+
+ /* Check whether the pipeline stalled due to the FSB being full */
+ uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Loop if it didn't */
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+
+ /* Restore perf counter 1. The count may well now be wrong... */
+ uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ return 0;
+}
+
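+/*
+ * Generates an ll/sc loop which atomically sets bit 31 of the word at
+ * r_addr, retrying until the sc succeeds; roughly:
+ *
+ * 1:	ll	t1, 0(r_addr)
+ *	or	t1, t1, t0	/* t0 holds 0x80000000 */
+ *	sc	t1, 0(r_addr)
+ *	beqz	t1, 1b
+ *	 nop
+ */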
+static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ unsigned r_addr, int lbl)
+{
+ uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
+ uasm_build_label(pl, *pp, lbl);
+ uasm_i_ll(pp, t1, 0, r_addr);
+ uasm_i_or(pp, t1, t1, t0);
+ uasm_i_sc(pp, t1, 0, r_addr);
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+{
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ u32 *buf, *p;
+ const unsigned r_online = a0;
+ const unsigned r_nc_count = a1;
+ const unsigned r_pcohctl = t7;
+ const unsigned max_instrs = 256;
+ unsigned cpc_cmd;
+ int err;
+ enum {
+ lbl_incready = 1,
+ lbl_poll_cont,
+ lbl_secondary_hang,
+ lbl_disable_coherence,
+ lbl_flush_fsb,
+ lbl_invicache,
+ lbl_flushdcache,
+ lbl_hang,
+ lbl_set_cont,
+ lbl_secondary_cont,
+ lbl_decready,
+ };
+
+ /* Allocate a buffer to hold the generated code */
+ p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ /* Clear labels & relocs ready for (re)use */
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /*
+ * Save CPU state. Note the non-standard calling convention
+ * with the return address placed in v0 to avoid clobbering
+ * the ra register before it is saved.
+ */
+ UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
+ uasm_i_jalr(&p, v0, t0);
+ uasm_i_nop(&p);
+ }
+
+ /*
+ * Load addresses of required CM & CPC registers. This is done early
+ * because they're needed in both the enable & disable coherence steps
+ * but in the coupled case the enable step will only run on one VPE.
+ */
+ UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
+
+ if (coupled_coherence) {
+ /* Increment ready_count */
+ uasm_i_sync(&p, stype_ordering);
+ uasm_build_label(&l, p, lbl_incready);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, 1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_incready);
+ uasm_i_addiu(&p, t1, t1, 1);
+
+ /* Ordering barrier */
+ uasm_i_sync(&p, stype_ordering);
+
+ /*
+ * If this is the last VPE to become ready for non-coherence
+ * then it should branch below.
+ */
+ uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
+ uasm_i_nop(&p);
+
+ if (state < CPS_PM_POWER_GATED) {
+ /*
+ * Otherwise this is not the last VPE to become ready
+ * for non-coherence. It needs to wait until coherence
+ * has been disabled before proceeding, which it will do
+ * by polling for the top bit of ready_count being set.
+ */
+ uasm_i_addiu(&p, t1, zero, -1);
+ uasm_build_label(&l, p, lbl_poll_cont);
+ uasm_i_lw(&p, t0, 0, r_nc_count);
+ uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
+ uasm_i_ehb(&p);
+ uasm_i_yield(&p, zero, t1);
+ uasm_il_b(&p, &r, lbl_poll_cont);
+ uasm_i_nop(&p);
+ } else {
+ /*
+ * The core will lose power & this VPE will not continue
+ * so it can simply halt here.
+ */
+ uasm_i_addiu(&p, t0, zero, TCHALT_H);
+ uasm_i_mtc0(&p, t0, 2, 4);
+ uasm_build_label(&l, p, lbl_secondary_hang);
+ uasm_il_b(&p, &r, lbl_secondary_hang);
+ uasm_i_nop(&p);
+ }
+ }
+
+ /*
+ * This is the point of no return - this VPE will now proceed to
+ * disable coherence. At this point we *must* be sure that no other
+ * VPE within the core will interfere with the L1 dcache.
+ */
+ uasm_build_label(&l, p, lbl_disable_coherence);
+
+ /* Invalidate the L1 icache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
+ Index_Invalidate_I, lbl_invicache);
+
+ /* Writeback & invalidate the L1 dcache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
+ Index_Writeback_Inv_D, lbl_flushdcache);
+
+ /* Completion barrier */
+ uasm_i_sync(&p, stype_memory);
+ uasm_i_ehb(&p);
+
+ /*
+ * Disable all but self interventions. The load from COHCTL is defined
+ * by the interAptiv & proAptiv SUMs as ensuring that the operation
+ * resulting from the preceding store is complete.
+ */
+ uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Sync to ensure previous interventions are complete */
+ uasm_i_sync(&p, stype_intervention);
+ uasm_i_ehb(&p);
+
+ /* Disable coherence */
+ uasm_i_sw(&p, zero, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ if (state >= CPS_PM_CLOCK_GATED) {
+ err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
+ lbl_flush_fsb);
+ if (err)
+ goto out_err;
+
+ /* Determine the CPC command to issue */
+ switch (state) {
+ case CPS_PM_CLOCK_GATED:
+ cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
+ break;
+ case CPS_PM_POWER_GATED:
+ cpc_cmd = CPC_Cx_CMD_PWRDOWN;
+ break;
+ default:
+ BUG();
+ goto out_err;
+ }
+
+ /* Issue the CPC command */
+ UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
+ uasm_i_addiu(&p, t1, zero, cpc_cmd);
+ uasm_i_sw(&p, t1, 0, t0);
+
+ if (state == CPS_PM_POWER_GATED) {
+ /* If anything goes wrong just hang */
+ uasm_build_label(&l, p, lbl_hang);
+ uasm_il_b(&p, &r, lbl_hang);
+ uasm_i_nop(&p);
+
+ /*
+ * There's no point generating more code: the core is
+ * powered down and, if powered back up, will run from
+ * the reset vector rather than from here.
+ */
+ goto gen_done;
+ }
+
+ /* Completion barrier */
+ uasm_i_sync(&p, stype_memory);
+ uasm_i_ehb(&p);
+ }
+
+ if (state == CPS_PM_NC_WAIT) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ if (coupled_coherence)
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
+ lbl_set_cont);
+
+ /*
+ * VPEs which did not disable coherence will continue
+ * executing, after coherence has been disabled, from this
+ * point.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Now perform our wait */
+ uasm_i_wait(&p, 0);
+ }
+
+ /*
+ * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
+ * will run this. The first will actually re-enable coherence & the
+ * rest will just be performing a rather unusual nop.
+ */
+ uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Completion barrier */
+ uasm_i_sync(&p, stype_memory);
+ uasm_i_ehb(&p);
+
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
+ /* Decrement ready_count */
+ uasm_build_label(&l, p, lbl_decready);
+ uasm_i_sync(&p, stype_ordering);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, -1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_decready);
+ uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
+
+ /* Ordering barrier */
+ uasm_i_sync(&p, stype_ordering);
+ }
+
+ if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
+
+ /*
+ * This core will be reliant upon another core sending a
+ * power-up command to the CPC in order to resume operation.
+ * Thus an arbitrary VPE can't trigger the core leaving the
+ * idle state, and the one that disables coherence might as
+ * well be the one to re-enable it. The rest will continue
+ * from here after that has been done.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Ordering barrier */
+ uasm_i_sync(&p, stype_ordering);
+ }
+
+ /* The core is coherent, time to return to C code */
+ uasm_i_jr(&p, ra);
+ uasm_i_nop(&p);
+
+gen_done:
+ /* Ensure the code didn't exceed the resources allocated for it */
+ BUG_ON((p - buf) > max_instrs);
+ BUG_ON((l - labels) > ARRAY_SIZE(labels));
+ BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
+
+ /* Patch branch offsets */
+ uasm_resolve_relocs(relocs, labels);
+
+ /* Flush the icache */
+ local_flush_icache_range((unsigned long)buf, (unsigned long)p);
+
+ return buf;
+out_err:
+ kfree(buf);
+ return NULL;
+}
+
+static int __init cps_gen_core_entries(unsigned cpu)
+{
+ enum cps_pm_state state;
+ unsigned core = cpu_data[cpu].core;
+ unsigned dlinesz = cpu_data[cpu].dcache.linesz;
+ void *entry_fn, *core_rc;
+
+ for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
+ if (per_cpu(nc_asm_enter, core)[state])
+ continue;
+ if (!test_bit(state, state_support))
+ continue;
+
+ entry_fn = cps_gen_entry_code(cpu, state);
+ if (!entry_fn) {
+ pr_err("Failed to generate core %u state %u entry\n",
+ core, state);
+ clear_bit(state, state_support);
+ }
+
+ per_cpu(nc_asm_enter, core)[state] = entry_fn;
+ }
+
+ if (!per_cpu(ready_count, core)) {
+ core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+ if (!core_rc) {
+ pr_err("Failed to allocate core %u ready_count\n", core);
+ return -ENOMEM;
+ }
+ per_cpu(ready_count_alloc, core) = core_rc;
+
+ /* Ensure ready_count is aligned to a cacheline boundary */
+ core_rc += dlinesz - 1;
+ core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
+ per_cpu(ready_count, core) = core_rc;
+ }
+
+ return 0;
+}
+
+static int __init cps_pm_init(void)
+{
+ unsigned cpu;
+ int err;
+
+ /* Detect appropriate sync types for the system */
+ switch (current_cpu_data.cputype) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_M5150:
+ case CPU_P5600:
+ stype_intervention = 0x2;
+ stype_memory = 0x3;
+ stype_ordering = 0x10;
+ break;
+
+ default:
+ pr_warn("Power management is using heavyweight sync 0\n");
+ }
+
+ /* A CM is required for all non-coherent states */
+ if (!mips_cm_present()) {
+ pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
+ goto out;
+ }
+
+ /*
+ * If interrupts were enabled whilst running a wait instruction on a
+ * non-coherent core then the VPE may end up processing interrupts
+ * whilst non-coherent. That would be bad.
+ */
+ if (cpu_wait == r4k_wait_irqoff)
+ set_bit(CPS_PM_NC_WAIT, state_support);
+ else
+ pr_warn("pm-cps: non-coherent wait unavailable\n");
+
+ /* Detect whether a CPC is present */
+ if (mips_cpc_present()) {
+ /* Detect whether clock gating is implemented */
+ if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK)
+ set_bit(CPS_PM_CLOCK_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPC does not support clock gating\n");
+
+ /* Power gating is available with CPS SMP & any CPC */
+ if (mips_cps_smp_in_use())
+ set_bit(CPS_PM_POWER_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
+ } else {
+ pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
+ }
+
+ for_each_present_cpu(cpu) {
+ err = cps_gen_core_entries(cpu);
+ if (err)
+ return err;
+ }
+out:
+ return 0;
+}
+arch_initcall(cps_pm_init);
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
new file mode 100644
index 00000000000..fefdf39d3df
--- /dev/null
+++ b/arch/mips/kernel/pm.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * CPU PM notifiers for saving/restoring general CPU state.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/init.h>
+
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pm.h>
+#include <asm/watch.h>
+
+/* Used by PM helper macros in asm/pm.h */
+struct mips_static_suspend_state mips_static_suspend_state;
+
+/**
+ * mips_cpu_save() - Save general CPU state.
+ * Ensures that general CPU context is saved, notably FPU and DSP.
+ */
+static int mips_cpu_save(void)
+{
+ /* Save FPU state */
+ lose_fpu(1);
+
+ /* Save DSP state */
+ save_dsp(current);
+
+ return 0;
+}
+
+/**
+ * mips_cpu_restore() - Restore general CPU state.
+ * Restores important CPU context.
+ */
+static void mips_cpu_restore(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Restore ASID */
+ if (current->mm)
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+
+ /* Restore DSP state */
+ restore_dsp(current);
+
+ /* Restore UserLocal */
+ if (cpu_has_userlocal)
+ write_c0_userlocal(current_thread_info()->tp_value);
+
+ /* Restore watch registers */
+ __restore_watch();
+}
+
+/**
+ * mips_pm_notifier() - Notifier for preserving general CPU context.
+ * @self: Notifier block.
+ * @cmd: CPU PM event.
+ * @v: Private data (unused).
+ *
+ * This is called when a CPU power management event occurs, and is used to
+ * ensure that important CPU context is preserved across a CPU power down.
+ */
+static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ int ret;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ ret = mips_cpu_save();
+ if (ret)
+ return NOTIFY_STOP;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ mips_cpu_restore();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_pm_notifier_block = {
+ .notifier_call = mips_pm_notifier,
+};
+
+static int __init mips_pm_init(void)
+{
+ return cpu_pm_register_notifier(&mips_pm_notifier_block);
+}
+arch_initcall(mips_pm_init);
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 36f06539824..037a44d962f 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -1,9 +1,7 @@
/*
- * linux/arch/mips/kernel/proc.c
- *
* Copyright (C) 1995, 1996, 2001 Ralf Baechle
* Copyright (C) 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2004 Maciej W. Rozycki
+ * Copyright (C) 2004 Maciej W. Rozycki
*/
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -12,62 +10,132 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
+#include <asm/prom.h>
unsigned int vced_count, vcei_count;
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
static int show_cpuinfo(struct seq_file *m, void *v)
{
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
unsigned long n = (unsigned long) v - 1;
unsigned int version = cpu_data[n].processor_id;
unsigned int fp_vers = cpu_data[n].fpu_id;
char fmt [64];
+ int i;
#ifdef CONFIG_SMP
- if (!cpu_isset(n, cpu_online_map))
+ if (!cpu_online(n))
return 0;
#endif
/*
* For the first processor also print the system type
*/
- if (n == 0)
+ if (n == 0) {
seq_printf(m, "system type\t\t: %s\n", get_system_type());
+ if (mips_get_machine_name())
+ seq_printf(m, "machine\t\t\t: %s\n",
+ mips_get_machine_name());
+ }
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
- cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
- seq_printf(m, fmt, __cpu_name[smp_processor_id()],
- (version >> 4) & 0x0f, version & 0x0f,
- (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
- seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
- cpu_data[n].udelay_val / (500000/HZ),
- (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+ cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
+ seq_printf(m, fmt, __cpu_name[n],
+ (version >> 4) & 0x0f, version & 0x0f,
+ (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+ seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
+ cpu_data[n].udelay_val / (500000/HZ),
+ (cpu_data[n].udelay_val / (5000/HZ)) % 100);
seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
seq_printf(m, "microsecond timers\t: %s\n",
- cpu_has_counter ? "yes" : "no");
+ cpu_has_counter ? "yes" : "no");
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "extra interrupt vector\t: %s\n",
- cpu_has_divec ? "yes" : "no");
- seq_printf(m, "hardware watchpoint\t: %s\n",
- cpu_has_watch ? "yes" : "no");
- seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
- cpu_has_mips16 ? " mips16" : "",
- cpu_has_mdmx ? " mdmx" : "",
- cpu_has_mips3d ? " mips3d" : "",
- cpu_has_smartmips ? " smartmips" : "",
- cpu_has_dsp ? " dsp" : "",
- cpu_has_mipsmt ? " mt" : ""
- );
+ cpu_has_divec ? "yes" : "no");
+ seq_printf(m, "hardware watchpoint\t: %s",
+ cpu_has_watch ? "yes, " : "no\n");
+ if (cpu_has_watch) {
+ seq_printf(m, "count: %d, address/irw mask: [",
+ cpu_data[n].watch_reg_count);
+ for (i = 0; i < cpu_data[n].watch_reg_count; i++)
+ seq_printf(m, "%s0x%04x", i ? ", " : "" ,
+ cpu_data[n].watch_reg_masks[i]);
+ seq_printf(m, "]\n");
+ }
+
+ seq_printf(m, "isa\t\t\t: mips1");
+ if (cpu_has_mips_2)
+ seq_printf(m, "%s", " mips2");
+ if (cpu_has_mips_3)
+ seq_printf(m, "%s", " mips3");
+ if (cpu_has_mips_4)
+ seq_printf(m, "%s", " mips4");
+ if (cpu_has_mips_5)
+ seq_printf(m, "%s", " mips5");
+ if (cpu_has_mips32r1)
+ seq_printf(m, "%s", " mips32r1");
+ if (cpu_has_mips32r2)
+ seq_printf(m, "%s", " mips32r2");
+ if (cpu_has_mips64r1)
+ seq_printf(m, "%s", " mips64r1");
+ if (cpu_has_mips64r2)
+ seq_printf(m, "%s", " mips64r2");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "ASEs implemented\t:");
+ if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
+ if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx");
+ if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d");
+ if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips");
+ if (cpu_has_dsp) seq_printf(m, "%s", " dsp");
+ if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2");
+ if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
+ if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
+ if (cpu_has_vz) seq_printf(m, "%s", " vz");
+ if (cpu_has_msa) seq_printf(m, "%s", " msa");
+ if (cpu_has_eva) seq_printf(m, "%s", " eva");
+ seq_printf(m, "\n");
+
+ if (cpu_has_mmips) {
+ seq_printf(m, "micromips kernel\t: %s\n",
+ (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ }
seq_printf(m, "shadow register sets\t: %d\n",
- cpu_data[n].srsets);
+ cpu_data[n].srsets);
+ seq_printf(m, "kscratch registers\t: %d\n",
+ hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
- cpu_has_vce ? "%u" : "not available");
+ cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
seq_printf(m, "\n");
return 0;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 2c09a442e5e..0a1ec0f3bef 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,22 +7,21 @@
* Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
-#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
-#include <linux/a.out.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
@@ -33,8 +32,8 @@
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -44,33 +43,17 @@
#include <asm/inst.h>
#include <asm/stacktrace.h>
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void __noreturn cpu_idle(void)
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
{
- /* endless idle loop with no priority at all */
- while (1) {
- tick_nohz_stop_sched_tick();
- while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- extern void smtc_idle_loop_hook(void);
-
- smtc_idle_loop_hook();
-#endif
- if (cpu_wait)
- (*cpu_wait)();
- }
- tick_nohz_restart_sched_tick();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
+ /* What the heck is this check doing? */
+ if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+ play_dead();
}
+#endif
asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
@@ -78,18 +61,15 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
/* New thread loses kernel privileges. */
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
-#ifdef CONFIG_64BIT
- status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
-#endif
status |= KU_USER;
regs->cp0_status = status;
clear_used_math();
clear_fpu_owner();
- if (cpu_has_dsp)
- __init_dsp();
+ init_dsp();
+ clear_thread_flag(TIF_MSA_CTX_LIVE);
+ disable_msa();
regs->cp0_epc = pc;
regs->regs[29] = sp;
- current_thread_info()->addr_limit = USER_DS;
}
void exit_thread(void)
@@ -100,19 +80,21 @@ void flush_thread(void)
{
}
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
- unsigned long unused, struct task_struct *p, struct pt_regs *regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
- struct pt_regs *childregs;
- long childksp;
+ struct pt_regs *childregs, *regs = current_pt_regs();
+ unsigned long childksp;
p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
preempt_disable();
- if (is_fpu_owner())
+ if (is_msa_enabled())
+ save_msa(p);
+ else if (is_fpu_owner())
save_fp(p);
if (cpu_has_dsp)
@@ -122,27 +104,33 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
- *childregs = *regs;
- childregs->regs[7] = 0; /* Clear error flag */
-
-#if defined(CONFIG_BINFMT_IRIX)
- if (current->personality != PER_LINUX) {
- /* Under IRIX things are a little different. */
- childregs->regs[3] = 1;
- regs->regs[3] = 0;
- }
-#endif
- childregs->regs[2] = 0; /* Child gets zero as return value */
- regs->regs[2] = p->pid;
-
- if (childregs->cp0_status & ST0_CU0) {
- childregs->regs[28] = (unsigned long) ti;
- childregs->regs[29] = childksp;
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
+ if (unlikely(p->flags & PF_KTHREAD)) {
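+ /*
+ * Kernel thread: ret_from_kernel_thread will call the function
+ * held in reg16 with the argument held in reg17.
+ */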
+ unsigned long status = p->thread.cp0_status;
+ memset(childregs, 0, sizeof(struct pt_regs));
ti->addr_limit = KERNEL_DS;
- } else {
- childregs->regs[29] = usp;
- ti->addr_limit = USER_DS;
+ p->thread.reg16 = usp; /* fn */
+ p->thread.reg17 = arg;
+ p->thread.reg29 = childksp;
+ p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
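+ /*
+ * R3000-style Status keeps a 3-deep KU/IE stack: shift the current
+ * bits into the "previous" slots so the initial rfe unwinds into
+ * kernel mode with the saved interrupt state.
+ */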
+ status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+ ((status & (ST0_KUC | ST0_IEC)) << 2);
+#else
+ status |= ST0_EXL;
+#endif
+ childregs->cp0_status = status;
+ return 0;
}
+ *childregs = *regs;
+ childregs->regs[7] = 0; /* Clear error flag */
+ childregs->regs[2] = 0; /* Child gets zero as return value */
+ if (usp)
+ childregs->regs[29] = usp;
+ ti->addr_limit = USER_DS;
+
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
@@ -150,19 +138,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
- p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
clear_tsk_thread_flag(p, TIF_USEDFPU);
#ifdef CONFIG_MIPS_MT_FPAFF
- /*
- * FPU affinity support is cleaner if we track the
- * user-visible CPU affinity from the very beginning.
- * The generic cpus_allowed mask will already have
- * been copied from the parent before copy_thread
- * is invoked.
- */
- p->thread.user_cpus_allowed = p->cpus_allowed;
+ clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
if (clone_flags & CLONE_SETTLS)
@@ -174,7 +155,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
- memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
+ int i;
+
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ memcpy(&r[i], &current->thread.fpu.fpr[i], sizeof(*r));
+
+ memcpy(&r[NUM_FPU_REGS], &current->thread.fpu.fcr31,
+ sizeof(current->thread.fpu.fcr31));
return 1;
}
@@ -209,43 +196,23 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
- memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
-
- return 1;
-}
+ int i;
-/*
- * Create a kernel thread
- */
-static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
-{
- do_exit(fn(arg));
-}
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr));
-long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
-{
- struct pt_regs regs;
+ memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31,
+ sizeof(t->thread.fpu.fcr31));
- memset(&regs, 0, sizeof(regs));
+ return 1;
+}
- regs.regs[4] = (unsigned long) arg;
- regs.regs[5] = (unsigned long) fn;
- regs.cp0_epc = (unsigned long) kernel_thread_helper;
- regs.cp0_status = read_c0_status();
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
- regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
- ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
-#else
- regs.cp0_status |= ST0_EXL;
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
#endif
- /* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
-}
-
-/*
- *
- */
struct mips_frame_info {
void *func;
unsigned long func_size;
@@ -253,36 +220,122 @@ struct mips_frame_info {
int pc_offset;
};
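+/*
+ * Absolute target of a 'j' instruction: the 26-bit word index is
+ * shifted left by 2 and merged with the top 4 bits of the PC.
+ */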
+#define J_TARGET(pc,target) \
+ (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
static inline int is_ra_save_ins(union mips_instruction *ip)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ union mips_instruction mmi;
+
+ /*
+ * swsp ra,offset
+ * swm16 reglist,offset(sp)
+ * swm32 reglist,offset(sp)
+ * sw32 ra,offset(sp)
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is way more fun...
+ */
+ if (mm_insn_16bit(ip->halfword[0])) {
+ mmi.word = (ip->halfword[0] << 16);
+ return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+ mmi.mm16_r5_format.rt == 31) ||
+ (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+ mmi.mm16_m_format.func == mm_swm16_op));
+ } else {
+ mmi.halfword[0] = ip->halfword[1];
+ mmi.halfword[1] = ip->halfword[0];
+ return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+ mmi.mm_m_format.rd > 9 &&
+ mmi.mm_m_format.base == 29 &&
+ mmi.mm_m_format.func == mm_swm32_func) ||
+ (mmi.i_format.opcode == mm_sw32_op &&
+ mmi.i_format.rs == 29 &&
+ mmi.i_format.rt == 31));
+ }
+#else
/* sw / sd $ra, offset($sp) */
return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
ip->i_format.rs == 29 &&
ip->i_format.rt == 31;
+#endif
}
-static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+static inline int is_jump_ins(union mips_instruction *ip)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16,jrc,jalr16,jalrs16
+ * jal
+ * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is kind of more fun...
+ */
+ union mips_instruction mmi;
+
+ mmi.word = (ip->halfword[0] << 16);
+
+ if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+ (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+ ip->j_format.opcode == mm_jal32_op)
+ return 1;
+ if (ip->r_format.opcode != mm_pool32a_op ||
+ ip->r_format.func != mm_pool32axf_op)
+ return 0;
+ return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+#else
+ if (ip->j_format.opcode == j_op)
+ return 1;
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * addiusp -imm
+ * addius5 sp,-imm
+ * addiu32 sp,sp,-imm
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is not more fun...
+ */
+ if (mm_insn_16bit(ip->halfword[0])) {
+ union mips_instruction mmi;
+
+ mmi.word = (ip->halfword[0] << 16);
+ return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+ mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
+ (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+ mmi.mm16_r5_format.rt == 29));
+ }
+ return (ip->mm_i_format.opcode == mm_addiu32_op &&
+ ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
return 1;
+#endif
return 0;
}
static int get_frame_info(struct mips_frame_info *info)
{
+#ifdef CONFIG_CPU_MICROMIPS
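+ /* microMIPS function addresses have the ISA mode bit set; strip it. */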
+ union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+#else
union mips_instruction *ip = info->func;
+#endif
unsigned max_insns = info->func_size / sizeof(union mips_instruction);
unsigned i;
@@ -298,11 +351,30 @@ static int get_frame_info(struct mips_frame_info *info)
for (i = 0; i < max_insns; i++, ip++) {
- if (is_jal_jalr_jr_ins(ip))
+ if (is_jump_ins(ip))
break;
if (!info->frame_size) {
if (is_sp_move_ins(ip))
+ {
+#ifdef CONFIG_CPU_MICROMIPS
+ if (mm_insn_16bit(ip->halfword[0])) {
+ unsigned short tmp;
+
+ if (ip->halfword[0] & mm_addiusp_func) {
+ tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+ info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+ } else {
+ tmp = (ip->halfword[0] >> 1);
+ info->frame_size = -(signed short)(tmp & 0xf);
+ }
+ ip = (void *) &ip->halfword[1];
+ ip--;
+ } else
+#endif
info->frame_size = - ip->i_format.simmediate;
+ }
continue;
}
if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
@@ -322,15 +394,42 @@ err:
static struct mips_frame_info schedule_mfi __read_mostly;
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+ return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
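+ /*
+ * Without kallsyms, assume schedule() is a thin wrapper that tail-calls
+ * __schedule() and follow the first 'j' instruction we find.
+ */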
+ union mips_instruction *ip = (void *)schedule;
+ int max_insns = 8;
+ int i;
+
+ for (i = 0; i < max_insns; i++, ip++) {
+ if (ip->j_format.opcode == j_op)
+ return J_TARGET(ip, ip->j_format.target);
+ }
+ return 0;
+}
+#endif
+
static int __init frame_info_init(void)
{
unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
unsigned long ofs;
+#endif
+ unsigned long addr;
+
+ addr = get___schedule_addr();
+ if (!addr)
+ addr = (unsigned long)schedule;
- kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
+#ifdef CONFIG_KALLSYMS
+ kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
- schedule_mfi.func = schedule;
+ schedule_mfi.func = (void *)addr;
schedule_mfi.func_size = size;
get_frame_info(&schedule_mfi);
@@ -364,18 +463,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
#ifdef CONFIG_KALLSYMS
-/* used by show_backtrace() */
-unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
- unsigned long pc, unsigned long *ra)
+/* generic stack unwinding function */
+unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+ unsigned long *sp,
+ unsigned long pc,
+ unsigned long *ra)
{
- unsigned long stack_page;
struct mips_frame_info info;
unsigned long size, ofs;
int leaf;
extern void ret_from_irq(void);
extern void ret_from_exception(void);
- stack_page = (unsigned long)task_stack_page(task);
if (!stack_page)
return 0;
@@ -401,7 +500,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
return 0;
/*
- * Return ra if an exception occured at the first instruction
+ * Return ra if an exception occurred at the first instruction
*/
if (unlikely(ofs == 0)) {
pc = *ra;
@@ -434,6 +533,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
*ra = 0;
return __kernel_text_address(pc) ? pc : 0;
}
+EXPORT_SYMBOL(unwind_stack_by_address);
+
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra)
+{
+ unsigned long stack_page = (unsigned long)task_stack_page(task);
+ return unwind_stack_by_address(stack_page, sp, pc, ra);
+}
#endif
/*
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 00000000000..5d39bb85bf3
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,57 @@
+/*
+ * MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+
+#include <asm/page.h>
+#include <asm/prom.h>
+
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+ if (name == NULL)
+ return;
+
+ strlcpy(mips_machine_name, name, sizeof(mips_machine_name));
+ pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+ return mips_machine_name;
+}
+
+#ifdef CONFIG_OF
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ return add_memory_region(base, size, BOOT_MEM_RAM);
+}
+
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
+}
+
+void __init __dt_setup_arch(void *bph)
+{
+ if (!early_init_dt_scan(bph))
+ return;
+
+ mips_set_machine_name(of_flat_dt_get_machine_name());
+}
+#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a..f639ccd5060 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -15,16 +15,21 @@
* binaries.
*/
#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
+#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
+#include <linux/ftrace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -34,11 +39,14 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/system.h>
+#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -46,11 +54,12 @@
*/
void ptrace_disable(struct task_struct *child)
{
- /* Nothing to do.. */
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
- * Read a general register set. We always use the 64-bit format, even
+ * Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
@@ -105,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
- unsigned int tmp;
if (!access_ok(VERIFY_WRITE, data, 33 * 8))
return -EIO;
if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __put_user(fregs[i], i + (__u64 __user *) data);
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
} else {
for (i = 0; i < 32; i++)
__put_user((__u64) -1, i + (__u64 __user *) data);
}
__put_user(child->thread.fpu.fcr31, data + 64);
-
- preempt_disable();
- if (cpu_has_fpu) {
- unsigned int flags;
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- }
- } else {
- tmp = 0;
- }
- preempt_enable();
- __put_user(tmp, data + 65);
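+ /*
+ * fpu_id caches the FIR (implementation/revision) register probed
+ * at boot, so the FPU need not be enabled here.
+ */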
+ __put_user(current_cpu_data.fpu_id, data + 65);
return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
- fpureg_t *fregs;
+ union fpureg *fregs;
+ u64 fpr_val;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -157,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
fregs = get_fpu_regs(child);
- for (i = 0; i < 32; i++)
- __get_user(fregs[i], i + (__u64 __user *) data);
+ for (i = 0; i < 32; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
__get_user(child->thread.fpu.fcr31, data + 64);
@@ -167,9 +157,261 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+int ptrace_get_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ enum pt_watch_style style;
+ int i;
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+
+#ifdef CONFIG_32BIT
+ style = pt_watch_style_mips32;
+#define WATCH_STYLE mips32
+#else
+ style = pt_watch_style_mips64;
+#define WATCH_STYLE mips64
+#endif
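+ /* WATCH_STYLE names the pt_watch_regs union member matching 'style'. */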
+
+ __put_user(style, &addr->style);
+ __put_user(boot_cpu_data.watch_reg_use_cnt,
+ &addr->WATCH_STYLE.num_valid);
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __put_user(child->thread.watch.mips3264.watchlo[i],
+ &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
+ &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(boot_cpu_data.watch_reg_masks[i],
+ &addr->WATCH_STYLE.watch_masks[i]);
+ }
+ for (; i < 8; i++) {
+ __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
+ }
+
+ return 0;
+}
+
+int ptrace_set_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ int i;
+ int watch_active = 0;
+ unsigned long lt[NUM_WATCH_REGS];
+ u16 ht[NUM_WATCH_REGS];
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+ /* Check the values. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
+#ifdef CONFIG_32BIT
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+#else
+ if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
+ if (lt[i] & 0xffffffff80000000UL)
+ return -EINVAL;
+ } else {
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+ }
+#endif
+ __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
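+ /* Only the byte-mask bits (3..11) of WatchHi may be set from user space. */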
+ if (ht[i] & ~0xff8)
+ return -EINVAL;
+ }
+ /* Install them. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
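+ /* Bits 2..0 of WatchLo are the I, R and W enables; any of them arms the watchpoint. */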
+ if (lt[i] & 7)
+ watch_active = 1;
+ child->thread.watch.mips3264.watchlo[i] = lt[i];
+ /* Set the G bit. */
+ child->thread.watch.mips3264.watchhi[i] = ht[i];
+ }
+
+ if (watch_active)
+ set_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ else
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+
+ return 0;
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs, 0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs newregs;
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &newregs,
+ 0, sizeof(newregs));
+ if (ret)
+ return ret;
+
+ *task_pt_regs(target) = newregs;
+
+ return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned i;
+ int err;
+ u64 fpr_val;
+
+ /* XXX fcr31 */
+
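+ /*
+ * If each saved FPR is already 64 bits the thread's FP context matches
+ * elf_fpregset_t and can be copied wholesale; the sizeof operand is not
+ * evaluated, so the as-yet-unset 'i' is harmless.
+ */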
+ if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned i;
+ int err;
+ u64 fpr_val;
+
+ /* XXX fcr31 */
+
+ if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+enum mips_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+};
+
+static const struct user_regset mips_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+ .get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_mips_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips_regsets,
+ .n = ARRAY_SIZE(mips_regsets),
+};
+
+static const struct user_regset mips64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_mips64_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+ return &user_mips_view;
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+ if (test_thread_flag(TIF_32BIT_REGS))
+ return &user_mips_view;
+#endif
+
+ return &user_mips64_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -181,6 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
+ union fpureg *fregs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
@@ -191,26 +434,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
- if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
#ifdef CONFIG_32BIT
+ if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
- else
- tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-#endif
-#ifdef CONFIG_64BIT
- tmp = fregs[addr - FPR_BASE];
-#endif
- } else {
- tmp = -1; /* FP not yet used */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
}
+#endif
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -235,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -298,7 +507,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
@@ -318,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
@@ -327,22 +536,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
child->thread.fpu.fcr31 = 0;
}
#ifdef CONFIG_32BIT
- /*
- * The odd registers are actually the high order bits
- * of the values stored in the even registers - unless
- * we're using r2k_switch.S.
- */
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- fregs[addr - FPR_BASE] |= data;
+ if (test_thread_flag(TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers - unless we're using r2k_switch.S.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
}
#endif
-#ifdef CONFIG_64BIT
- fregs[addr - FPR_BASE] = data;
-#endif
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
@@ -390,54 +595,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) data);
+ ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) data);
+ ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, datavp);
break;
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
- }
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child, addrp);
break;
- case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child, addrp);
break;
default:
@@ -448,56 +630,52 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-static inline int audit_arch(void)
-{
- int arch = EM_MIPS;
-#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
-#endif
-#if defined(__LITTLE_ENDIAN)
- arch |= __AUDIT_ARCH_LE;
-#endif
- return arch;
-}
-
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
-asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
- /* do the secure computing check first */
- if (!entryexit)
- secure_computing(regs->regs[0]);
+ long ret = 0;
+ user_exit();
- if (unlikely(current->audit_context) && entryexit)
- audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
- regs->regs[2]);
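+ /* A -1 result tells the caller to skip the system call. */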
+ if (secure_computing(syscall) == -1)
+ return -1;
- if (!(current->ptrace & PT_PTRACED))
- goto out;
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ ret = -1;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- goto out;
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
- /* The 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ audit_syscall_entry(syscall_get_arch(),
+ syscall,
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+ return syscall;
+}
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ /*
+ * We may come here right after calling schedule_user()
+ * or do_notify_resume(), in which case we can be in RCU
+ * user mode.
*/
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ user_exit();
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[2]);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
-out:
- if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(audit_arch(), regs->regs[0],
- regs->regs[4], regs->regs[5],
- regs->regs[6], regs->regs[7]);
+ user_enter();
}
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 76818be6ba7..b40c3ca60ee 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -15,13 +15,13 @@
* binaries.
*/
#include <linux/compiler.h>
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
@@ -32,65 +32,21 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
-int ptrace_getregs(struct task_struct *child, __s64 __user *data);
-int ptrace_setregs(struct task_struct *child, __s64 __user *data);
-
-int ptrace_getfpregs(struct task_struct *child, __u32 __user *data);
-int ptrace_setfpregs(struct task_struct *child, __u32 __user *data);
-
/*
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
* work. I don't know how to fix this.
*/
-asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
{
- struct task_struct *child;
+ int addr = caddr;
+ int data = cdata;
int ret;
-#if 0
- printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
- (int) request, (int) pid, (unsigned long) addr,
- (unsigned long) data);
-#endif
- lock_kernel();
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- goto out;
- }
-
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- goto out;
- }
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned int tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp, (unsigned int __user *) (unsigned long) data);
- break;
- }
/*
* Read 4 bytes of the other process' storage
@@ -124,6 +80,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
+ union fpureg *fregs;
unsigned int tmp;
regs = task_pt_regs(child);
@@ -134,21 +91,23 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
- if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
-
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
+ if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
- else
- tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
- } else {
- tmp = -1; /* FP not yet used */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
}
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -168,53 +127,17 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- tmp = 0;
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
- goto out_tsk;
+ goto out;
}
dregs = __get_dsp_regs(child);
tmp = (unsigned long) (dregs[addr - DSP_BASE]);
@@ -224,29 +147,19 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
- goto out_tsk;
+ goto out;
}
tmp = child->thread.dsp.dspcontrol;
break;
default:
tmp = 0;
ret = -EIO;
- goto out_tsk;
+ goto out;
}
ret = put_user(tmp, (unsigned __user *) (unsigned long) data);
break;
}
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- ret = -EIO;
- break;
-
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
@@ -282,7 +195,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
@@ -290,20 +203,17 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
sizeof(child->thread.fpu));
child->thread.fpu.fcr31 = 0;
}
- /*
- * The odd registers are actually the high order bits
- * of the values stored in the even registers - unless
- * we're using r2k_switch.S.
- */
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- /* Must cast, lest sign extension fill upper
- bits! */
- fregs[addr - FPR_BASE] |= (unsigned int)data;
+ if (test_thread_flag(TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers - unless we're using r2k_switch.S.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
}
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
@@ -361,63 +271,30 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
- }
-
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
-
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned int __user *) (unsigned long) data);
break;
- case PTRACE_DETACH: /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
+ case PTRACE_GET_THREAD_AREA_3264:
+ ret = put_user(task_thread_info(child)->tp_value,
+ (unsigned long __user *) (unsigned long) data);
break;
- case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message,
- (unsigned int __user *) (unsigned long) data);
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child,
+ (struct pt_watch_regs __user *) (unsigned long) addr);
break;
- case PTRACE_GET_THREAD_AREA_3264:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) (unsigned long) data);
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child,
+ (struct pt_watch_regs __user *) (unsigned long) addr);
break;
default:
- ret = ptrace_request(child, request, addr, data);
+ ret = compat_ptrace_request(child, request, addr, data);
break;
}
-
-out_tsk:
- put_task_struct(child);
out:
- unlock_kernel();
return ret;
}
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index ac68e68339d..f31063dbdae 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -6,7 +6,7 @@
* Copyright (C) 1996, 1998 by Ralf Baechle
*
* Multi-arch abstraction and asm macros for easier reading:
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* Further modifications to make this work:
* Copyright (c) 1998 Harald Koerfgen
@@ -30,38 +30,38 @@
LEAF(_save_fp_context)
li v0, 0 # assume success
cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
+ EX(swc1 $f0,(SC_FPREGS+0)(a0))
+ EX(swc1 $f1,(SC_FPREGS+8)(a0))
+ EX(swc1 $f2,(SC_FPREGS+16)(a0))
+ EX(swc1 $f3,(SC_FPREGS+24)(a0))
+ EX(swc1 $f4,(SC_FPREGS+32)(a0))
+ EX(swc1 $f5,(SC_FPREGS+40)(a0))
+ EX(swc1 $f6,(SC_FPREGS+48)(a0))
+ EX(swc1 $f7,(SC_FPREGS+56)(a0))
+ EX(swc1 $f8,(SC_FPREGS+64)(a0))
+ EX(swc1 $f9,(SC_FPREGS+72)(a0))
+ EX(swc1 $f10,(SC_FPREGS+80)(a0))
+ EX(swc1 $f11,(SC_FPREGS+88)(a0))
+ EX(swc1 $f12,(SC_FPREGS+96)(a0))
+ EX(swc1 $f13,(SC_FPREGS+104)(a0))
+ EX(swc1 $f14,(SC_FPREGS+112)(a0))
+ EX(swc1 $f15,(SC_FPREGS+120)(a0))
+ EX(swc1 $f16,(SC_FPREGS+128)(a0))
+ EX(swc1 $f17,(SC_FPREGS+136)(a0))
+ EX(swc1 $f18,(SC_FPREGS+144)(a0))
+ EX(swc1 $f19,(SC_FPREGS+152)(a0))
+ EX(swc1 $f20,(SC_FPREGS+160)(a0))
+ EX(swc1 $f21,(SC_FPREGS+168)(a0))
+ EX(swc1 $f22,(SC_FPREGS+176)(a0))
+ EX(swc1 $f23,(SC_FPREGS+184)(a0))
+ EX(swc1 $f24,(SC_FPREGS+192)(a0))
+ EX(swc1 $f25,(SC_FPREGS+200)(a0))
+ EX(swc1 $f26,(SC_FPREGS+208)(a0))
+ EX(swc1 $f27,(SC_FPREGS+216)(a0))
+ EX(swc1 $f28,(SC_FPREGS+224)(a0))
+ EX(swc1 $f29,(SC_FPREGS+232)(a0))
+ EX(swc1 $f30,(SC_FPREGS+240)(a0))
+ EX(swc1 $f31,(SC_FPREGS+248)(a0))
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
jr ra
@@ -82,38 +82,38 @@ LEAF(_save_fp_context)
LEAF(_restore_fp_context)
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lwc1 $f0,(SC_FPREGS+0)(a0))
+ EX(lwc1 $f1,(SC_FPREGS+8)(a0))
+ EX(lwc1 $f2,(SC_FPREGS+16)(a0))
+ EX(lwc1 $f3,(SC_FPREGS+24)(a0))
+ EX(lwc1 $f4,(SC_FPREGS+32)(a0))
+ EX(lwc1 $f5,(SC_FPREGS+40)(a0))
+ EX(lwc1 $f6,(SC_FPREGS+48)(a0))
+ EX(lwc1 $f7,(SC_FPREGS+56)(a0))
+ EX(lwc1 $f8,(SC_FPREGS+64)(a0))
+ EX(lwc1 $f9,(SC_FPREGS+72)(a0))
+ EX(lwc1 $f10,(SC_FPREGS+80)(a0))
+ EX(lwc1 $f11,(SC_FPREGS+88)(a0))
+ EX(lwc1 $f12,(SC_FPREGS+96)(a0))
+ EX(lwc1 $f13,(SC_FPREGS+104)(a0))
+ EX(lwc1 $f14,(SC_FPREGS+112)(a0))
+ EX(lwc1 $f15,(SC_FPREGS+120)(a0))
+ EX(lwc1 $f16,(SC_FPREGS+128)(a0))
+ EX(lwc1 $f17,(SC_FPREGS+136)(a0))
+ EX(lwc1 $f18,(SC_FPREGS+144)(a0))
+ EX(lwc1 $f19,(SC_FPREGS+152)(a0))
+ EX(lwc1 $f20,(SC_FPREGS+160)(a0))
+ EX(lwc1 $f21,(SC_FPREGS+168)(a0))
+ EX(lwc1 $f22,(SC_FPREGS+176)(a0))
+ EX(lwc1 $f23,(SC_FPREGS+184)(a0))
+ EX(lwc1 $f24,(SC_FPREGS+192)(a0))
+ EX(lwc1 $f25,(SC_FPREGS+200)(a0))
+ EX(lwc1 $f26,(SC_FPREGS+208)(a0))
+ EX(lwc1 $f27,(SC_FPREGS+216)(a0))
+ EX(lwc1 $f28,(SC_FPREGS+224)(a0))
+ EX(lwc1 $f29,(SC_FPREGS+232)(a0))
+ EX(lwc1 $f30,(SC_FPREGS+240)(a0))
+ EX(lwc1 $f31,(SC_FPREGS+248)(a0))
jr ra
ctc1 t0,fcr31
END(_restore_fp_context)
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 656bde2e11b..20b7b040e76 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -5,7 +5,7 @@
* Copyright (C) 1994, 1995, 1996 by Andreas Busse
*
* Multi-cpu abstraction and macros for easier reading:
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* Further modifications to make this work:
* Copyright (c) 1998-2000 Harald Koerfgen
@@ -15,7 +15,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
-#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
@@ -43,29 +42,17 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, int usedfpu)
*/
LEAF(resume)
-#ifndef CONFIG_CPU_HAS_LLSC
- sw zero, ll_bit
-#endif
mfc0 t1, CP0_STATUS
sw t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
- /*
- * check if we need to save FPU registers
- */
- lw t3, TASK_THREAD_INFO(a0)
- lw t0, TI_FLAGS(t3)
- li t1, _TIF_USEDFPU
- and t2, t0, t1
- beqz t2, 1f
- nor t1, zero, t1
+ beqz a3, 1f
- and t0, t0, t1
- sw t0, TI_FLAGS(t3)
+ PTR_L t3, TASK_THREAD_INFO(a0)
/*
* clear saved user stack CU1 bit
@@ -78,6 +65,13 @@ LEAF(resume)
fpu_save_single a0, t0 # clobbers t0
1:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index dbd42adc52e..8352523568e 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -6,7 +6,7 @@
* Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle
*
* Multi-arch abstraction and asm macros for easier reading:
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
@@ -30,12 +30,20 @@
.endm
.set noreorder
- .set mips3
+ .set arch=r4000
LEAF(_save_fp_context)
cfc1 t1, fcr31
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+ .set push
+#ifdef CONFIG_CPU_MIPS32_R2
+ .set mips64r2
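+ /* Status.FR is bit 26; shifted left 5 it lands in the sign bit, so bgez branches when FR=0. */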
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip storing odd if FR=0
+ nop
+#endif
/* Store the 16 odd double precision registers */
EX sdc1 $f1, SC_FPREGS+8(a0)
EX sdc1 $f3, SC_FPREGS+24(a0)
@@ -53,6 +61,7 @@ LEAF(_save_fp_context)
EX sdc1 $f27, SC_FPREGS+216(a0)
EX sdc1 $f29, SC_FPREGS+232(a0)
EX sdc1 $f31, SC_FPREGS+248(a0)
+1: .set pop
#endif
/* Store the 16 even double precision registers */
@@ -82,7 +91,31 @@ LEAF(_save_fp_context)
LEAF(_save_fp_context32)
cfc1 t1, fcr31
- EX sdc1 $f0, SC32_FPREGS+0(a0)
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip storing odd if FR=0
+ nop
+
+ /* Store the 16 odd double precision registers */
+ EX sdc1 $f1, SC32_FPREGS+8(a0)
+ EX sdc1 $f3, SC32_FPREGS+24(a0)
+ EX sdc1 $f5, SC32_FPREGS+40(a0)
+ EX sdc1 $f7, SC32_FPREGS+56(a0)
+ EX sdc1 $f9, SC32_FPREGS+72(a0)
+ EX sdc1 $f11, SC32_FPREGS+88(a0)
+ EX sdc1 $f13, SC32_FPREGS+104(a0)
+ EX sdc1 $f15, SC32_FPREGS+120(a0)
+ EX sdc1 $f17, SC32_FPREGS+136(a0)
+ EX sdc1 $f19, SC32_FPREGS+152(a0)
+ EX sdc1 $f21, SC32_FPREGS+168(a0)
+ EX sdc1 $f23, SC32_FPREGS+184(a0)
+ EX sdc1 $f25, SC32_FPREGS+200(a0)
+ EX sdc1 $f27, SC32_FPREGS+216(a0)
+ EX sdc1 $f29, SC32_FPREGS+232(a0)
+ EX sdc1 $f31, SC32_FPREGS+248(a0)
+
+ /* Store the 16 even double precision registers */
+1: EX sdc1 $f0, SC32_FPREGS+0(a0)
EX sdc1 $f2, SC32_FPREGS+16(a0)
EX sdc1 $f4, SC32_FPREGS+32(a0)
EX sdc1 $f6, SC32_FPREGS+48(a0)
@@ -113,8 +146,17 @@ LEAF(_save_fp_context32)
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
- EX lw t0, SC_FPC_CSR(a0)
-#ifdef CONFIG_64BIT
+ EX lw t1, SC_FPC_CSR(a0)
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+ .set push
+#ifdef CONFIG_CPU_MIPS32_R2
+ .set mips64r2
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip loading odd if FR=0
+ nop
+#endif
EX ldc1 $f1, SC_FPREGS+8(a0)
EX ldc1 $f3, SC_FPREGS+24(a0)
EX ldc1 $f5, SC_FPREGS+40(a0)
@@ -131,6 +173,7 @@ LEAF(_restore_fp_context)
EX ldc1 $f27, SC_FPREGS+216(a0)
EX ldc1 $f29, SC_FPREGS+232(a0)
EX ldc1 $f31, SC_FPREGS+248(a0)
+1: .set pop
#endif
EX ldc1 $f0, SC_FPREGS+0(a0)
EX ldc1 $f2, SC_FPREGS+16(a0)
@@ -148,7 +191,7 @@ LEAF(_restore_fp_context)
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context)
@@ -156,8 +199,31 @@ LEAF(_restore_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
- EX lw t0, SC32_FPC_CSR(a0)
- EX ldc1 $f0, SC32_FPREGS+0(a0)
+ EX lw t1, SC32_FPC_CSR(a0)
+
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip loading odd if FR=0
+ nop
+
+ EX ldc1 $f1, SC32_FPREGS+8(a0)
+ EX ldc1 $f3, SC32_FPREGS+24(a0)
+ EX ldc1 $f5, SC32_FPREGS+40(a0)
+ EX ldc1 $f7, SC32_FPREGS+56(a0)
+ EX ldc1 $f9, SC32_FPREGS+72(a0)
+ EX ldc1 $f11, SC32_FPREGS+88(a0)
+ EX ldc1 $f13, SC32_FPREGS+104(a0)
+ EX ldc1 $f15, SC32_FPREGS+120(a0)
+ EX ldc1 $f17, SC32_FPREGS+136(a0)
+ EX ldc1 $f19, SC32_FPREGS+152(a0)
+ EX ldc1 $f21, SC32_FPREGS+168(a0)
+ EX ldc1 $f23, SC32_FPREGS+184(a0)
+ EX ldc1 $f25, SC32_FPREGS+200(a0)
+ EX ldc1 $f27, SC32_FPREGS+216(a0)
+ EX ldc1 $f29, SC32_FPREGS+232(a0)
+ EX ldc1 $f31, SC32_FPREGS+248(a0)
+
+1: EX ldc1 $f0, SC32_FPREGS+0(a0)
EX ldc1 $f2, SC32_FPREGS+16(a0)
EX ldc1 $f4, SC32_FPREGS+32(a0)
EX ldc1 $f6, SC32_FPREGS+48(a0)
@@ -173,7 +239,7 @@ LEAF(_restore_fp_context32)
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context32)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d9bfae53c43..81ca3f70fe2 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1994, 1995, 1996, by Andreas Busse
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 MIPS Technologies, Inc.
@@ -15,7 +15,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
-#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
@@ -29,54 +28,56 @@
*/
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
-/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
+#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, s32 fp_save)
*/
.align 5
LEAF(resume)
-#ifndef CONFIG_CPU_HAS_LLSC
- sw zero, ll_bit
-#endif
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
/*
- * check if we need to save FPU registers
+ * Check whether we need to save any FP context. FP context is saved
+ * iff the process has used the context with the scalar FPU or the MSA
+ * ASE in the current time slice, as indicated by _TIF_USEDFPU and
+ * _TIF_USEDMSA respectively. switch_to will have set fp_save
+ * accordingly to an FP_SAVE_ enum value.
*/
- PTR_L t3, TASK_THREAD_INFO(a0)
- LONG_L t0, TI_FLAGS(t3)
- li t1, _TIF_USEDFPU
- and t2, t0, t1
- beqz t2, 1f
- nor t1, zero, t1
-
- and t0, t0, t1
- LONG_S t0, TI_FLAGS(t3)
+ beqz a3, 2f
/*
- * clear saved user stack CU1 bit
+ * We do. Clear the saved CU1 bit for prev, such that next time it is
+ * scheduled it will start in userland with the FPU disabled. If the
+ * task uses the FPU then it will be enabled again via the do_cpu trap.
+ * This allows us to lazily restore the FP context.
*/
+ PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
+ /* Check whether we're saving scalar or vector context. */
+ bgtz a3, 1f
+
+ /* Save 128b MSA vector context. */
+ msa_save_all a0
+ b 2f
+
+1: /* Save 32b/64b scalar FP context. */
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
-1:
+2:
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
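+ /* On !SMP the stack protector uses one global canary; switch it to the incoming task's value. */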
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
/*
* The order of restoring the registers takes care of the race
@@ -87,18 +88,6 @@
PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-writes of Status must be atomic on a VPE */
- mfc0 t2, CP0_TCSTATUS
- ori t1, t2, TCSTATUS_IXMT
- mtc0 t1, CP0_TCSTATUS
- andi t2, t2, TCSTATUS_IXMT
- _ehb
- DMT 8 # dmt t0
- move t1,ra
- jal mips_ihb
- move ra,t1
-#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01
and t1, a3
@@ -107,27 +96,17 @@
and a2, a3
or a2, t1
mtc0 a2, CP0_STATUS
-#ifdef CONFIG_MIPS_MT_SMTC
- _ehb
- andi t0, t0, VPECONTROL_TE
- beqz t0, 1f
- emt
-1:
- mfc0 t1, CP0_TCSTATUS
- xori t1, t1, TCSTATUS_IXMT
- or t1, t1, t2
- mtc0 t1, CP0_TCSTATUS
- _ehb
-#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0
jr ra
END(resume)
+#endif /* USE_ALTERNATE_RESUME_IMPL */
+
/*
* Save a thread's fp context.
*/
LEAF(_save_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
mfc0 t0, CP0_STATUS
#endif
fpu_save_double a0 t0 t1 # clobbers t1
@@ -138,13 +117,33 @@ LEAF(_save_fp)
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
mfc0 t0, CP0_STATUS
#endif
fpu_restore_double a0 t0 t1 # clobbers t1
jr ra
END(_restore_fp)
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+ msa_save_all a0
+ jr ra
+ END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+ msa_restore_all a0
+ jr ra
+ END(_restore_msa)
+
+#endif
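
The CU1 clearing in resume() above is one half of lazy FP restore; the other half runs in the coprocessor-unusable trap. A hedged sketch of that side (the function and helper names are illustrative, not the exact do_cpu() code):

    #include <linux/sched.h>
    #include <asm/fpu.h>

    static void cu1_trap_sketch(void)
    {
            /*
             * CU1 was cleared at the last context switch, so the task's
             * first FP instruction traps here.  Re-enable the FPU and
             * reload the saved context; the instruction is then retried.
             */
            enable_fpu();           /* set ST0_CU1 again (name assumed) */
            _restore_fp(current);   /* reload the thread's FP state */
    }
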
+
/*
* Load the FPU with signalling NANS. This bit pattern we're using has
* the property that no matter whether considered as single or as double
@@ -156,19 +155,10 @@ LEAF(_restore_fp)
#define FPU_DEFAULT 0x00000000
LEAF(_init_fpu)
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
- mfc0 t0, CP0_TCSTATUS
- /* Bit position is the same for Status, TCStatus */
- li t1, ST0_CU1
- or t0, t1
- mtc0 t0, CP0_TCSTATUS
-#else /* Normal MIPS CU1 enable */
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_STATUS
-#endif /* CONFIG_MIPS_MT_SMTC */
enable_fpu_hazard
li t1, FPU_DEFAULT
@@ -232,8 +222,49 @@ LEAF(_init_fpu)
mtc1 t1, $f29
mtc1 t1, $f30
mtc1 t1, $f31
+
+#ifdef CONFIG_CPU_MIPS32_R2
+ .set push
+ .set mips64r2
+ sll t0, t0, 5 # is Status.FR set?
+ bgez t0, 1f # no: skip setting upper 32b
+
+ mthc1 t1, $f0
+ mthc1 t1, $f1
+ mthc1 t1, $f2
+ mthc1 t1, $f3
+ mthc1 t1, $f4
+ mthc1 t1, $f5
+ mthc1 t1, $f6
+ mthc1 t1, $f7
+ mthc1 t1, $f8
+ mthc1 t1, $f9
+ mthc1 t1, $f10
+ mthc1 t1, $f11
+ mthc1 t1, $f12
+ mthc1 t1, $f13
+ mthc1 t1, $f14
+ mthc1 t1, $f15
+ mthc1 t1, $f16
+ mthc1 t1, $f17
+ mthc1 t1, $f18
+ mthc1 t1, $f19
+ mthc1 t1, $f20
+ mthc1 t1, $f21
+ mthc1 t1, $f22
+ mthc1 t1, $f23
+ mthc1 t1, $f24
+ mthc1 t1, $f25
+ mthc1 t1, $f26
+ mthc1 t1, $f27
+ mthc1 t1, $f28
+ mthc1 t1, $f29
+ mthc1 t1, $f30
+ mthc1 t1, $f31
+1: .set pop
+#endif /* CONFIG_CPU_MIPS32_R2 */
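
The `sll t0, t0, 5` / `bgez` pair above is a branch-on-bit idiom: Status.FR is bit 26 (ST0_FR in mipsregs.h), so shifting Status left by five places moves it into the sign bit. The equivalent C test, as a sketch:

    #define ST0_FR (1u << 26)       /* Status.FR: 64-bit FP register mode */

    static int status_fr_set(unsigned int status)
    {
            /* same as (status & ST0_FR) != 0: bit 26 shifted into bit 31 */
            return (int)(status << 5) < 0;
    }
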
#else
- .set mips3
+ .set arch=r4000
dmtc1 t1, $f0
dmtc1 t1, $f2
dmtc1 t1, $f4
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index 43cda53f5af..da0fbe46d83 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -8,7 +8,7 @@
* Copyright (C) 1996 by Ralf Baechle
*
* Multi-arch abstraction and asm macros for easier reading:
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*/
#include <asm/asm.h>
#include <asm/fpregdef.h>
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 87481f916a6..74bab9ddd0e 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -9,12 +9,16 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
-#include <asm/page.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+ PTR_L a3, arg3
+
PTR_L s0, kexec_indirection_page
PTR_L s1, kexec_start_address
@@ -22,15 +26,20 @@ process_entry:
PTR_L s2, (s0)
PTR_ADD s0, s0, SZREG
+ /*
+	 * In case of a kdump/crash kernel, the indirection page is not
+	 * populated, as the kernel is directly copied to a reserved location.
+ */
+ beqz s2, done
+
/* destination page */
and s3, s2, 0x1
beq s3, zero, 1f
and s4, s2, ~0x1 /* store destination addr in s4 */
- move a0, s4
b process_entry
1:
- /* indirection page, update s0 */
+ /* indirection page, update s0 */
and s3, s2, 0x2
beq s3, zero, 1f
and s0, s2, ~0x2
@@ -46,7 +55,7 @@ process_entry:
and s3, s2, 0x8
beq s3, zero, process_entry
and s2, s2, ~0x8
- li s6, (1 << PAGE_SHIFT) / SZREG
+ li s6, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
@@ -60,10 +69,111 @@ copy_word:
b process_entry
done:
+#ifdef CONFIG_SMP
+	/* Resetting kexec_flag signals the other CPUs that the kernel
+	   has been moved to its location. Note that we need the
+	   relocated address of kexec_flag. */
+
+ bal 1f
+ 1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+ LONG_S zero,(t0)
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	/* We need to flush the I-cache before jumping to the new kernel.
+	 * Unfortunately, this code is cpu-specific.
+	 */
+ .set push
+ .set noreorder
+ syncw
+ syncw
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
/* jump to kexec_start_address */
j s1
END(relocate_new_kernel)
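
For readers less fluent in MIPS assembly, a hedged C rendering of the loop above; the flag values follow the generic kexec indirection-page ABI (0x1 destination, 0x2 indirection, 0x4 done, 0x8 source page), and the function name is invented for illustration:

    static void relocate_walk_sketch(unsigned long *ind, unsigned long words,
                                     void (*jump_to_new_kernel)(void))
    {
            unsigned long *dst = NULL;
            unsigned long entry, i;

            while ((entry = *ind++) != 0) {         /* 0: kdump, list unused */
                    if (entry & 0x1) {              /* destination page */
                            dst = (unsigned long *)(entry & ~0x1UL);
                    } else if (entry & 0x2) {       /* chain to the next list */
                            ind = (unsigned long *)(entry & ~0x2UL);
                    } else if (entry & 0x4) {       /* done */
                            break;
                    } else if (entry & 0x8) {       /* source: copy one page */
                            unsigned long *src =
                                    (unsigned long *)(entry & ~0x8UL);

                            for (i = 0; i < words; i++)     /* cf. copy_word */
                                    *dst++ = *src++;
                    }
            }
            jump_to_new_kernel();                   /* the final "j s1" */
    }

Here `words` corresponds to s6, i.e. (1 << _PAGE_SHIFT) / SZREG.
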
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until code is relocated and
+ * then start at entry (?) point.
+ */
+LEAF(kexec_smp_wait)
+ PTR_L a0, s_arg0
+ PTR_L a1, s_arg1
+ PTR_L a2, s_arg2
+ PTR_L a3, s_arg3
+ PTR_L s1, kexec_start_address
+
+	/* Non-relocated addresses work for the args and
+	 * kexec_start_address (the old kernel is not overwritten).
+	 * But we need the relocated address of kexec_flag.
+	 */
+
+ bal 1f
+1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+
+1: LONG_L s0, (t0)
+ bne s0, zero,1b
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ .set push
+ .set noreorder
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
+ j s1
+ END(kexec_smp_wait)
+#endif
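
Both `bal 1f` / `PTR_LA` sequences above compute a relocated address: bal captures the run-time PC in ra, PTR_LA yields link-time addresses, and their difference relocates any nearby symbol. As arithmetic (a sketch with placeholder names):

    static unsigned long relocate_sketch(unsigned long runtime_pc_at_label,
                                         unsigned long linktime_label,
                                         unsigned long linktime_kexec_flag)
    {
            /* offset = where we run minus where we were linked */
            return linktime_kexec_flag +
                   (runtime_pc_at_label - linktime_label);
    }
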
+
+#ifdef __mips64
+	/* all PTRs must be aligned to 8 bytes in 64-bit mode */
+ .align 3
+#endif
+
+/* All parameters to the new kernel are passed in registers a0-a3.
+ * kexec_args[0..3] are used to prepare the register values.
+ */
+
+kexec_args:
+ EXPORT(kexec_args)
+arg0: PTR 0x0
+arg1: PTR 0x0
+arg2: PTR 0x0
+arg3: PTR 0x0
+ .size kexec_args,PTRSIZE*4
+
+#ifdef CONFIG_SMP
+/*
+ * Secondary CPUs may have different kernel parameters in
+ * their registers a0-a3. secondary_kexec_args[0..3] are used
+ * to prepare register values.
+ */
+secondary_kexec_args:
+ EXPORT(secondary_kexec_args)
+s_arg0: PTR 0x0
+s_arg1: PTR 0x0
+s_arg2: PTR 0x0
+s_arg3: PTR 0x0
+ .size secondary_kexec_args,PTRSIZE*4
+kexec_flag:
+ LONG 0x1
+
+#endif
+
kexec_start_address:
EXPORT(kexec_start_address)
PTR 0x0
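
A hedged example of how the argument plumbing above is typically used: platform kexec code stores values into kexec_args[] so the PTR_L loads at the top of relocate_new_kernel hand them to the new kernel in a0-a3. The fw_arg globals mirror the usual MIPS firmware arguments; treat this as a sketch rather than the exact machine_kexec.c code:

    #include <asm/bootinfo.h>

    extern unsigned long kexec_args[4];

    static void fill_kexec_args_sketch(void)
    {
            /* hand the new kernel the arguments this one booted with */
            kexec_args[0] = fw_arg0;        /* typically argc */
            kexec_args[1] = fw_arg1;        /* typically argv */
            kexec_args[2] = fw_arg2;        /* typically envp */
            kexec_args[3] = fw_arg3;
    }
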
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
index 060563a712b..07fc5244aed 100644
--- a/arch/mips/kernel/reset.c
+++ b/arch/mips/kernel/reset.c
@@ -7,7 +7,7 @@
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/reboot.h>
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
new file mode 100644
index 00000000000..758fb3cd232
--- /dev/null
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_interrupt(void)
+{
+ int i;
+ struct rtlx_info *info;
+ struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
+
+ if (p == NULL || *p == NULL)
+ return;
+
+ info = *p;
+
+ if (info->ap_int_pending == 1 && smp_processor_id() == 0) {
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ wake_up(&channel_wqs[i].lx_queue);
+ wake_up(&channel_wqs[i].rt_queue);
+ }
+ info->ap_int_pending = 0;
+ }
+}
+
+void _interrupt_sp(void)
+{
+ smp_send_reschedule(aprp_cpu_index());
+}
+
+int __init rtlx_module_init(void)
+{
+ struct device *dev;
+ int i, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+ "Pass maxcpus=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+ if (major < 0) {
+ pr_err("rtlx_module_init: unable to register device\n");
+ return major;
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+ "%s%d", RTLX_MODULE_NAME, i);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ }
+
+ /* set up notifiers */
+ rtlx_notify.start = rtlx_starting;
+ rtlx_notify.stop = rtlx_stopping;
+ vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+ if (cpu_has_vint) {
+ aprp_hook = rtlx_interrupt;
+ } else {
+ pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+ err = -ENODEV;
+ goto out_class;
+ }
+
+ return 0;
+
+out_class:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
+}
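
From userspace, the channels registered above appear as character devices, conventionally /dev/rtlx0 .. /dev/rtlxN (the path is an assumption derived from RTLX_MODULE_NAME). A minimal client sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            ssize_t n;
            int fd = open("/dev/rtlx0", O_RDWR);    /* path assumed */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            write(fd, "ping", 4);           /* ends up in rtlx_write() */
            n = read(fd, buf, sizeof(buf)); /* may block on lx_queue */
            if (n > 0)
                    printf("got %zd bytes\n", n);
            close(fd);
            return 0;
    }
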
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
new file mode 100644
index 00000000000..5a66b975989
--- /dev/null
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -0,0 +1,150 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_dispatch(void)
+{
+ if (read_c0_cause() & read_c0_status() & C_SW0)
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
+}
+
+/*
+ * Interrupt handler may be called before rtlx_init has otherwise had
+ * a chance to run.
+ */
+static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
+{
+ unsigned int vpeflags;
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ vpeflags = dvpe();
+ set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
+ irq_enable_hazard();
+ evpe(vpeflags);
+ local_irq_restore(flags);
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ wake_up(&channel_wqs[i].lx_queue);
+ wake_up(&channel_wqs[i].rt_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction rtlx_irq = {
+ .handler = rtlx_interrupt,
+ .name = "RTLX",
+};
+
+static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
+
+void _interrupt_sp(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ dvpe();
+ settc(1);
+ write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
+ evpe(EVPE_ENABLE);
+ local_irq_restore(flags);
+}
+
+int __init rtlx_module_init(void)
+{
+ struct device *dev;
+ int i, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (aprp_cpu_index() == 0) {
+ pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+ "Pass maxtcs=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+ if (major < 0) {
+ pr_err("rtlx_module_init: unable to register device\n");
+ return major;
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+ "%s%d", RTLX_MODULE_NAME, i);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ }
+
+ /* set up notifiers */
+ rtlx_notify.start = rtlx_starting;
+ rtlx_notify.stop = rtlx_stopping;
+ vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+ if (cpu_has_vint) {
+ aprp_hook = rtlx_dispatch;
+ } else {
+ pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+ err = -ENODEV;
+ goto out_class;
+ }
+
+ rtlx_irq.dev_id = rtlx;
+ err = setup_irq(rtlx_irq_num, &rtlx_irq);
+ if (err)
+ goto out_class;
+
+ return 0;
+
+out_class:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
+}
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 0233798f715..31b1b763cb2 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -1,106 +1,51 @@
/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org)
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
*/
-
-#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/init.h>
-#include <asm/uaccess.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/vmalloc.h>
-#include <linux/elf.h>
-#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
-#include <linux/interrupt.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/atomic.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
-#include <asm/cacheflush.h>
-#include <asm/atomic.h>
-#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/vpe.h>
#include <asm/rtlx.h>
+#include <asm/setup.h>
+#include <asm/vpe.h>
-static struct rtlx_info *rtlx;
-static int major;
-static char module_name[] = "rtlx";
-
-static struct chan_waitqueues {
- wait_queue_head_t rt_queue;
- wait_queue_head_t lx_queue;
- atomic_t in_open;
- struct mutex mutex;
-} channel_wqs[RTLX_CHANNELS];
-
-static struct vpe_notifications notify;
-static int sp_stopping = 0;
-
-extern void *vpe_get_shared(int index);
-
-static void rtlx_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
-}
-
-
-/* Interrupt handler may be called before rtlx_init has otherwise had
- a chance to run.
-*/
-static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
-{
- int i;
-
- for (i = 0; i < RTLX_CHANNELS; i++) {
- wake_up(&channel_wqs[i].lx_queue);
- wake_up(&channel_wqs[i].rt_queue);
- }
-
- return IRQ_HANDLED;
-}
+static int sp_stopping;
+struct rtlx_info *rtlx;
+struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
+struct vpe_notifications rtlx_notify;
+void (*aprp_hook)(void) = NULL;
+EXPORT_SYMBOL(aprp_hook);
static void __used dump_rtlx(void)
{
int i;
- printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
+ pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
- printk(" rt_state %d lx_state %d buffer_size %d\n",
- chan->rt_state, chan->lx_state, chan->buffer_size);
+ pr_info(" rt_state %d lx_state %d buffer_size %d\n",
+ chan->rt_state, chan->lx_state, chan->buffer_size);
- printk(" rt_read %d rt_write %d\n",
- chan->rt_read, chan->rt_write);
+ pr_info(" rt_read %d rt_write %d\n",
+ chan->rt_read, chan->rt_write);
- printk(" lx_read %d lx_write %d\n",
- chan->lx_read, chan->lx_write);
+ pr_info(" lx_read %d lx_write %d\n",
+ chan->lx_read, chan->lx_write);
- printk(" rt_buffer <%s>\n", chan->rt_buffer);
- printk(" lx_buffer <%s>\n", chan->lx_buffer);
+ pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
+ pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
}
}
@@ -108,7 +53,7 @@ static void __used dump_rtlx(void)
static int rtlx_init(struct rtlx_info *rtlxi)
{
if (rtlxi->id != RTLX_ID) {
- printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
+ pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
return -ENOEXEC;
}
@@ -118,20 +63,20 @@ static int rtlx_init(struct rtlx_info *rtlxi)
}
/* notifications */
-static void starting(int vpe)
+void rtlx_starting(int vpe)
{
int i;
sp_stopping = 0;
/* force a reload of rtlx */
- rtlx=NULL;
+ rtlx = NULL;
/* wake up any sleeping rtlx_open's */
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
-static void stopping(int vpe)
+void rtlx_stopping(int vpe)
{
int i;
@@ -149,28 +94,27 @@ int rtlx_open(int index, int can_sleep)
int ret = 0;
if (index >= RTLX_CHANNELS) {
- printk(KERN_DEBUG "rtlx_open index out of range\n");
+ pr_debug(KERN_DEBUG "rtlx_open index out of range\n");
return -ENOSYS;
}
if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
- printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
- index);
+ pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
ret = -EBUSY;
goto out_fail;
}
if (rtlx == NULL) {
- if( (p = vpe_get_shared(tclimit)) == NULL) {
+ p = vpe_get_shared(aprp_cpu_index());
+ if (p == NULL) {
if (can_sleep) {
- __wait_event_interruptible(channel_wqs[index].lx_queue,
- (p = vpe_get_shared(tclimit)),
- ret);
+ ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (p = vpe_get_shared(aprp_cpu_index())));
if (ret)
goto out_fail;
} else {
- printk(KERN_DEBUG "No SP program loaded, and device "
- "opened with O_NONBLOCK\n");
+ pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
ret = -ENOSYS;
goto out_fail;
}
@@ -182,7 +126,9 @@ int rtlx_open(int index, int can_sleep)
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait(&channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(
+ &channel_wqs[index].lx_queue,
+ &wait, TASK_INTERRUPTIBLE);
smp_rmb();
if (*p != NULL)
break;
@@ -193,23 +139,24 @@ int rtlx_open(int index, int can_sleep)
ret = -ERESTARTSYS;
goto out_fail;
}
- finish_wait(&channel_wqs[index].lx_queue, &wait);
+ finish_wait(&channel_wqs[index].lx_queue,
+ &wait);
} else {
- printk(" *vpe_get_shared is NULL. "
- "Has an SP program been loaded?\n");
+ pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
ret = -ENOSYS;
goto out_fail;
}
}
if ((unsigned int)*p < KSEG0) {
- printk(KERN_WARNING "vpe_get_shared returned an invalid pointer "
- "maybe an error code %d\n", (int)*p);
+ pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
+ (int)*p);
ret = -ENOSYS;
goto out_fail;
}
- if ((ret = rtlx_init(*p)) < 0)
+ ret = rtlx_init(*p);
+ if (ret < 0)
goto out_ret;
}
@@ -232,27 +179,30 @@ out_ret:
int rtlx_release(int index)
{
+ if (rtlx == NULL) {
+ pr_err("rtlx_release() with null rtlx\n");
+ return 0;
+ }
rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
return 0;
}
unsigned int rtlx_read_poll(int index, int can_sleep)
{
- struct rtlx_channel *chan;
+ struct rtlx_channel *chan;
- if (rtlx == NULL)
- return 0;
+ if (rtlx == NULL)
+ return 0;
- chan = &rtlx->channel[index];
+ chan = &rtlx->channel[index];
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
if (can_sleep) {
- int ret = 0;
-
- __wait_event_interruptible(channel_wqs[index].lx_queue,
- chan->lx_read != chan->lx_write || sp_stopping,
- ret);
+ int ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (chan->lx_read != chan->lx_write) ||
+ sp_stopping);
if (ret)
return ret;
@@ -282,7 +232,9 @@ static inline int write_spacefree(int read, int write, int size)
unsigned int rtlx_write_poll(int index)
{
struct rtlx_channel *chan = &rtlx->channel[index];
- return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size);
+
+ return write_spacefree(chan->rt_read, chan->rt_write,
+ chan->buffer_size);
}
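
write_spacefree(), named in the hunk header above, is the classic one-slot-reserved ring-buffer computation; in the kernel it reads roughly like this sketch:

    static inline int write_spacefree_sketch(int read, int write, int size)
    {
            if (read == write)
                    return size - 1;        /* empty, but keep one slot free */

            /*
             * Never fill the buffer completely, otherwise read == write
             * would be ambiguous between "empty" and "full".
             */
            return ((read + size - write) % size) - 1;
    }
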
ssize_t rtlx_read(int index, void __user *buff, size_t count)
@@ -335,7 +287,7 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
size_t fl;
if (rtlx == NULL)
- return(-ENOSYS);
+ return -ENOSYS;
rt = &rtlx->channel[index];
@@ -344,8 +296,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
rt_read = rt->rt_read;
/* total number of bytes to copy */
- count = min(count,
- (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size));
+ count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
+ rt->buffer_size));
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
@@ -355,9 +307,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
goto out;
/* if there's any left copy to the beginning of the buffer */
- if (count - fl) {
+ if (count - fl)
failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
- }
out:
count -= failed;
@@ -367,31 +318,27 @@ out:
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
+ _interrupt_sp();
+
return count;
}
static int file_open(struct inode *inode, struct file *filp)
{
- int minor = iminor(inode);
-
- return rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+ return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
}
static int file_release(struct inode *inode, struct file *filp)
{
- int minor = iminor(inode);
-
- return rtlx_release(minor);
+ return rtlx_release(iminor(inode));
}
-static unsigned int file_poll(struct file *file, poll_table * wait)
+static unsigned int file_poll(struct file *file, poll_table *wait)
{
- int minor;
+ int minor = iminor(file_inode(file));
unsigned int mask = 0;
- minor = iminor(file->f_path.dentry->d_inode);
-
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
@@ -409,38 +356,32 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
return mask;
}
-static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
- loff_t * ppos)
+static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
+ loff_t *ppos)
{
- int minor = iminor(file->f_path.dentry->d_inode);
+ int minor = iminor(file_inode(file));
/* data available? */
- if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
- return 0; // -EAGAIN makes cat whinge
- }
+ if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
+ return 0; /* -EAGAIN makes 'cat' whine */
return rtlx_read(minor, buffer, count);
}
-static ssize_t file_write(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
+static ssize_t file_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
- int minor;
- struct rtlx_channel *rt;
-
- minor = iminor(file->f_path.dentry->d_inode);
- rt = &rtlx->channel[minor];
+ int minor = iminor(file_inode(file));
/* any space left... */
if (!rtlx_write_poll(minor)) {
- int ret = 0;
+ int ret;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- __wait_event_interruptible(channel_wqs[minor].rt_queue,
- rtlx_write_poll(minor),
- ret);
+ ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
+ rtlx_write_poll(minor));
if (ret)
return ret;
}
@@ -448,95 +389,16 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
return rtlx_write(minor, buffer, count);
}
-static const struct file_operations rtlx_fops = {
+const struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = file_open,
.release = file_release,
.write = file_write,
.read = file_read,
- .poll = file_poll
+ .poll = file_poll,
+ .llseek = noop_llseek,
};
-static struct irqaction rtlx_irq = {
- .handler = rtlx_interrupt,
- .flags = IRQF_DISABLED,
- .name = "RTLX",
-};
-
-static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
-
-static char register_chrdev_failed[] __initdata =
- KERN_ERR "rtlx_module_init: unable to register device\n";
-
-static int __init rtlx_module_init(void)
-{
- struct device *dev;
- int i, err;
-
- if (!cpu_has_mipsmt) {
- printk("VPE loader: not a MIPS MT capable processor\n");
- return -ENODEV;
- }
-
- if (tclimit == 0) {
- printk(KERN_WARNING "No TCs reserved for AP/SP, not "
- "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
- "argument\n");
-
- return -ENODEV;
- }
-
- major = register_chrdev(0, module_name, &rtlx_fops);
- if (major < 0) {
- printk(register_chrdev_failed);
- return major;
- }
-
- /* initialise the wait queues */
- for (i = 0; i < RTLX_CHANNELS; i++) {
- init_waitqueue_head(&channel_wqs[i].rt_queue);
- init_waitqueue_head(&channel_wqs[i].lx_queue);
- atomic_set(&channel_wqs[i].in_open, 0);
- mutex_init(&channel_wqs[i].mutex);
-
- dev = device_create(mt_class, NULL, MKDEV(major, i),
- "%s%d", module_name, i);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
- goto out_chrdev;
- }
- }
-
- /* set up notifiers */
- notify.start = starting;
- notify.stop = stopping;
- vpe_notify(tclimit, &notify);
-
- if (cpu_has_vint)
- set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
-
- rtlx_irq.dev_id = rtlx;
- setup_irq(rtlx_irq_num, &rtlx_irq);
-
- return 0;
-
-out_chrdev:
- for (i = 0; i < RTLX_CHANNELS; i++)
- device_destroy(mt_class, MKDEV(major, i));
-
- return err;
-}
-
-static void __exit rtlx_module_exit(void)
-{
- int i;
-
- for (i = 0; i < RTLX_CHANNELS; i++)
- device_destroy(mt_class, MKDEV(major, i));
-
- unregister_chrdev(major, module_name);
-}
-
module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
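
Several hunks above track a wait-API change: the old three-argument __wait_event_interruptible(wq, condition, ret) macro stored its status through ret, while the newer form returns it. A minimal sketch in the new style (using the public wait_event_interruptible(); rtlx itself calls the double-underscore internal variant):

    #include <linux/wait.h>

    static int wait_for_data_sketch(wait_queue_head_t *wq, const int *have_data)
    {
            /* 0 once *have_data is non-zero, -ERESTARTSYS on a signal */
            return wait_event_interruptible(*wq, *have_data != 0);
    }
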
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 08a9c5070ea..3245474f19d 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -6,6 +6,7 @@
* Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
@@ -24,7 +25,7 @@
/* Highest syscall used of any syscall flavour */
#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
- .align 5
+ .align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
@@ -34,31 +35,68 @@ NESTED(handle_sys, PT_SIZE, sp)
lw t1, PT_EPC(sp) # skip syscall on return
-#if defined(CONFIG_BINFMT_IRIX)
- sltiu t0, v0, MAX_SYSCALL_NO + 1 # check syscall number
-#else
subu v0, v0, __NR_O32_Linux # check syscall number
sltiu t0, v0, __NR_O32_Linux_syscalls + 1
-#endif
addiu t1, 4 # skip to next instruction
sw t1, PT_EPC(sp)
beqz t0, illegal_syscall
- sll t0, v0, 3
+ sll t0, v0, 2
la t1, sys_call_table
addu t1, t0
lw t2, (t1) # syscall routine
- lw t3, 4(t1) # >= 0 if we need stack arguments
beqz t2, illegal_syscall
sw a3, PT_R26(sp) # save a3 for syscall restarting
- bgez t3, stackargs
-stack_done:
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ */
+ lw t0, PT_R29(sp) # get old user stack pointer
+
+ /*
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ lw t5, TI_ADDR_LIMIT($28)
+ addu t4, t0, 32
+ and t5, t4
+ bltz t5, bad_stack # -> sp is bad
+
+ /*
+ * Ok, copy the args from the luser stack to the kernel stack.
+ */
+
+ .set push
+ .set noreorder
+ .set nomacro
+
+1: user_lw(t5, 16(t0)) # argument #5 from usp
+4: user_lw(t6, 20(t0)) # argument #6 from usp
+3: user_lw(t7, 24(t0)) # argument #7 from usp
+2: user_lw(t8, 28(t0)) # argument #8 from usp
+
+ sw t5, 16(sp) # argument #5 to ksp
+ sw t6, 20(sp) # argument #6 to ksp
+ sw t7, 24(sp) # argument #7 to ksp
+ sw t8, 28(sp) # argument #8 to ksp
+ .set pop
+
+ .section __ex_table,"a"
+ PTR 1b,bad_stack
+ PTR 2b,bad_stack
+ PTR 3b,bad_stack
+ PTR 4b,bad_stack
+ .previous
+
lw t0, TI_FLAGS($28) # syscall tracing enabled?
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
and t0, t1
- bnez t0, syscall_trace_entry # -> yes
+ bnez t0, syscall_trace_entry # -> yes
jalr t2 # Do The Real Thing (TM)
@@ -67,24 +105,13 @@ stack_done:
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
o32_syscall_exit:
- local_irq_disable # make sure need_resched and
- # signals dont change between
- # sampling and return
- lw a2, TI_FLAGS($28) # current->work
- li t0, _TIF_ALLWORK_MASK
- and t0, a2
- bnez t0, o32_syscall_exit_work
-
- j restore_partial
-
-o32_syscall_exit_work:
- j syscall_exit_work_partial
+ j syscall_exit_partial
/* ------------------------------------------------------------------------ */
@@ -92,8 +119,18 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- li a1, 0
- jal do_syscall_trace
+
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ */
+ addiu a1, v0, __NR_O32_Linux
+ bnez v0, 1f /* __NR_syscall at offset 0 */
+ lw a1, PT_R4(sp)
+
+1: jal syscall_trace_enter
+
+ bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
@@ -108,73 +145,21 @@ syscall_trace_entry:
sw t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ lw t1, PT_R2(sp) # syscall number
negu v0 # error
- sw v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sw t1, PT_R0(sp) # save it for syscall restarting
1: sw v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
/* ------------------------------------------------------------------------ */
/*
- * More than four arguments. Try to deal with it by copying the
- * stack arguments from the user stack to the kernel stack.
- * This Sucks (TM).
- */
-stackargs:
- lw t0, PT_R29(sp) # get old user stack pointer
-
- /*
- * We intentionally keep the kernel stack a little below the top of
- * userspace so we don't have to do a slower byte accurate check here.
- */
- lw t5, TI_ADDR_LIMIT($28)
- addu t4, t0, 32
- and t5, t4
- bltz t5, bad_stack # -> sp is bad
-
- /* Ok, copy the args from the luser stack to the kernel stack.
- * t3 is the precomputed number of instruction bytes needed to
- * load or store arguments 6-8.
- */
-
- la t1, 5f # load up to 3 arguments
- subu t1, t3
-1: lw t5, 16(t0) # argument #5 from usp
- .set push
- .set noreorder
- .set nomacro
- jr t1
- addiu t1, 6f - 5f
-
-2: lw t8, 28(t0) # argument #8 from usp
-3: lw t7, 24(t0) # argument #7 from usp
-4: lw t6, 20(t0) # argument #6 from usp
-5: jr t1
- sw t5, 16(sp) # argument #5 to ksp
-
- sw t8, 28(sp) # argument #8 to ksp
- sw t7, 24(sp) # argument #7 to ksp
- sw t6, 20(sp) # argument #6 to ksp
-6: j stack_done # go back
- nop
- .set pop
-
- .section __ex_table,"a"
- PTR 1b,bad_stack
- PTR 2b,bad_stack
- PTR 3b,bad_stack
- PTR 4b,bad_stack
- .previous
-
- /*
 * The stack pointer for a call with more than 4 arguments is bad.
 * We should probably handle this case a bit more drastically.
*/
bad_stack:
- negu v0 # error
- sw v0, PT_R0(sp)
+ li v0, EFAULT
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
@@ -184,104 +169,21 @@ bad_stack:
* The system call does not exist in this kernel
*/
illegal_syscall:
- li v0, -ENOSYS # error
+ li v0, ENOSYS # error
sw v0, PT_R2(sp)
li t0, 1 # set error flag
sw t0, PT_R7(sp)
j o32_syscall_exit
END(handle_sys)
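
The rewritten entry path above drops the old stackargs jump-table trick and always copies arguments 5-8, relying on the __ex_table entries to branch to bad_stack on a fault. A hedged C analogue (helper and parameter names invented):

    #include <linux/string.h>
    #include <linux/uaccess.h>

    static int copy_o32_stack_args_sketch(unsigned long *ksp,
                                          const unsigned long __user *usp)
    {
            unsigned long args[4];

            /* o32 arguments 5-8 live at usp+16..usp+28, i.e. words 4..7 */
            if (copy_from_user(args, usp + 4, sizeof(args)))
                    return -EFAULT;         /* the bad_stack exit */

            memcpy(ksp + 4, args, sizeof(args));    /* 16(sp)..28(sp) */
            return 0;
    }
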
- LEAF(mips_atomic_set)
- andi v0, a1, 3 # must be word aligned
- bnez v0, bad_alignment
-
- lw v1, TI_ADDR_LIMIT($28) # in legal address range?
- addiu a0, a1, 4
- or a0, a0, a1
- and a0, a0, v1
- bltz a0, bad_address
-
-#ifdef CONFIG_CPU_HAS_LLSC
- /* Ok, this is the ll/sc case. World is sane :-) */
-1: ll v0, (a1)
- move a0, a2
-2: sc a0, (a1)
-#if R10000_LLSC_WAR
- beqzl a0, 1b
-#else
- beqz a0, 1b
-#endif
-
- .section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- .previous
-#else
- sw a1, 16(sp)
- sw a2, 20(sp)
-
- move a0, sp
- move a2, a1
- li a1, 1
- jal do_page_fault
-
- lw a1, 16(sp)
- lw a2, 20(sp)
-
- /*
- * At this point the page should be readable and writable unless
- * there was no more memory available.
- */
-1: lw v0, (a1)
-2: sw a2, (a1)
-
- .section __ex_table,"a"
- PTR 1b, no_mem
- PTR 2b, no_mem
- .previous
-#endif
-
- sw zero, PT_R7(sp) # success
- sw v0, PT_R2(sp) # result
-
- j o32_syscall_exit # continue like a normal syscall
-
-no_mem: li v0, -ENOMEM
- jr ra
-
-bad_address:
- li v0, -EFAULT
- jr ra
-
-bad_alignment:
- li v0, -EINVAL
- jr ra
- END(mips_atomic_set)
-
- LEAF(sys_sysmips)
- beq a0, MIPS_ATOMIC_SET, mips_atomic_set
- j _sys_sysmips
- END(sys_sysmips)
-
LEAF(sys_syscall)
-#if defined(CONFIG_BINFMT_IRIX)
- sltiu v0, a0, MAX_SYSCALL_NO + 1 # check syscall number
-#else
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
-#endif
- sll t1, t0, 3
+ beqz t0, einval # do not recurse
+ sll t1, t0, 2
beqz v0, einval
-
lw t2, sys_call_table(t1) # syscall routine
-#if defined(CONFIG_BINFMT_IRIX)
- li v1, 4000 # nr of sys_syscall
-#else
- li v1, 4000 - __NR_O32_Linux # index of sys_syscall
-#endif
- beq t0, v1, einval # do not recurse
-
/* Some syscalls like execve get their arguments from struct pt_regs
and claim zero arguments in the syscall table. Thus we have to
assume the worst case and shuffle around all potential arguments.
@@ -305,379 +207,374 @@ bad_alignment:
jr t2
/* Unreached */
-einval: li v0, -EINVAL
+einval: li v0, -ENOSYS
jr ra
END(sys_syscall)
- .macro fifty ptr, nargs, from=1, to=50
- sys \ptr \nargs
- .if \to-\from
- fifty \ptr,\nargs,"(\from+1)",\to
- .endif
- .endm
-
- .macro mille ptr, nargs, from=1, to=20
- fifty \ptr,\nargs
- .if \to-\from
- mille \ptr,\nargs,"(\from+1)",\to
- .endif
- .endm
-
- .macro syscalltable
-#if defined(CONFIG_BINFMT_IRIX)
- mille sys_ni_syscall 0 /* 0 - 999 SVR4 flavour */
- mille sys_ni_syscall 0 /* 1000 - 1999 32-bit IRIX */
- mille sys_ni_syscall 0 /* 2000 - 2999 BSD43 flavour */
- mille sys_ni_syscall 0 /* 3000 - 3999 POSIX flavour */
-#endif
-
- sys sys_syscall 8 /* 4000 */
- sys sys_exit 1
- sys sys_fork 0
- sys sys_read 3
- sys sys_write 3
- sys sys_open 3 /* 4005 */
- sys sys_close 1
- sys sys_waitpid 3
- sys sys_creat 2
- sys sys_link 2
- sys sys_unlink 1 /* 4010 */
- sys sys_execve 0
- sys sys_chdir 1
- sys sys_time 1
- sys sys_mknod 3
- sys sys_chmod 2 /* 4015 */
- sys sys_lchown 3
- sys sys_ni_syscall 0
- sys sys_ni_syscall 0 /* was sys_stat */
- sys sys_lseek 3
- sys sys_getpid 0 /* 4020 */
- sys sys_mount 5
- sys sys_oldumount 1
- sys sys_setuid 1
- sys sys_getuid 0
- sys sys_stime 1 /* 4025 */
- sys sys_ptrace 4
- sys sys_alarm 1
- sys sys_ni_syscall 0 /* was sys_fstat */
- sys sys_pause 0
- sys sys_utime 2 /* 4030 */
- sys sys_ni_syscall 0
- sys sys_ni_syscall 0
- sys sys_access 2
- sys sys_nice 1
- sys sys_ni_syscall 0 /* 4035 */
- sys sys_sync 0
- sys sys_kill 2
- sys sys_rename 2
- sys sys_mkdir 2
- sys sys_rmdir 1 /* 4040 */
- sys sys_dup 1
- sys sys_pipe 0
- sys sys_times 1
- sys sys_ni_syscall 0
- sys sys_brk 1 /* 4045 */
- sys sys_setgid 1
- sys sys_getgid 0
- sys sys_ni_syscall 0 /* was signal(2) */
- sys sys_geteuid 0
- sys sys_getegid 0 /* 4050 */
- sys sys_acct 1
- sys sys_umount 2
- sys sys_ni_syscall 0
- sys sys_ioctl 3
- sys sys_fcntl 3 /* 4055 */
- sys sys_ni_syscall 2
- sys sys_setpgid 2
- sys sys_ni_syscall 0
- sys sys_olduname 1
- sys sys_umask 1 /* 4060 */
- sys sys_chroot 1
- sys sys_ustat 2
- sys sys_dup2 2
- sys sys_getppid 0
- sys sys_getpgrp 0 /* 4065 */
- sys sys_setsid 0
- sys sys_sigaction 3
- sys sys_sgetmask 0
- sys sys_ssetmask 1
- sys sys_setreuid 2 /* 4070 */
- sys sys_setregid 2
- sys sys_sigsuspend 0
- sys sys_sigpending 1
- sys sys_sethostname 2
- sys sys_setrlimit 2 /* 4075 */
- sys sys_getrlimit 2
- sys sys_getrusage 2
- sys sys_gettimeofday 2
- sys sys_settimeofday 2
- sys sys_getgroups 2 /* 4080 */
- sys sys_setgroups 2
- sys sys_ni_syscall 0 /* old_select */
- sys sys_symlink 2
- sys sys_ni_syscall 0 /* was sys_lstat */
- sys sys_readlink 3 /* 4085 */
- sys sys_uselib 1
- sys sys_swapon 2
- sys sys_reboot 3
- sys old_readdir 3
- sys old_mmap 6 /* 4090 */
- sys sys_munmap 2
- sys sys_truncate 2
- sys sys_ftruncate 2
- sys sys_fchmod 2
- sys sys_fchown 3 /* 4095 */
- sys sys_getpriority 2
- sys sys_setpriority 3
- sys sys_ni_syscall 0
- sys sys_statfs 2
- sys sys_fstatfs 2 /* 4100 */
- sys sys_ni_syscall 0 /* was ioperm(2) */
- sys sys_socketcall 2
- sys sys_syslog 3
- sys sys_setitimer 3
- sys sys_getitimer 2 /* 4105 */
- sys sys_newstat 2
- sys sys_newlstat 2
- sys sys_newfstat 2
- sys sys_uname 1
- sys sys_ni_syscall 0 /* 4110 was iopl(2) */
- sys sys_vhangup 0
- sys sys_ni_syscall 0 /* was sys_idle() */
- sys sys_ni_syscall 0 /* was sys_vm86 */
- sys sys_wait4 4
- sys sys_swapoff 1 /* 4115 */
- sys sys_sysinfo 1
- sys sys_ipc 6
- sys sys_fsync 1
- sys sys_sigreturn 0
- sys sys_clone 0 /* 4120 */
- sys sys_setdomainname 2
- sys sys_newuname 1
- sys sys_ni_syscall 0 /* sys_modify_ldt */
- sys sys_adjtimex 1
- sys sys_mprotect 3 /* 4125 */
- sys sys_sigprocmask 3
- sys sys_ni_syscall 0 /* was create_module */
- sys sys_init_module 5
- sys sys_delete_module 1
- sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
- sys sys_quotactl 4
- sys sys_getpgid 1
- sys sys_fchdir 1
- sys sys_bdflush 2
- sys sys_sysfs 3 /* 4135 */
- sys sys_personality 1
- sys sys_ni_syscall 0 /* for afs_syscall */
- sys sys_setfsuid 1
- sys sys_setfsgid 1
- sys sys_llseek 5 /* 4140 */
- sys sys_getdents 3
- sys sys_select 5
- sys sys_flock 2
- sys sys_msync 3
- sys sys_readv 3 /* 4145 */
- sys sys_writev 3
- sys sys_cacheflush 3
- sys sys_cachectl 3
- sys sys_sysmips 4
- sys sys_ni_syscall 0 /* 4150 */
- sys sys_getsid 1
- sys sys_fdatasync 1
- sys sys_sysctl 1
- sys sys_mlock 2
- sys sys_munlock 2 /* 4155 */
- sys sys_mlockall 1
- sys sys_munlockall 0
- sys sys_sched_setparam 2
- sys sys_sched_getparam 2
- sys sys_sched_setscheduler 3 /* 4160 */
- sys sys_sched_getscheduler 1
- sys sys_sched_yield 0
- sys sys_sched_get_priority_max 1
- sys sys_sched_get_priority_min 1
- sys sys_sched_rr_get_interval 2 /* 4165 */
- sys sys_nanosleep, 2
- sys sys_mremap, 5
- sys sys_accept 3
- sys sys_bind 3
- sys sys_connect 3 /* 4170 */
- sys sys_getpeername 3
- sys sys_getsockname 3
- sys sys_getsockopt 5
- sys sys_listen 2
- sys sys_recv 4 /* 4175 */
- sys sys_recvfrom 6
- sys sys_recvmsg 3
- sys sys_send 4
- sys sys_sendmsg 3
- sys sys_sendto 6 /* 4180 */
- sys sys_setsockopt 5
- sys sys_shutdown 2
- sys sys_socket 3
- sys sys_socketpair 4
- sys sys_setresuid 3 /* 4185 */
- sys sys_getresuid 3
- sys sys_ni_syscall 0 /* was sys_query_module */
- sys sys_poll 3
- sys sys_nfsservctl 3
- sys sys_setresgid 3 /* 4190 */
- sys sys_getresgid 3
- sys sys_prctl 5
- sys sys_rt_sigreturn 0
- sys sys_rt_sigaction 4
- sys sys_rt_sigprocmask 4 /* 4195 */
- sys sys_rt_sigpending 2
- sys sys_rt_sigtimedwait 4
- sys sys_rt_sigqueueinfo 3
- sys sys_rt_sigsuspend 0
- sys sys_pread64 6 /* 4200 */
- sys sys_pwrite64 6
- sys sys_chown 3
- sys sys_getcwd 2
- sys sys_capget 2
- sys sys_capset 2 /* 4205 */
- sys sys_sigaltstack 0
- sys sys_sendfile 4
- sys sys_ni_syscall 0
- sys sys_ni_syscall 0
- sys sys_mmap2 6 /* 4210 */
- sys sys_truncate64 4
- sys sys_ftruncate64 4
- sys sys_stat64 2
- sys sys_lstat64 2
- sys sys_fstat64 2 /* 4215 */
- sys sys_pivot_root 2
- sys sys_mincore 3
- sys sys_madvise 3
- sys sys_getdents64 3
- sys sys_fcntl64 3 /* 4220 */
- sys sys_ni_syscall 0
- sys sys_gettid 0
- sys sys_readahead 5
- sys sys_setxattr 5
- sys sys_lsetxattr 5 /* 4225 */
- sys sys_fsetxattr 5
- sys sys_getxattr 4
- sys sys_lgetxattr 4
- sys sys_fgetxattr 4
- sys sys_listxattr 3 /* 4230 */
- sys sys_llistxattr 3
- sys sys_flistxattr 3
- sys sys_removexattr 2
- sys sys_lremovexattr 2
- sys sys_fremovexattr 2 /* 4235 */
- sys sys_tkill 2
- sys sys_sendfile64 5
- sys sys_futex 6
+ .align 2
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+ PTR sys_syscall /* 4000 */
+ PTR sys_exit
+ PTR __sys_fork
+ PTR sys_read
+ PTR sys_write
+ PTR sys_open /* 4005 */
+ PTR sys_close
+ PTR sys_waitpid
+ PTR sys_creat
+ PTR sys_link
+ PTR sys_unlink /* 4010 */
+ PTR sys_execve
+ PTR sys_chdir
+ PTR sys_time
+ PTR sys_mknod
+ PTR sys_chmod /* 4015 */
+ PTR sys_lchown
+ PTR sys_ni_syscall
+ PTR sys_ni_syscall /* was sys_stat */
+ PTR sys_lseek
+ PTR sys_getpid /* 4020 */
+ PTR sys_mount
+ PTR sys_oldumount
+ PTR sys_setuid
+ PTR sys_getuid
+ PTR sys_stime /* 4025 */
+ PTR sys_ptrace
+ PTR sys_alarm
+ PTR sys_ni_syscall /* was sys_fstat */
+ PTR sys_pause
+ PTR sys_utime /* 4030 */
+ PTR sys_ni_syscall
+ PTR sys_ni_syscall
+ PTR sys_access
+ PTR sys_nice
+ PTR sys_ni_syscall /* 4035 */
+ PTR sys_sync
+ PTR sys_kill
+ PTR sys_rename
+ PTR sys_mkdir
+ PTR sys_rmdir /* 4040 */
+ PTR sys_dup
+ PTR sysm_pipe
+ PTR sys_times
+ PTR sys_ni_syscall
+ PTR sys_brk /* 4045 */
+ PTR sys_setgid
+ PTR sys_getgid
+ PTR sys_ni_syscall /* was signal(2) */
+ PTR sys_geteuid
+ PTR sys_getegid /* 4050 */
+ PTR sys_acct
+ PTR sys_umount
+ PTR sys_ni_syscall
+ PTR sys_ioctl
+ PTR sys_fcntl /* 4055 */
+ PTR sys_ni_syscall
+ PTR sys_setpgid
+ PTR sys_ni_syscall
+ PTR sys_olduname
+ PTR sys_umask /* 4060 */
+ PTR sys_chroot
+ PTR sys_ustat
+ PTR sys_dup2
+ PTR sys_getppid
+ PTR sys_getpgrp /* 4065 */
+ PTR sys_setsid
+ PTR sys_sigaction
+ PTR sys_sgetmask
+ PTR sys_ssetmask
+ PTR sys_setreuid /* 4070 */
+ PTR sys_setregid
+ PTR sys_sigsuspend
+ PTR sys_sigpending
+ PTR sys_sethostname
+ PTR sys_setrlimit /* 4075 */
+ PTR sys_getrlimit
+ PTR sys_getrusage
+ PTR sys_gettimeofday
+ PTR sys_settimeofday
+ PTR sys_getgroups /* 4080 */
+ PTR sys_setgroups
+ PTR sys_ni_syscall /* old_select */
+ PTR sys_symlink
+ PTR sys_ni_syscall /* was sys_lstat */
+ PTR sys_readlink /* 4085 */
+ PTR sys_uselib
+ PTR sys_swapon
+ PTR sys_reboot
+ PTR sys_old_readdir
+ PTR sys_mips_mmap /* 4090 */
+ PTR sys_munmap
+ PTR sys_truncate
+ PTR sys_ftruncate
+ PTR sys_fchmod
+ PTR sys_fchown /* 4095 */
+ PTR sys_getpriority
+ PTR sys_setpriority
+ PTR sys_ni_syscall
+ PTR sys_statfs
+ PTR sys_fstatfs /* 4100 */
+ PTR sys_ni_syscall /* was ioperm(2) */
+ PTR sys_socketcall
+ PTR sys_syslog
+ PTR sys_setitimer
+ PTR sys_getitimer /* 4105 */
+ PTR sys_newstat
+ PTR sys_newlstat
+ PTR sys_newfstat
+ PTR sys_uname
+ PTR sys_ni_syscall /* 4110 was iopl(2) */
+ PTR sys_vhangup
+ PTR sys_ni_syscall /* was sys_idle() */
+ PTR sys_ni_syscall /* was sys_vm86 */
+ PTR sys_wait4
+ PTR sys_swapoff /* 4115 */
+ PTR sys_sysinfo
+ PTR sys_ipc
+ PTR sys_fsync
+ PTR sys_sigreturn
+ PTR __sys_clone /* 4120 */
+ PTR sys_setdomainname
+ PTR sys_newuname
+ PTR sys_ni_syscall /* sys_modify_ldt */
+ PTR sys_adjtimex
+ PTR sys_mprotect /* 4125 */
+ PTR sys_sigprocmask
+ PTR sys_ni_syscall /* was create_module */
+ PTR sys_init_module
+ PTR sys_delete_module
+ PTR sys_ni_syscall /* 4130 was get_kernel_syms */
+ PTR sys_quotactl
+ PTR sys_getpgid
+ PTR sys_fchdir
+ PTR sys_bdflush
+ PTR sys_sysfs /* 4135 */
+ PTR sys_personality
+ PTR sys_ni_syscall /* for afs_syscall */
+ PTR sys_setfsuid
+ PTR sys_setfsgid
+ PTR sys_llseek /* 4140 */
+ PTR sys_getdents
+ PTR sys_select
+ PTR sys_flock
+ PTR sys_msync
+ PTR sys_readv /* 4145 */
+ PTR sys_writev
+ PTR sys_cacheflush
+ PTR sys_cachectl
+ PTR sys_sysmips
+ PTR sys_ni_syscall /* 4150 */
+ PTR sys_getsid
+ PTR sys_fdatasync
+ PTR sys_sysctl
+ PTR sys_mlock
+ PTR sys_munlock /* 4155 */
+ PTR sys_mlockall
+ PTR sys_munlockall
+ PTR sys_sched_setparam
+ PTR sys_sched_getparam
+ PTR sys_sched_setscheduler /* 4160 */
+ PTR sys_sched_getscheduler
+ PTR sys_sched_yield
+ PTR sys_sched_get_priority_max
+ PTR sys_sched_get_priority_min
+ PTR sys_sched_rr_get_interval /* 4165 */
+ PTR sys_nanosleep
+ PTR sys_mremap
+ PTR sys_accept
+ PTR sys_bind
+ PTR sys_connect /* 4170 */
+ PTR sys_getpeername
+ PTR sys_getsockname
+ PTR sys_getsockopt
+ PTR sys_listen
+ PTR sys_recv /* 4175 */
+ PTR sys_recvfrom
+ PTR sys_recvmsg
+ PTR sys_send
+ PTR sys_sendmsg
+ PTR sys_sendto /* 4180 */
+ PTR sys_setsockopt
+ PTR sys_shutdown
+ PTR sys_socket
+ PTR sys_socketpair
+ PTR sys_setresuid /* 4185 */
+ PTR sys_getresuid
+ PTR sys_ni_syscall /* was sys_query_module */
+ PTR sys_poll
+ PTR sys_ni_syscall /* was nfsservctl */
+ PTR sys_setresgid /* 4190 */
+ PTR sys_getresgid
+ PTR sys_prctl
+ PTR sys_rt_sigreturn
+ PTR sys_rt_sigaction
+ PTR sys_rt_sigprocmask /* 4195 */
+ PTR sys_rt_sigpending
+ PTR sys_rt_sigtimedwait
+ PTR sys_rt_sigqueueinfo
+ PTR sys_rt_sigsuspend
+ PTR sys_pread64 /* 4200 */
+ PTR sys_pwrite64
+ PTR sys_chown
+ PTR sys_getcwd
+ PTR sys_capget
+ PTR sys_capset /* 4205 */
+ PTR sys_sigaltstack
+ PTR sys_sendfile
+ PTR sys_ni_syscall
+ PTR sys_ni_syscall
+ PTR sys_mips_mmap2 /* 4210 */
+ PTR sys_truncate64
+ PTR sys_ftruncate64
+ PTR sys_stat64
+ PTR sys_lstat64
+ PTR sys_fstat64 /* 4215 */
+ PTR sys_pivot_root
+ PTR sys_mincore
+ PTR sys_madvise
+ PTR sys_getdents64
+ PTR sys_fcntl64 /* 4220 */
+ PTR sys_ni_syscall
+ PTR sys_gettid
+ PTR sys_readahead
+ PTR sys_setxattr
+ PTR sys_lsetxattr /* 4225 */
+ PTR sys_fsetxattr
+ PTR sys_getxattr
+ PTR sys_lgetxattr
+ PTR sys_fgetxattr
+ PTR sys_listxattr /* 4230 */
+ PTR sys_llistxattr
+ PTR sys_flistxattr
+ PTR sys_removexattr
+ PTR sys_lremovexattr
+ PTR sys_fremovexattr /* 4235 */
+ PTR sys_tkill
+ PTR sys_sendfile64
+ PTR sys_futex
#ifdef CONFIG_MIPS_MT_FPAFF
/*
* For FPU affinity scheduling on MIPS MT processors, we need to
* intercept sys_sched_xxxaffinity() calls until we get a proper hook
- * in kernel/sched.c. Considered only temporary we only support these
- * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
+ * in kernel/sched/core.c. Considered only temporary we only support
+ * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
+ * atm.
*/
- sys mipsmt_sys_sched_setaffinity 3
- sys mipsmt_sys_sched_getaffinity 3
+ PTR mipsmt_sys_sched_setaffinity
+ PTR mipsmt_sys_sched_getaffinity
#else
- sys sys_sched_setaffinity 3
- sys sys_sched_getaffinity 3 /* 4240 */
+ PTR sys_sched_setaffinity
+ PTR sys_sched_getaffinity /* 4240 */
#endif /* CONFIG_MIPS_MT_FPAFF */
- sys sys_io_setup 2
- sys sys_io_destroy 1
- sys sys_io_getevents 5
- sys sys_io_submit 3
- sys sys_io_cancel 3 /* 4245 */
- sys sys_exit_group 1
- sys sys_lookup_dcookie 4
- sys sys_epoll_create 1
- sys sys_epoll_ctl 4
- sys sys_epoll_wait 3 /* 4250 */
- sys sys_remap_file_pages 5
- sys sys_set_tid_address 1
- sys sys_restart_syscall 0
- sys sys_fadvise64_64 7
- sys sys_statfs64 3 /* 4255 */
- sys sys_fstatfs64 2
- sys sys_timer_create 3
- sys sys_timer_settime 4
- sys sys_timer_gettime 2
- sys sys_timer_getoverrun 1 /* 4260 */
- sys sys_timer_delete 1
- sys sys_clock_settime 2
- sys sys_clock_gettime 2
- sys sys_clock_getres 2
- sys sys_clock_nanosleep 4 /* 4265 */
- sys sys_tgkill 3
- sys sys_utimes 2
- sys sys_mbind 4
- sys sys_ni_syscall 0 /* sys_get_mempolicy */
- sys sys_ni_syscall 0 /* 4270 sys_set_mempolicy */
- sys sys_mq_open 4
- sys sys_mq_unlink 1
- sys sys_mq_timedsend 5
- sys sys_mq_timedreceive 5
- sys sys_mq_notify 2 /* 4275 */
- sys sys_mq_getsetattr 3
- sys sys_ni_syscall 0 /* sys_vserver */
- sys sys_waitid 5
- sys sys_ni_syscall 0 /* available, was setaltroot */
- sys sys_add_key 5 /* 4280 */
- sys sys_request_key 4
- sys sys_keyctl 5
- sys sys_set_thread_area 1
- sys sys_inotify_init 0
- sys sys_inotify_add_watch 3 /* 4285 */
- sys sys_inotify_rm_watch 2
- sys sys_migrate_pages 4
- sys sys_openat 4
- sys sys_mkdirat 3
- sys sys_mknodat 4 /* 4290 */
- sys sys_fchownat 5
- sys sys_futimesat 3
- sys sys_fstatat64 4
- sys sys_unlinkat 3
- sys sys_renameat 4 /* 4295 */
- sys sys_linkat 5
- sys sys_symlinkat 3
- sys sys_readlinkat 4
- sys sys_fchmodat 3
- sys sys_faccessat 3 /* 4300 */
- sys sys_pselect6 6
- sys sys_ppoll 5
- sys sys_unshare 1
- sys sys_splice 4
- sys sys_sync_file_range 7 /* 4305 */
- sys sys_tee 4
- sys sys_vmsplice 4
- sys sys_move_pages 6
- sys sys_set_robust_list 2
- sys sys_get_robust_list 3 /* 4310 */
- sys sys_kexec_load 4
- sys sys_getcpu 3
- sys sys_epoll_pwait 6
- sys sys_ioprio_set 3
- sys sys_ioprio_get 2 /* 4315 */
- sys sys_utimensat 4
- sys sys_signalfd 3
- sys sys_ni_syscall 0
- sys sys_eventfd 1
- sys sys_fallocate 6 /* 4320 */
- sys sys_timerfd_create 2
- sys sys_timerfd_gettime 2
- sys sys_timerfd_settime 4
- .endm
-
- /* We pre-compute the number of _instruction_ bytes needed to
- load or store the arguments 6-8. Negative values are ignored. */
-
- .macro sys function, nargs
- PTR \function
- LONG (\nargs << 2) - (5 << 2)
- .endm
-
- .align 3
- .type sys_call_table,@object
-EXPORT(sys_call_table)
- syscalltable
- .size sys_call_table, . - sys_call_table
+ PTR sys_io_setup
+ PTR sys_io_destroy
+ PTR sys_io_getevents
+ PTR sys_io_submit
+ PTR sys_io_cancel /* 4245 */
+ PTR sys_exit_group
+ PTR sys_lookup_dcookie
+ PTR sys_epoll_create
+ PTR sys_epoll_ctl
+ PTR sys_epoll_wait /* 4250 */
+ PTR sys_remap_file_pages
+ PTR sys_set_tid_address
+ PTR sys_restart_syscall
+ PTR sys_fadvise64_64
+ PTR sys_statfs64 /* 4255 */
+ PTR sys_fstatfs64
+ PTR sys_timer_create
+ PTR sys_timer_settime
+ PTR sys_timer_gettime
+ PTR sys_timer_getoverrun /* 4260 */
+ PTR sys_timer_delete
+ PTR sys_clock_settime
+ PTR sys_clock_gettime
+ PTR sys_clock_getres
+ PTR sys_clock_nanosleep /* 4265 */
+ PTR sys_tgkill
+ PTR sys_utimes
+ PTR sys_mbind
+ PTR sys_ni_syscall /* sys_get_mempolicy */
+ PTR sys_ni_syscall /* 4270 sys_set_mempolicy */
+ PTR sys_mq_open
+ PTR sys_mq_unlink
+ PTR sys_mq_timedsend
+ PTR sys_mq_timedreceive
+ PTR sys_mq_notify /* 4275 */
+ PTR sys_mq_getsetattr
+ PTR sys_ni_syscall /* sys_vserver */
+ PTR sys_waitid
+ PTR sys_ni_syscall /* available, was setaltroot */
+ PTR sys_add_key /* 4280 */
+ PTR sys_request_key
+ PTR sys_keyctl
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch /* 4285 */
+ PTR sys_inotify_rm_watch
+ PTR sys_migrate_pages
+ PTR sys_openat
+ PTR sys_mkdirat
+ PTR sys_mknodat /* 4290 */
+ PTR sys_fchownat
+ PTR sys_futimesat
+ PTR sys_fstatat64
+ PTR sys_unlinkat
+ PTR sys_renameat /* 4295 */
+ PTR sys_linkat
+ PTR sys_symlinkat
+ PTR sys_readlinkat
+ PTR sys_fchmodat
+ PTR sys_faccessat /* 4300 */
+ PTR sys_pselect6
+ PTR sys_ppoll
+ PTR sys_unshare
+ PTR sys_splice
+ PTR sys_sync_file_range /* 4305 */
+ PTR sys_tee
+ PTR sys_vmsplice
+ PTR sys_move_pages
+ PTR sys_set_robust_list
+ PTR sys_get_robust_list /* 4310 */
+ PTR sys_kexec_load
+ PTR sys_getcpu
+ PTR sys_epoll_pwait
+ PTR sys_ioprio_set
+ PTR sys_ioprio_get /* 4315 */
+ PTR sys_utimensat
+ PTR sys_signalfd
+ PTR sys_ni_syscall /* was timerfd */
+ PTR sys_eventfd
+ PTR sys_fallocate /* 4320 */
+ PTR sys_timerfd_create
+ PTR sys_timerfd_gettime
+ PTR sys_timerfd_settime
+ PTR sys_signalfd4
+ PTR sys_eventfd2 /* 4325 */
+ PTR sys_epoll_create1
+ PTR sys_dup3
+ PTR sys_pipe2
+ PTR sys_inotify_init1
+ PTR sys_preadv /* 4330 */
+ PTR sys_pwritev
+ PTR sys_rt_tgsigqueueinfo
+ PTR sys_perf_event_open
+ PTR sys_accept4
+ PTR sys_recvmmsg /* 4335 */
+ PTR sys_fanotify_init
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
+ PTR sys_name_to_handle_at
+ PTR sys_open_by_handle_at /* 4340 */
+ PTR sys_clock_adjtime
+ PTR sys_syncfs
+ PTR sys_sendmmsg
+ PTR sys_setns
+ PTR sys_process_vm_readv /* 4345 */
+ PTR sys_process_vm_writev
+ PTR sys_kcmp
+ PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 4350 */
+ PTR sys_renameat2
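
With the per-entry nargs word gone, every o32 slot above is a single pointer, which is why the dispatch switched from `sll t0, v0, 3` (8-byte PTR+LONG pairs) to `sll t0, v0, 2`. The index math, as a sketch (the typedef is illustrative):

    typedef long (*syscall_fn_t)(void);
    extern syscall_fn_t sys_call_table[];

    static syscall_fn_t lookup_o32_sketch(unsigned int nr)
    {
            /* 4-byte stride: matches "sll t0, v0, 2" plus "addu t1, t0" */
            return sys_call_table[nr - __NR_O32_Linux];
    }
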
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index dc597b600c6..be2fedd4ae3 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -25,7 +25,7 @@
#define handle_sys64 handle_sys
#endif
- .align 5
+ .align 5
NESTED(handle_sys64, PT_SIZE, sp)
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
/*
@@ -40,7 +40,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_64_Linux # check syscall number
- sltiu t0, t0, __NR_64_Linux_syscalls + 1
+ sltiu t0, t0, __NR_64_Linux_syscalls + 1
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
@@ -66,24 +66,13 @@ NESTED(handle_sys64, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall
- # restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
n64_syscall_exit:
- local_irq_disable # make sure need_resched and
- # signals dont change between
- # sampling and return
- LONG_L a2, TI_FLAGS($28) # current->work
- li t0, _TIF_ALLWORK_MASK
- and t0, a2, t0
- bnez t0, n64_syscall_exit_work
-
- j restore_partial
-
-n64_syscall_exit_work:
- j syscall_exit_work_partial
+ j syscall_exit_partial
/* ------------------------------------------------------------------------ */
@@ -91,8 +80,10 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- li a1, 0
- jal do_syscall_trace
+ daddiu a1, v0, __NR_64_Linux
+ jal syscall_trace_enter
+
+ bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
@@ -109,95 +100,25 @@ syscall_trace_entry:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
illegal_syscall:
/* This also isn't a 64-bit syscall, throw an error. */
- li v0, -ENOSYS # error
+ li v0, ENOSYS # error
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
j n64_syscall_exit
END(handle_sys64)
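
The new `bltz v0, 2f` checks rely on syscall_trace_enter() returning the (possibly rewritten) syscall number, or a negative value when seccomp rejects the call. A hedged sketch of that contract (simplified; audit and tracepoint hooks elided):

    #include <linux/ptrace.h>
    #include <linux/seccomp.h>
    #include <linux/tracehook.h>

    asmlinkage long syscall_trace_enter_sketch(struct pt_regs *regs,
                                               long syscall)
    {
            /* seccomp veto: tell the stubs above to skip the syscall */
            if (secure_computing(syscall) == -1)
                    return -1;

            if (test_thread_flag(TIF_SYSCALL_TRACE))
                    tracehook_report_syscall_entry(regs);

            return syscall;         /* v0 >= 0: proceed to the handler */
    }
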
- LEAF(mips_atomic_set)
- andi v0, a1, 3 # must be word aligned
- bnez v0, bad_alignment
-
- LONG_L v1, TI_ADDR_LIMIT($28) # in legal address range?
- LONG_ADDIU a0, a1, 4
- or a0, a0, a1
- and a0, a0, v1
- bltz a0, bad_address
-
-#ifdef CONFIG_CPU_HAS_LLSC
- /* Ok, this is the ll/sc case. World is sane :-) */
-1: ll v0, (a1)
- move a0, a2
-2: sc a0, (a1)
-#if R10000_LLSC_WAR
- beqzl a0, 1b
-#else
- beqz a0, 1b
-#endif
-
- .section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- .previous
-#else
- sw a1, 16(sp)
- sw a2, 20(sp)
-
- move a0, sp
- move a2, a1
- li a1, 1
- jal do_page_fault
-
- lw a1, 16(sp)
- lw a2, 20(sp)
-
- /*
- * At this point the page should be readable and writable unless
- * there was no more memory available.
- */
-1: lw v0, (a1)
-2: sw a2, (a1)
-
- .section __ex_table,"a"
- PTR 1b, no_mem
- PTR 2b, no_mem
- .previous
-#endif
-
- sd zero, PT_R7(sp) # success
- sd v0, PT_R2(sp) # result
-
- j n64_syscall_exit # continue like a normal syscall
-
-no_mem: li v0, -ENOMEM
- jr ra
-
-bad_address:
- li v0, -EFAULT
- jr ra
-
-bad_alignment:
- li v0, -EINVAL
- jr ra
- END(mips_atomic_set)
-
- LEAF(sys_sysmips)
- beq a0, MIPS_ATOMIC_SET, mips_atomic_set
- j _sys_sysmips
- END(sys_sysmips)
-
.align 3
-sys_call_table:
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
PTR sys_read /* 5000 */
PTR sys_write
PTR sys_open
@@ -207,7 +128,7 @@ sys_call_table:
PTR sys_newlstat
PTR sys_poll
PTR sys_lseek
- PTR old_mmap
+ PTR sys_mips_mmap
PTR sys_mprotect /* 5010 */
PTR sys_munmap
PTR sys_brk
@@ -219,7 +140,7 @@ sys_call_table:
PTR sys_readv
PTR sys_writev
PTR sys_access /* 5020 */
- PTR sys_pipe
+ PTR sysm_pipe
PTR sys_select
PTR sys_sched_yield
PTR sys_mremap
@@ -253,8 +174,8 @@ sys_call_table:
PTR sys_socketpair
PTR sys_setsockopt
PTR sys_getsockopt
- PTR sys_clone /* 5055 */
- PTR sys_fork
+ PTR __sys_clone /* 5055 */
+ PTR __sys_fork
PTR sys_execve
PTR sys_exit
PTR sys_wait4
@@ -371,9 +292,9 @@ sys_call_table:
PTR sys_ni_syscall /* 5170, was get_kernel_syms */
PTR sys_ni_syscall /* was query_module */
PTR sys_quotactl
- PTR sys_nfsservctl
+ PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 5175 for putpmsg */
+ PTR sys_ni_syscall /* 5175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
@@ -475,10 +396,39 @@ sys_call_table:
PTR sys_ioprio_get
PTR sys_utimensat /* 5275 */
PTR sys_signalfd
- PTR sys_ni_syscall
+ PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create /* 5280 */
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
+ PTR sys_signalfd4
+ PTR sys_eventfd2
+ PTR sys_epoll_create1 /* 5285 */
+ PTR sys_dup3
+ PTR sys_pipe2
+ PTR sys_inotify_init1
+ PTR sys_preadv
+ PTR sys_pwritev /* 5290 */
+ PTR sys_rt_tgsigqueueinfo
+ PTR sys_perf_event_open
+ PTR sys_accept4
+ PTR sys_recvmmsg
+ PTR sys_fanotify_init /* 5295 */
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
+ PTR sys_name_to_handle_at
+ PTR sys_open_by_handle_at
+ PTR sys_clock_adjtime /* 5300 */
+ PTR sys_syncfs
+ PTR sys_sendmmsg
+ PTR sys_setns
+ PTR sys_process_vm_readv
+ PTR sys_process_vm_writev /* 5305 */
+ PTR sys_kcmp
+ PTR sys_finit_module
+ PTR sys_getdents64
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 5310 */
+ PTR sys_renameat2
.size sys_call_table,.-sys_call_table
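
The error paths above also change the syscall-restart convention: instead of storing the negated result in PT_R0, the handlers now save the original syscall number there, and syscall_trace_enter() can veto a syscall entirely when seccomp rejects it (the "bltz v0, 2f" path). The consumer of PT_R0 is the reworked handle_signal() in the signal.c hunks later in this diff; a minimal C sketch of that restart logic, using the register roles from this patch (regs[0] = saved syscall number, regs[2] = v0, regs[26] = saved a3):

	/* sketch: restart handling as done by handle_signal() below */
	if (regs->regs[0]) {			/* interrupted a syscall? */
		switch (regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];	/* restore a3 */
			regs->regs[2] = regs->regs[0];	/* reload syscall nr */
			regs->cp0_epc -= 4;		/* redo the syscall */
		}
		regs->regs[0] = 0;		/* don't restart twice */
	}
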
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 12940eca789..c1dbcda4b81 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -17,18 +17,12 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
-/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS 0x00000002 /* tracing system calls */
-
-/* This duplicates the definition from <asm/signal.h> */
-#define SIGILL 4 /* Illegal instruction (ANSI). */
-
#ifndef CONFIG_MIPS32_O32
/* No O32, so define handle_sys here */
#define handle_sysn32 handle_sys
#endif
- .align 5
+ .align 5
NESTED(handle_sysn32, PT_SIZE, sp)
#ifndef CONFIG_MIPS32_O32
.set noat
@@ -39,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_N32_Linux # check syscall number
- sltiu t0, t0, __NR_N32_Linux_syscalls + 1
+ sltiu t0, t0, __NR_N32_Linux_syscalls + 1
#ifndef CONFIG_MIPS32_O32
ld t1, PT_EPC(sp) # skip syscall on return
@@ -53,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd a3, PT_R26(sp) # save a3 for syscall restarting
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, n32_syscall_trace_entry
@@ -65,22 +59,12 @@ NESTED(handle_sysn32, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- local_irq_disable # make sure need_resched and
- # signals dont change between
- # sampling and return
- LONG_L a2, TI_FLAGS($28) # current->work
- li t0, _TIF_ALLWORK_MASK
- and t0, a2, t0
- bnez t0, n32_syscall_exit_work
-
- j restore_partial
-
-n32_syscall_exit_work:
- j syscall_exit_work_partial
+ j syscall_exit_partial
/* ------------------------------------------------------------------------ */
@@ -88,8 +72,10 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- li a1, 0
- jal do_syscall_trace
+ daddiu a1, v0, __NR_N32_Linux
+ jal syscall_trace_enter
+
+ bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
@@ -106,11 +92,12 @@ n32_syscall_trace_entry:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
not_n32_scall:
/* This is not an n32 compatibility syscall, pass it on to
@@ -119,6 +106,7 @@ not_n32_scall:
END(handle_sysn32)
+ .type sysn32_call_table, @object
EXPORT(sysn32_call_table)
PTR sys_read /* 6000 */
PTR sys_write
@@ -129,19 +117,19 @@ EXPORT(sysn32_call_table)
PTR sys_newlstat
PTR sys_poll
PTR sys_lseek
- PTR old_mmap
+ PTR sys_mips_mmap
PTR sys_mprotect /* 6010 */
PTR sys_munmap
PTR sys_brk
- PTR sys32_rt_sigaction
- PTR sys32_rt_sigprocmask
+ PTR compat_sys_rt_sigaction
+ PTR compat_sys_rt_sigprocmask
PTR compat_sys_ioctl /* 6015 */
PTR sys_pread64
PTR sys_pwrite64
PTR compat_sys_readv
PTR compat_sys_writev
PTR sys_access /* 6020 */
- PTR sys_pipe
+ PTR sysm_pipe
PTR compat_sys_select
PTR sys_sched_yield
PTR sys_mremap
@@ -159,12 +147,12 @@ EXPORT(sysn32_call_table)
PTR compat_sys_setitimer
PTR sys_alarm
PTR sys_getpid
- PTR sys32_sendfile
+ PTR compat_sys_sendfile
PTR sys_socket /* 6040 */
PTR sys_connect
PTR sys_accept
PTR sys_sendto
- PTR sys_recvfrom
+ PTR compat_sys_recvfrom
PTR compat_sys_sendmsg /* 6045 */
PTR compat_sys_recvmsg
PTR sys_shutdown
@@ -175,20 +163,20 @@ EXPORT(sysn32_call_table)
PTR sys_socketpair
PTR compat_sys_setsockopt
PTR sys_getsockopt
- PTR sys_clone /* 6055 */
- PTR sys_fork
- PTR sys32_execve
+ PTR __sys_clone /* 6055 */
+ PTR __sys_fork
+ PTR compat_sys_execve
PTR sys_exit
PTR compat_sys_wait4
PTR sys_kill /* 6060 */
- PTR sys32_newuname
+ PTR sys_newuname
PTR sys_semget
PTR sys_semop
- PTR sysn32_semctl
+ PTR compat_sys_semctl
PTR sys_shmdt /* 6065 */
PTR sys_msgget
- PTR sysn32_msgsnd
- PTR sysn32_msgrcv
+ PTR compat_sys_msgsnd
+ PTR compat_sys_msgrcv
PTR compat_sys_msgctl
PTR compat_sys_fcntl /* 6070 */
PTR sys_flock
@@ -214,12 +202,12 @@ EXPORT(sysn32_call_table)
PTR sys_fchown
PTR sys_lchown
PTR sys_umask
- PTR sys32_gettimeofday
+ PTR compat_sys_gettimeofday
PTR compat_sys_getrlimit /* 6095 */
PTR compat_sys_getrusage
PTR compat_sys_sysinfo
PTR compat_sys_times
- PTR sys32_ptrace
+ PTR compat_sys_ptrace
PTR sys_getuid /* 6100 */
PTR sys_syslog
PTR sys_getgid
@@ -245,15 +233,15 @@ EXPORT(sysn32_call_table)
PTR sys_getsid
PTR sys_capget
PTR sys_capset
- PTR sys32_rt_sigpending /* 6125 */
+ PTR compat_sys_rt_sigpending /* 6125 */
PTR compat_sys_rt_sigtimedwait
- PTR sys32_rt_sigqueueinfo
- PTR sysn32_rt_sigsuspend
- PTR sys32_sigaltstack
+ PTR compat_sys_rt_sigqueueinfo
+ PTR compat_sys_rt_sigsuspend
+ PTR compat_sys_sigaltstack
PTR compat_sys_utime /* 6130 */
PTR sys_mknod
- PTR sys32_personality
- PTR sys32_ustat
+ PTR sys_32_personality
+ PTR compat_sys_ustat
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 6135 */
PTR sys_sysfs
@@ -265,21 +253,21 @@ EXPORT(sysn32_call_table)
PTR sys_sched_getscheduler
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
- PTR sys32_sched_rr_get_interval /* 6145 */
+ PTR compat_sys_sched_rr_get_interval /* 6145 */
PTR sys_mlock
PTR sys_munlock
PTR sys_mlockall
PTR sys_munlockall
PTR sys_vhangup /* 6150 */
PTR sys_pivot_root
- PTR sys32_sysctl
+ PTR compat_sys_sysctl
PTR sys_prctl
PTR compat_sys_adjtimex
PTR compat_sys_setrlimit /* 6155 */
PTR sys_chroot
PTR sys_sync
PTR sys_acct
- PTR sys32_settimeofday
+ PTR compat_sys_settimeofday
PTR compat_sys_mount /* 6160 */
PTR sys_umount
PTR sys_swapon
@@ -293,9 +281,9 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* 6170, was get_kernel_syms */
PTR sys_ni_syscall /* was query_module */
PTR sys_quotactl
- PTR compat_sys_nfsservctl
+ PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 6175 for putpmsg */
+ PTR sys_ni_syscall /* 6175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
@@ -320,10 +308,10 @@ EXPORT(sysn32_call_table)
PTR sys_cacheflush
PTR sys_cachectl
PTR sys_sysmips
- PTR sys_io_setup /* 6200 */
+ PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
+ PTR compat_sys_io_getevents
+ PTR compat_sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 6205 */
PTR sys_lookup_dcookie
@@ -385,12 +373,12 @@ EXPORT(sysn32_call_table)
PTR sys_fchmodat
PTR sys_faccessat
PTR compat_sys_pselect6
- PTR sys_ppoll /* 6265 */
+ PTR compat_sys_ppoll /* 6265 */
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee
- PTR sys_vmsplice /* 6270 */
+ PTR compat_sys_vmsplice /* 6270 */
PTR sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
@@ -400,11 +388,40 @@ EXPORT(sysn32_call_table)
PTR sys_ioprio_set
PTR sys_ioprio_get
PTR compat_sys_utimensat
- PTR compat_sys_signalfd /* 5280 */
- PTR sys_ni_syscall
+ PTR compat_sys_signalfd /* 6280 */
+ PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create
- PTR sys_timerfd_gettime /* 5285 */
- PTR sys_timerfd_settime
+ PTR compat_sys_timerfd_gettime /* 6285 */
+ PTR compat_sys_timerfd_settime
+ PTR compat_sys_signalfd4
+ PTR sys_eventfd2
+ PTR sys_epoll_create1
+ PTR sys_dup3 /* 6290 */
+ PTR sys_pipe2
+ PTR sys_inotify_init1
+ PTR compat_sys_preadv
+ PTR compat_sys_pwritev
+ PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
+ PTR sys_perf_event_open
+ PTR sys_accept4
+ PTR compat_sys_recvmmsg
+ PTR sys_getdents64
+ PTR sys_fanotify_init /* 6300 */
+ PTR sys_fanotify_mark
+ PTR sys_prlimit64
+ PTR sys_name_to_handle_at
+ PTR sys_open_by_handle_at
+ PTR compat_sys_clock_adjtime /* 6305 */
+ PTR sys_syncfs
+ PTR compat_sys_sendmmsg
+ PTR sys_setns
+ PTR compat_sys_process_vm_readv
+ PTR compat_sys_process_vm_writev /* 6310 */
+ PTR sys_kcmp
+ PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr
+ PTR sys_renameat2 /* 6315 */
.size sysn32_call_table,.-sysn32_call_table
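
Most of the churn in this table is the replacement of MIPS-private sys32_*/sysn32_* shims with the generic compat_sys_* wrappers, which narrow and widen arguments coming from 32-bit userland in one canonical place. A hypothetical wrapper, only to illustrate the pattern (compat_ptr() and compat_uptr_t are real kernel helpers; sys_example/compat_sys_example are made up for this sketch):

	/* hypothetical sketch -- not part of this patch */
	asmlinkage long compat_sys_example(compat_uptr_t ubuf, compat_size_t len)
	{
		/* widen the 32-bit user pointer before using it */
		void __user *buf = compat_ptr(ubuf);

		return sys_example(buf, (size_t)len);
	}
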
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 9a275efb4f0..f1343ccd7ed 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -10,7 +10,7 @@
*
* Hairy, the userspace application uses a different argument passing
* convention than the kernel, so we have to translate things from o32
- * to ABI64 calling convention. 64-bit syscalls are also processed
+ * to ABI64 calling convention. 64-bit syscalls are also processed
* here for now.
*/
#include <linux/errno.h>
@@ -24,7 +24,7 @@
#include <asm/unistd.h>
#include <asm/sysmips.h>
- .align 5
+ .align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
@@ -53,7 +53,7 @@ NESTED(handle_sys, PT_SIZE, sp)
sll a3, a3, 0
dsll t0, v0, 3 # offset into table
- ld t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
+ ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
sd a3, PT_R26(sp) # save a3 for syscall restarting
@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
PTR 4b, bad_stack
.previous
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ li t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
bnez t0, trace_a_syscall
@@ -93,23 +93,13 @@ NESTED(handle_sys, PT_SIZE, sp)
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
o32_syscall_exit:
- local_irq_disable # make need_resched and
- # signals dont change between
- # sampling and return
- LONG_L a2, TI_FLAGS($28)
- li t0, _TIF_ALLWORK_MASK
- and t0, a2, t0
- bnez t0, o32_syscall_exit_work
-
- j restore_partial
-
-o32_syscall_exit_work:
- j syscall_exit_work_partial
+ j syscall_exit_partial
/* ------------------------------------------------------------------------ */
@@ -122,8 +112,20 @@ trace_a_syscall:
move s0, t2 # Save syscall pointer
move a0, sp
- li a1, 0
- jal do_syscall_trace
+ /*
+ * syscall number is in v0 unless we called syscall(__NR_###)
+ * where the real syscall number is in a0
+ * note: NR_syscall is the first O32 syscall but the macro is
+ * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+ * therefore __NR_O32_Linux is used (4000)
+ */
+ addiu a1, v0, __NR_O32_Linux
+ bnez v0, 1f /* __NR_syscall at offset 0 */
+ lw a1, PT_R4(sp)
+
+1: jal syscall_trace_enter
+
+ bltz v0, 2f # seccomp failed? Skip syscall
move t0, s0
RESTORE_STATIC
@@ -142,11 +144,12 @@ trace_a_syscall:
sd t0, PT_R7(sp) # set error flag
beqz t0, 1f
+ ld t1, PT_R2(sp) # syscall number
dnegu v0 # error
- sd v0, PT_R0(sp) # set flag for syscall restarting
+ sd t1, PT_R0(sp) # save it for syscall restarting
1: sd v0, PT_R2(sp) # result
- j syscall_exit
+2: j syscall_exit
/* ------------------------------------------------------------------------ */
@@ -154,8 +157,7 @@ trace_a_syscall:
* The stackpointer for a call with more than 4 arguments is bad.
*/
bad_stack:
- dnegu v0 # error
- sd v0, PT_R0(sp)
+ li v0, EFAULT
sd v0, PT_R2(sp)
li t0, 1 # set error flag
sd t0, PT_R7(sp)
@@ -174,14 +176,12 @@ not_o32_scall:
END(handle_sys)
LEAF(sys32_syscall)
- sltu v0, a0, __NR_O32_Linux + __NR_O32_Linux_syscalls + 1
+ subu t0, a0, __NR_O32_Linux # check syscall number
+ sltiu v0, t0, __NR_O32_Linux_syscalls + 1
+ beqz t0, einval # do not recurse
+ dsll t1, t0, 3
beqz v0, einval
-
- dsll v0, a0, 3
- ld t2, (sys_call_table - (__NR_O32_Linux * 8))(v0)
-
- li v1, 4000 # indirect syscall number
- beq a0, v1, einval # do not recurse
+ ld t2, sys32_call_table(t1) # syscall routine
move a0, a1 # shift argument registers
move a1, a2
@@ -198,16 +198,16 @@ LEAF(sys32_syscall)
jr t2
/* Unreached */
-einval: li v0, -EINVAL
+einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
.align 3
- .type sys_call_table,@object
-sys_call_table:
+ .type sys32_call_table,@object
+EXPORT(sys32_call_table)
PTR sys32_syscall /* 4000 */
PTR sys_exit
- PTR sys_fork
+ PTR __sys_fork
PTR sys_read
PTR sys_write
PTR compat_sys_open /* 4005 */
@@ -216,7 +216,7 @@ sys_call_table:
PTR sys_creat
PTR sys_link
PTR sys_unlink /* 4010 */
- PTR sys32_execve
+ PTR compat_sys_execve
PTR sys_chdir
PTR compat_sys_time
PTR sys_mknod
@@ -231,7 +231,7 @@ sys_call_table:
PTR sys_setuid
PTR sys_getuid
PTR compat_sys_stime /* 4025 */
- PTR sys32_ptrace
+ PTR compat_sys_ptrace
PTR sys_alarm
PTR sys_ni_syscall /* was sys_fstat */
PTR sys_pause
@@ -247,7 +247,7 @@ sys_call_table:
PTR sys_mkdir
PTR sys_rmdir /* 4040 */
PTR sys_dup
- PTR sys_pipe
+ PTR sysm_pipe
PTR compat_sys_times
PTR sys_ni_syscall
PTR sys_brk /* 4045 */
@@ -267,12 +267,12 @@ sys_call_table:
PTR sys_olduname
PTR sys_umask /* 4060 */
PTR sys_chroot
- PTR sys32_ustat
+ PTR compat_sys_ustat
PTR sys_dup2
PTR sys_getppid
PTR sys_getpgrp /* 4065 */
PTR sys_setsid
- PTR sys32_sigaction
+ PTR sys_32_sigaction
PTR sys_sgetmask
PTR sys_ssetmask
PTR sys_setreuid /* 4070 */
@@ -283,8 +283,8 @@ sys_call_table:
PTR compat_sys_setrlimit /* 4075 */
PTR compat_sys_getrlimit
PTR compat_sys_getrusage
- PTR sys32_gettimeofday
- PTR sys32_settimeofday
+ PTR compat_sys_gettimeofday
+ PTR compat_sys_settimeofday
PTR sys_getgroups /* 4080 */
PTR sys_setgroups
PTR sys_ni_syscall /* old_select */
@@ -295,10 +295,10 @@ sys_call_table:
PTR sys_swapon
PTR sys_reboot
PTR compat_sys_old_readdir
- PTR old_mmap /* 4090 */
+ PTR sys_mips_mmap /* 4090 */
PTR sys_munmap
- PTR sys_truncate
- PTR sys_ftruncate
+ PTR compat_sys_truncate
+ PTR compat_sys_ftruncate
PTR sys_fchmod
PTR sys_fchown /* 4095 */
PTR sys_getpriority
@@ -322,12 +322,12 @@ sys_call_table:
PTR compat_sys_wait4
PTR sys_swapoff /* 4115 */
PTR compat_sys_sysinfo
- PTR sys32_ipc
+ PTR compat_sys_ipc
PTR sys_fsync
PTR sys32_sigreturn
- PTR sys32_clone /* 4120 */
+ PTR __sys_clone /* 4120 */
PTR sys_setdomainname
- PTR sys32_newuname
+ PTR sys_newuname
PTR sys_ni_syscall /* sys_modify_ldt */
PTR compat_sys_adjtimex
PTR sys_mprotect /* 4125 */
@@ -341,11 +341,11 @@ sys_call_table:
PTR sys_fchdir
PTR sys_bdflush
PTR sys_sysfs /* 4135 */
- PTR sys32_personality
- PTR sys_ni_syscall /* for afs_syscall */
+ PTR sys_32_personality
+ PTR sys_ni_syscall /* for afs_syscall */
PTR sys_setfsuid
PTR sys_setfsgid
- PTR sys32_llseek /* 4140 */
+ PTR sys_32_llseek /* 4140 */
PTR compat_sys_getdents
PTR compat_sys_select
PTR sys_flock
@@ -358,19 +358,19 @@ sys_call_table:
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
- PTR sys32_sysctl
+ PTR compat_sys_sysctl
PTR sys_mlock
PTR sys_munlock /* 4155 */
PTR sys_mlockall
PTR sys_munlockall
PTR sys_sched_setparam
PTR sys_sched_getparam
- PTR sys_sched_setscheduler /* 4160 */
+ PTR sys_sched_setscheduler /* 4160 */
PTR sys_sched_getscheduler
PTR sys_sched_yield
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
- PTR sys32_sched_rr_get_interval /* 4165 */
+ PTR compat_sys_sched_rr_get_interval /* 4165 */
PTR compat_sys_nanosleep
PTR sys_mremap
PTR sys_accept
@@ -380,8 +380,8 @@ sys_call_table:
PTR sys_getsockname
PTR sys_getsockopt
PTR sys_listen
- PTR sys_recv /* 4175 */
- PTR sys_recvfrom
+ PTR compat_sys_recv /* 4175 */
+ PTR compat_sys_recvfrom
PTR compat_sys_recvmsg
PTR sys_send
PTR compat_sys_sendmsg
@@ -394,30 +394,30 @@ sys_call_table:
PTR sys_getresuid
PTR sys_ni_syscall /* was query_module */
PTR sys_poll
- PTR compat_sys_nfsservctl
+ PTR sys_ni_syscall /* was nfsservctl */
PTR sys_setresgid /* 4190 */
PTR sys_getresgid
PTR sys_prctl
PTR sys32_rt_sigreturn
- PTR sys32_rt_sigaction
- PTR sys32_rt_sigprocmask /* 4195 */
- PTR sys32_rt_sigpending
+ PTR compat_sys_rt_sigaction
+ PTR compat_sys_rt_sigprocmask /* 4195 */
+ PTR compat_sys_rt_sigpending
PTR compat_sys_rt_sigtimedwait
- PTR sys32_rt_sigqueueinfo
- PTR sys32_rt_sigsuspend
- PTR sys32_pread /* 4200 */
- PTR sys32_pwrite
+ PTR compat_sys_rt_sigqueueinfo
+ PTR compat_sys_rt_sigsuspend
+ PTR sys_32_pread /* 4200 */
+ PTR sys_32_pwrite
PTR sys_chown
PTR sys_getcwd
PTR sys_capget
PTR sys_capset /* 4205 */
- PTR sys32_sigaltstack
- PTR sys32_sendfile
+ PTR compat_sys_sigaltstack
+ PTR compat_sys_sendfile
PTR sys_ni_syscall
PTR sys_ni_syscall
- PTR sys32_mmap2 /* 4210 */
- PTR sys32_truncate64
- PTR sys32_ftruncate64
+ PTR sys_mips_mmap2 /* 4210 */
+ PTR sys_32_truncate64
+ PTR sys_32_ftruncate64
PTR sys_newstat
PTR sys_newlstat
PTR sys_newfstat /* 4215 */
@@ -446,13 +446,13 @@ sys_call_table:
PTR compat_sys_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
- PTR sys_io_setup
+ PTR compat_sys_io_setup
PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
+ PTR compat_sys_io_getevents
+ PTR compat_sys_io_submit
PTR sys_io_cancel /* 4245 */
PTR sys_exit_group
- PTR sys_lookup_dcookie
+ PTR compat_sys_lookup_dcookie
PTR sys_epoll_create
PTR sys_epoll_ctl
PTR sys_epoll_wait /* 4250 */
@@ -483,7 +483,7 @@ sys_call_table:
PTR compat_sys_mq_notify /* 4275 */
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* sys_vserver */
- PTR sys32_waitid
+ PTR compat_sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
@@ -507,12 +507,12 @@ sys_call_table:
PTR sys_fchmodat
PTR sys_faccessat /* 4300 */
PTR compat_sys_pselect6
- PTR sys_ppoll
+ PTR compat_sys_ppoll
PTR sys_unshare
PTR sys_splice
PTR sys32_sync_file_range /* 4305 */
PTR sys_tee
- PTR sys_vmsplice
+ PTR compat_sys_vmsplice
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list /* 4310 */
@@ -523,10 +523,38 @@ sys_call_table:
PTR sys_ioprio_get /* 4315 */
PTR compat_sys_utimensat
PTR compat_sys_signalfd
- PTR sys_ni_syscall
+ PTR sys_ni_syscall /* was timerfd */
PTR sys_eventfd
PTR sys32_fallocate /* 4320 */
PTR sys_timerfd_create
- PTR sys_timerfd_gettime
- PTR sys_timerfd_settime
- .size sys_call_table,.-sys_call_table
+ PTR compat_sys_timerfd_gettime
+ PTR compat_sys_timerfd_settime
+ PTR compat_sys_signalfd4
+ PTR sys_eventfd2 /* 4325 */
+ PTR sys_epoll_create1
+ PTR sys_dup3
+ PTR sys_pipe2
+ PTR sys_inotify_init1
+ PTR compat_sys_preadv /* 4330 */
+ PTR compat_sys_pwritev
+ PTR compat_sys_rt_tgsigqueueinfo
+ PTR sys_perf_event_open
+ PTR sys_accept4
+ PTR compat_sys_recvmmsg /* 4335 */
+ PTR sys_fanotify_init
+ PTR compat_sys_fanotify_mark
+ PTR sys_prlimit64
+ PTR sys_name_to_handle_at
+ PTR compat_sys_open_by_handle_at /* 4340 */
+ PTR compat_sys_clock_adjtime
+ PTR sys_syncfs
+ PTR compat_sys_sendmmsg
+ PTR sys_setns
+ PTR compat_sys_process_vm_readv /* 4345 */
+ PTR compat_sys_process_vm_writev
+ PTR sys_kcmp
+ PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 4350 */
+ PTR sys_renameat2
+ .size sys32_call_table,.-sys32_call_table
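
sys32_syscall(), the O32 indirect syscall, is reworked along the same lines: the range check is done on the rebased number, syscall(__NR_O32_Linux) can no longer recurse into itself, and failures now return -ENOSYS rather than -EINVAL. A C sketch of the new guard (constants from <asm/unistd.h>; __NR_O32_Linux is 4000, and entries are 8 bytes, hence the "dsll t1, t0, 3" above):

	/* sketch of the entry check now done in sys32_syscall() */
	static long o32_syscall_index(unsigned long nr)
	{
		long idx = nr - __NR_O32_Linux;	/* rebase to a table index */

		if (idx <= 0 || idx > __NR_O32_Linux_syscalls)
			return -ENOSYS;	/* out of range, or syscall(4000) itself */
		return idx;		/* caller loads sys32_call_table[idx] */
	}
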
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
new file mode 100644
index 00000000000..076ead2a985
--- /dev/null
+++ b/arch/mips/kernel/segment.c
@@ -0,0 +1,110 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+
+static void build_segment_config(char *str, unsigned int cfg)
+{
+ unsigned int am;
+ static const char * const am_str[] = {
+ "UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
+ "RSRVD", "UUSK"};
+
+ /* Segment access mode. */
+ am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
+ str += sprintf(str, "%-5s", am_str[am]);
+
+ /*
+ * Access modes MK, MSK and MUSK are mapped segments. Therefore
+ * there is no direct physical address mapping.
+ */
+ if ((am == 0) || (am > 3)) {
+ str += sprintf(str, " %03lx",
+ ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ str += sprintf(str, " %01ld",
+ ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
+ } else {
+ str += sprintf(str, " UND");
+ str += sprintf(str, " U");
+ }
+
+ /* Exception configuration. */
+ str += sprintf(str, " %01ld\n",
+ ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
+}
+
+static int show_segments(struct seq_file *m, void *v)
+{
+ unsigned int segcfg;
+ char str[42];
+
+ seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n");
+ seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n");
+
+ segcfg = read_c0_segctl0();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 0 e0000000 512M %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 1 c0000000 512M %s", str);
+
+ segcfg = read_c0_segctl1();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 2 a0000000 512M %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 3 80000000 512M %s", str);
+
+ segcfg = read_c0_segctl2();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 4 40000000 1G %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 5 00000000 1G %s\n", str);
+
+ return 0;
+}
+
+static int segments_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_segments, NULL);
+}
+
+static const struct file_operations segments_fops = {
+ .open = segments_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init segments_info(void)
+{
+ extern struct dentry *mips_debugfs_dir;
+ struct dentry *segments;
+
+ if (cpu_has_segments) {
+ if (!mips_debugfs_dir)
+ return -ENODEV;
+
+ segments = debugfs_create_file("segments", S_IRUGO,
+ mips_debugfs_dir, NULL,
+ &segments_fops);
+ if (!segments)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+device_initcall(segments_info);
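
The new file is a debugfs consumer: together with the "mips" debugfs directory created in setup.c below, the table ends up at /sys/kernel/debug/mips/segments on kernels with CONFIG_DEBUG_FS and a CPU that has segmentation control (the path assumes debugfs is mounted in the usual place). A trivial userspace reader, just for illustration:

	/* dump the segment configuration exposed above (assumed path) */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/mips/segments", "r");
		char line[128];

		if (!f) {
			perror("segments");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
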
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
deleted file mode 100644
index 1265358cdca..00000000000
--- a/arch/mips/kernel/semaphore.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * MIPS-specific semaphore code.
- *
- * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
- * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
- * to eliminate the SMP races in the old version between the updates
- * of `count' and `waking'. Now we use negative `count' values to
- * indicate that some process(es) are waiting for the semaphore.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/atomic.h>
-#include <asm/cpu-features.h>
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-#include <asm/war.h>
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- * old_count = sem->count;
- * tmp = MAX(old_count, 0) + incr;
- * sem->count = tmp;
- * return old_count;
- *
- * On machines without lld/scd we need a spinlock to make the manipulation of
- * sem->count and sem->waking atomic. Scalability isn't an issue because
- * this lock is used on UP only so it's just an empty variable.
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
- int old_count, tmp;
-
- if (cpu_has_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %2 # __sem_update_count \n"
- " sra %1, %0, 31 \n"
- " not %1 \n"
- " and %1, %0, %1 \n"
- " addu %1, %1, %3 \n"
- " sc %1, %2 \n"
- " beqzl %1, 1b \n"
- " .set mips0 \n"
- : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
- : "r" (incr), "m" (sem->count));
- } else if (cpu_has_llsc) {
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %2 # __sem_update_count \n"
- " sra %1, %0, 31 \n"
- " not %1 \n"
- " and %1, %0, %1 \n"
- " addu %1, %1, %3 \n"
- " sc %1, %2 \n"
- " beqz %1, 1b \n"
- " .set mips0 \n"
- : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
- : "r" (incr), "m" (sem->count));
- } else {
- static DEFINE_SPINLOCK(semaphore_lock);
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_lock, flags);
- old_count = atomic_read(&sem->count);
- tmp = max_t(int, old_count, 0) + incr;
- atomic_set(&sem->count, tmp);
- spin_unlock_irqrestore(&semaphore_lock, flags);
- }
-
- return old_count;
-}
-
-void __up(struct semaphore *sem)
-{
- /*
- * Note that we incremented count in up() before we came here,
- * but that was ineffective since the result was <= 0, and
- * any negative value of count is equivalent to 0.
- * This ends up setting count to 1, unless count is now > 0
- * (i.e. because some other cpu has called up() in the meantime),
- * in which case we just increment count.
- */
- __sem_update_count(sem, 1);
- wake_up(&sem->wait);
-}
-
-EXPORT_SYMBOL(__up);
-
-/*
- * Note that when we come in to __down or __down_interruptible,
- * we have already decremented count, but that decrement was
- * ineffective since the result was < 0, and any negative value
- * of count is equivalent to 0.
- * Thus it is only when we decrement count from some value > 0
- * that we have actually got the semaphore.
- */
-void __sched __down(struct semaphore *sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- /*
- * Try to get the semaphore. If the count is > 0, then we've
- * got the semaphore; we decrement count and exit the loop.
- * If the count is 0 or negative, we set it to -1, indicating
- * that we are asleep, and then sleep.
- */
- while (__sem_update_count(sem, -1) <= 0) {
- schedule();
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- }
- remove_wait_queue(&sem->wait, &wait);
- __set_task_state(tsk, TASK_RUNNING);
-
- /*
- * If there are any more sleepers, wake one of them up so
- * that it can either get the semaphore, or set count to -1
- * indicating that there are still processes sleeping.
- */
- wake_up(&sem->wait);
-}
-
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- __set_task_state(tsk, TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- while (__sem_update_count(sem, -1) <= 0) {
- if (signal_pending(current)) {
- /*
- * A signal is pending - give up trying.
- * Set sem->count to 0 if it is negative,
- * since we are no longer sleeping.
- */
- __sem_update_count(sem, 0);
- retval = -EINTR;
- break;
- }
- schedule();
- set_task_state(tsk, TASK_INTERRUPTIBLE);
- }
- remove_wait_queue(&sem->wait, &wait);
- __set_task_state(tsk, TASK_RUNNING);
-
- wake_up(&sem->wait);
- return retval;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
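
The whole file goes away because MIPS now uses the generic semaphore implementation; the ll/sc loop above was the only architecture-specific trick in it. Its effect -- clamp the old count at zero, add the increment, and return the pre-update value, all atomically -- can be written portably as a compare-and-swap loop. A sketch using the GCC/Clang __atomic builtins (plain C, not the kernel's atomic API):

	/* portable sketch of __sem_update_count(); illustrative only */
	static int sem_update_count(int *count, int incr)
	{
		int old = __atomic_load_n(count, __ATOMIC_RELAXED);
		int new;

		do {
			new = (old < 0 ? 0 : old) + incr; /* MAX(old, 0) + incr */
		} while (!__atomic_compare_exchange_n(count, &old, new,
						      0, __ATOMIC_SEQ_CST,
						      __ATOMIC_SEQ_CST));
		return old;	/* pre-update value, like the ll/sc version */
	}
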
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 39f3dfe134f..a842154d57d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -8,12 +8,13 @@
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
* Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/screen_info.h>
+#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
@@ -21,6 +22,8 @@
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
+#include <linux/kexec.h>
+#include <linux/sizes.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -30,7 +33,7 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
-#include <asm/system.h>
+#include <asm/prom.h>
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -58,48 +61,61 @@ EXPORT_SYMBOL(mips_machtype);
struct boot_mem_map boot_mem_map;
-static char command_line[CL_SIZE];
- char arcs_cmdline[CL_SIZE]=CONFIG_CMDLINE;
+static char __initdata command_line[COMMAND_LINE_SIZE];
+char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+#endif
/*
* mips_io_port_base is the begin of the address space to which x86 style
* I/O ports are mapped.
*/
-const unsigned long mips_io_port_base __read_mostly = -1;
+const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);
-/*
- * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped
- * for the processor.
- */
-unsigned long isa_slot_offset;
-EXPORT_SYMBOL(isa_slot_offset);
-
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
+static void *detect_magic __initdata = detect_memory_region;
+
void __init add_memory_region(phys_t start, phys_t size, long type)
{
int x = boot_mem_map.nr_map;
- struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+ int i;
/* Sanity check */
if (start + size < start) {
- printk("Trying to add an invalid memory region, skipped\n");
+ pr_warning("Trying to add an invalid memory region, skipped\n");
return;
}
/*
- * Try to merge with previous entry if any. This is far less than
- * perfect but is sufficient for most real world cases.
+ * Try to merge with existing entry, if any.
*/
- if (x && prev->addr + prev->size == start && prev->type == type) {
- prev->size += size;
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ struct boot_mem_map_entry *entry = boot_mem_map.map + i;
+ unsigned long top;
+
+ if (entry->type != type)
+ continue;
+
+ if (start + size < entry->addr)
+ continue; /* no overlap */
+
+ if (entry->addr + entry->size < start)
+ continue; /* no overlap */
+
+ top = max(entry->addr + entry->size, start + size);
+ entry->addr = min(entry->addr, start);
+ entry->size = top - entry->addr;
+
return;
}
- if (x == BOOT_MEM_MAP_MAX) {
- printk("Ooops! Too many entries in the memory map!\n");
+ if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
+ pr_err("Ooops! Too many entries in the memory map!\n");
return;
}
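
add_memory_region() previously merged a new range only with the immediately preceding entry; it now scans the whole map and coalesces any same-type entry that overlaps or abuts the new range, using min/max arithmetic on the endpoints. A stand-alone demo of that arithmetic (illustrative only, outside the kernel):

	#include <stdio.h>

	struct region { unsigned long addr, size; };

	/* merge [start, start + size) into *e if the ranges touch or overlap */
	static int try_merge(struct region *e, unsigned long start,
			     unsigned long size)
	{
		unsigned long top = e->addr + e->size;

		if (start + size < e->addr || top < start)
			return 0;			/* disjoint */
		if (start + size > top)
			top = start + size;
		if (start < e->addr)
			e->addr = start;
		e->size = top - e->addr;
		return 1;
	}

	int main(void)
	{
		struct region r = { 0x1000, 0x2000 };	/* [0x1000, 0x3000) */

		try_merge(&r, 0x2800, 0x1000);		/* overlaps the tail */
		printf("merged: %#lx +%#lx\n", r.addr, r.size);
		return 0;				/* prints 0x1000 +0x2800 */
	}
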
@@ -109,28 +125,50 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
boot_mem_map.nr_map++;
}
+void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+{
+ void *dm = &detect_magic;
+ phys_t size;
+
+ for (size = sz_min; size < sz_max; size <<= 1) {
+ if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+ break;
+ }
+
+ pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+ ((unsigned long long) size) / SZ_1M,
+ (unsigned long long) start,
+ ((unsigned long long) sz_min) / SZ_1M,
+ ((unsigned long long) sz_max) / SZ_1M);
+
+ add_memory_region(start, size, BOOT_MEM_RAM);
+}
+
static void __init print_memory_map(void)
{
int i;
const int field = 2 * sizeof(unsigned long);
for (i = 0; i < boot_mem_map.nr_map; i++) {
- printk(" memory: %0*Lx @ %0*Lx ",
+ printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
field, (unsigned long long) boot_mem_map.map[i].size,
field, (unsigned long long) boot_mem_map.map[i].addr);
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
- printk("(usable)\n");
+ printk(KERN_CONT "(usable)\n");
+ break;
+ case BOOT_MEM_INIT_RAM:
+ printk(KERN_CONT "(usable after init)\n");
break;
case BOOT_MEM_ROM_DATA:
- printk("(ROM data)\n");
+ printk(KERN_CONT "(ROM data)\n");
break;
case BOOT_MEM_RESERVED:
- printk("(reserved)\n");
+ printk(KERN_CONT "(reserved)\n");
break;
default:
- printk("type %lu\n", boot_mem_map.map[i].type);
+ printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
break;
}
}
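
detect_memory_region(), added above, sizes RAM by exploiting address aliasing: on many systems an access past the end of physical memory wraps around, so once size reaches the real amount of RAM the kernel's own detect_magic marker reappears at dm + size and the loop stops. A userspace simulation of the idea, with the aliasing modelled explicitly (hypothetical, for illustration only):

	#include <stdio.h>
	#include <string.h>

	#define SZ_1M (1024 * 1024)

	/* pretend 4 MB of RAM whose bus aliases modulo its size */
	static unsigned char ram[4 * SZ_1M];
	static unsigned char peek(size_t addr) { return ram[addr % sizeof(ram)]; }

	int main(void)
	{
		const unsigned int magic = 0x494E5244;	/* arbitrary marker */
		size_t size, i, off = 0x1000;

		memcpy(&ram[off], &magic, sizeof(magic));

		for (size = SZ_1M; size < 64 * SZ_1M; size <<= 1) {
			int match = 1;

			for (i = 0; i < sizeof(magic); i++)
				if (peek(off + i) != peek(off + size + i))
					match = 0;
			if (match)
				break;		/* wrapped around: size found */
		}
		printf("detected %zu MB\n", size / SZ_1M);
		return 0;
	}
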
@@ -167,36 +205,21 @@ early_param("rd_size", rd_size_early);
static unsigned long __init init_initrd(void)
{
unsigned long end;
- u32 *initrd_header;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perform sanity checks and use them if all looks good.
*/
- if (initrd_start && initrd_end > initrd_start)
- goto sanitize;
-
- /*
- * See if initrd has been added to the kernel image by
- * arch/mips/boot/addinitrd.c. In that case a header is
- * prepended to initrd and is made up of 8 bytes. The first
- * word is a magic number and the second one is the size of
- * initrd. Initrd start must be page aligned in any case.
- */
- initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
- if (initrd_header[0] != 0x494E5244)
+ if (!initrd_start || initrd_end <= initrd_start)
goto disable;
- initrd_start = (unsigned long)(initrd_header + 2);
- initrd_end = initrd_start + initrd_header[1];
-sanitize:
if (initrd_start & ~PAGE_MASK) {
- printk(KERN_ERR "initrd start must be page aligned\n");
+ pr_err("initrd start must be page aligned\n");
goto disable;
}
if (initrd_start < PAGE_OFFSET) {
- printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
+ pr_err("initrd start < PAGE_OFFSET\n");
goto disable;
}
@@ -228,18 +251,18 @@ static void __init finalize_initrd(void)
goto disable;
}
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
- printk("Initrd extends beyond end of memory");
+ printk(KERN_ERR "Initrd extends beyond end of memory");
goto disable;
}
reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
initrd_below_start_ok = 1;
- printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
- initrd_start, size);
+ pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ initrd_start, size);
return;
disable:
- printk(" - disabling initrd\n");
+ printk(KERN_CONT " - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
}
@@ -277,11 +300,13 @@ static void __init bootmem_init(void)
int i;
/*
- * Init any data related to initrd. It's a nop if INITRD is
- * not selected. Once that done we can determine the low bound
- * of usable memory.
+ * Sanity check any INITRD first. We don't take it into account
+ * for bootmem setup initially, rely on the end-of-kernel-code
+ * as our memory range starting point. Once bootmem is inited we
+ * will reserve the area used for the initrd.
*/
- reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
+ init_initrd();
+ reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
/*
* max_low_pfn is not a number of pages. The number of pages
@@ -317,20 +342,19 @@ static void __init bootmem_init(void)
if (min_low_pfn >= max_low_pfn)
panic("Incorrect memory mapping !!!");
if (min_low_pfn > ARCH_PFN_OFFSET) {
- printk(KERN_INFO
- "Wasting %lu bytes for tracking %lu unused pages\n",
- (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
- min_low_pfn - ARCH_PFN_OFFSET);
+ pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
+ (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
+ min_low_pfn - ARCH_PFN_OFFSET);
} else if (min_low_pfn < ARCH_PFN_OFFSET) {
- printk(KERN_INFO
- "%lu free pages won't be used\n",
- ARCH_PFN_OFFSET - min_low_pfn);
+ pr_info("%lu free pages won't be used\n",
+ ARCH_PFN_OFFSET - min_low_pfn);
}
min_low_pfn = ARCH_PFN_OFFSET;
/*
* Determine low and high memory ranges
*/
+ max_pfn = max_low_pfn;
if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
highstart_pfn = PFN_DOWN(HIGHMEM_START);
@@ -339,6 +363,14 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * mapstart should be after initrd_end
+ */
+ if (initrd_end)
+ mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
/*
* Initialize the boot-time allocator with low memory only.
*/
@@ -369,7 +401,7 @@ static void __init bootmem_init(void)
continue;
#endif
- add_active_range(0, start, end);
+ memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
/*
@@ -378,15 +410,24 @@ static void __init bootmem_init(void)
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end, size;
+ start = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr
+ + boot_mem_map.map[i].size);
+
/*
* Reserve usable memory.
*/
- if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ switch (boot_mem_map.map[i].type) {
+ case BOOT_MEM_RAM:
+ break;
+ case BOOT_MEM_INIT_RAM:
+ memory_present(0, start, end);
+ continue;
+ default:
+ /* Not usable memory */
continue;
+ }
- start = PFN_UP(boot_mem_map.map[i].addr);
- end = PFN_DOWN(boot_mem_map.map[i].addr
- + boot_mem_map.map[i].size);
/*
* We are rounding up the start address of usable memory
* and at the end of the usable range downwards.
@@ -439,13 +480,13 @@ static void __init bootmem_init(void)
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
- * This was rather impractical because it meant plat_mem_setup had to
+ * This was rather impractical because it meant plat_mem_setup had to
* get away without any kind of memory allocator. To keep old code from
* breaking plat_setup was just renamed to plat_setup and a second platform
* initialization hook for anything else was introduced.
*/
-static int usermem __initdata = 0;
+static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
@@ -459,7 +500,7 @@ static int __init early_parse_mem(char *p)
if (usermem == 0) {
boot_mem_map.nr_map = 0;
usermem = 1;
- }
+ }
start = 0;
size = memparse(p, &p);
if (*p == '@')
@@ -470,6 +511,102 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
+#ifdef CONFIG_PROC_VMCORE
+unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
+static int __init early_parse_elfcorehdr(char *p)
+{
+ int i;
+
+ setup_elfcorehdr = memparse(p, &p);
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long start = boot_mem_map.map[i].addr;
+ unsigned long end = (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size);
+ if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment, that should all be kdump
+ * reserved memory.
+ */
+ setup_elfcorehdr_size = end - setup_elfcorehdr;
+ break;
+ }
+ }
+ /*
+ * If we don't find it in the memory map, then we shouldn't
+ * have to worry about it, as the new kernel won't use it.
+ */
+ return 0;
+}
+early_param("elfcorehdr", early_parse_elfcorehdr);
+#endif
+
+static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
+{
+ phys_t size;
+ int i;
+
+ size = end - mem;
+ if (!size)
+ return;
+
+ /* Make sure it is in the boot_mem_map */
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ if (mem >= boot_mem_map.map[i].addr &&
+ mem < (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size))
+ return;
+ }
+ add_memory_region(mem, size, type);
+}
+
+#ifdef CONFIG_KEXEC
+static inline unsigned long long get_total_mem(void)
+{
+ unsigned long long total;
+
+ total = max_pfn - min_low_pfn;
+ return total << PAGE_SHIFT;
+}
+
+static void __init mips_parse_crashkernel(void)
+{
+ unsigned long long total_mem;
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ total_mem = get_total_mem();
+ ret = parse_crashkernel(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+ int ret;
+
+ ret = request_resource(res, &crashk_res);
+ if (!ret)
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)((crashk_res.end -
+ crashk_res.start + 1) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init mips_parse_crashkernel(void)
+{
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif /* !defined(CONFIG_KEXEC) */
+
static void __init arch_mem_init(char **cmdline_p)
{
extern void plat_mem_setup(void);
@@ -477,23 +614,66 @@ static void __init arch_mem_init(char **cmdline_p)
/* call board setup routine */
plat_mem_setup();
- printk("Determined physical RAM map:\n");
+ /*
+ * Make sure all kernel memory is in the maps. The "UP" and
+ * "DOWN" are opposite for initdata since if it crosses over
+ * into another memory section you don't want that to be
+ * freed when the initdata is freed.
+ */
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+ arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+ PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+ BOOT_MEM_INIT_RAM);
+
+ pr_info("Determined physical RAM map:\n");
print_memory_map();
- strlcpy(command_line, arcs_cmdline, sizeof(command_line));
- strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+ strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+#endif
+#else
+ strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+#endif
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
if (usermem) {
- printk("User-defined physical RAM map:\n");
+ pr_info("User-defined physical RAM map:\n");
print_memory_map();
}
bootmem_init();
+#ifdef CONFIG_PROC_VMCORE
+ if (setup_elfcorehdr && setup_elfcorehdr_size) {
+ printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
+ setup_elfcorehdr, setup_elfcorehdr_size);
+ reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
+ BOOTMEM_DEFAULT);
+ }
+#endif
+
+ mips_parse_crashkernel();
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start != crashk_res.end)
+ reserve_bootmem(crashk_res.start,
+ crashk_res.end - crashk_res.start + 1,
+ BOOTMEM_DEFAULT);
+#endif
+ device_tree_init();
sparse_init();
+ plat_swiotlb_setup();
paging_init();
}
@@ -509,9 +689,6 @@ static void __init resource_init(void)
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
- /*
- * Request address space for all standard RAM.
- */
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct resource *res;
unsigned long start, end;
@@ -526,6 +703,7 @@ static void __init resource_init(void)
res = alloc_bootmem(sizeof(struct resource));
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
+ case BOOT_MEM_INIT_RAM:
case BOOT_MEM_ROM_DATA:
res->name = "System RAM";
break;
@@ -547,6 +725,7 @@ static void __init resource_init(void)
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
+ request_crashkernel(res);
}
}
@@ -556,11 +735,7 @@ void __init setup_arch(char **cmdline_p)
prom_init();
#ifdef CONFIG_EARLY_PRINTK
- {
- extern void setup_early_printk(void);
-
- setup_early_printk();
- }
+ setup_early_printk();
#endif
cpu_report();
check_bugs_early();
@@ -577,29 +752,10 @@ void __init setup_arch(char **cmdline_p)
resource_init();
plat_smp_setup();
-}
-
-static int __init fpu_disable(char *s)
-{
- int i;
- for (i = 0; i < NR_CPUS; i++)
- cpu_data[i].options &= ~MIPS_CPU_FPU;
-
- return 1;
+ cpu_cache_init();
}
-__setup("nofpu", fpu_disable);
-
-static int __init dsp_disable(char *s)
-{
- cpu_data[0].ases &= ~MIPS_ASE_DSP;
-
- return 1;
-}
-
-__setup("nodsp", dsp_disable);
-
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
@@ -610,8 +766,8 @@ static int __init debugfs_mips(void)
struct dentry *d;
d = debugfs_create_dir("mips", NULL);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ if (!d)
+ return -ENOMEM;
mips_debugfs_dir = d;
return 0;
}
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index c0faabd5201..9c60d09e62a 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -14,23 +14,16 @@
/* #define DEBUG_SIG */
#ifdef DEBUG_SIG
-# define DEBUGP(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
/*
* Determine which stack to use..
*/
extern void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size);
-/*
- * install trampoline code to get back from the sig handler
- */
-extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
-
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index a4e106c56ab..9e60d117e41 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -6,8 +6,11 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
@@ -19,7 +22,9 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
+#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/tracehook.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -30,49 +35,65 @@
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
+#include <asm/vdso.h>
+#include <asm/dsp.h>
+#include <asm/inst.h>
#include "signal-common.h"
-/*
- * Horribly complicated - with the bloody RM9000 workarounds enabled
- * the signal trampolines is moving to the end of the structure so we can
- * increase the alignment without breaking software compatibility.
- */
-#if ICACHE_REFILLS_WORKAROUND_WAR == 0
+static int (*save_fp_context)(struct sigcontext __user *sc);
+static int (*restore_fp_context)(struct sigcontext __user *sc);
+
+extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
+extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_code[2]; /* signal trampoline */
+ u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext sf_sc;
sigset_t sf_mask;
};
struct rt_sigframe {
u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_code[2]; /* signal trampoline */
+ u32 rs_pad[2]; /* Was: signal trampoline */
struct siginfo rs_info;
struct ucontext rs_uc;
};
-#else
+/*
+ * Thread saved context copy to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+{
+ int i;
+ int err = 0;
-struct sigframe {
- u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_pad[2];
- struct sigcontext sf_sc; /* hw context */
- sigset_t sf_mask;
- u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
-};
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &sc->sc_fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-struct rt_sigframe {
- u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_pad[2];
- struct siginfo rs_info;
- struct ucontext rs_uc;
- u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
-};
+ return err;
+}
-#endif
+static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
/*
* Helper routines
@@ -80,11 +101,16 @@ struct rt_sigframe {
static int protected_save_fp_context(struct sigcontext __user *sc)
{
int err;
+#ifndef CONFIG_EVA
while (1) {
lock_fpu_owner();
- own_fpu_inatomic(1);
- err = save_fp_context(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = save_fp_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_to_sigcontext(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -94,17 +120,30 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
if (err)
break; /* really bad sigcontext */
}
+#else
+ /*
+ * EVA does not have FPU EVA instructions so saving fpu context directly
+ * does not work.
+ */
+ lose_fpu(1);
+ err = save_fp_context(sc); /* this might fail */
+#endif
return err;
}
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
- int err, tmp;
+ int err, tmp __maybe_unused;
+#ifndef CONFIG_EVA
while (1) {
lock_fpu_owner();
- own_fpu_inatomic(0);
- err = restore_fp_context(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = restore_fp_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_from_sigcontext(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -114,6 +153,14 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
if (err)
break; /* really bad sigcontext */
}
+#else
+ /*
+ * EVA does not have FPU EVA instructions so restoring fpu context
+ * directly does not work.
+ */
+ lose_fpu(0);
+ err = restore_fp_context(sc); /* this might fail */
+#endif
return err;
}
@@ -255,91 +302,20 @@ void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}
-int install_sigtramp(unsigned int __user *tramp, unsigned int syscall)
-{
- int err;
-
- /*
- * Set up the return code ...
- *
- * li v0, __NR__foo_sigreturn
- * syscall
- */
-
- err = __put_user(0x24020000 + syscall, tramp + 0);
- err |= __put_user(0x0000000c , tramp + 1);
- if (ICACHE_REFILLS_WORKAROUND_WAR) {
- err |= __put_user(0, tramp + 2);
- err |= __put_user(0, tramp + 3);
- err |= __put_user(0, tramp + 4);
- err |= __put_user(0, tramp + 5);
- err |= __put_user(0, tramp + 6);
- err |= __put_user(0, tramp + 7);
- }
- flush_cache_sigtramp((unsigned long) tramp);
-
- return err;
-}
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_TRAD_SIGNALS
-asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
+SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
- sigset_t newset;
- sigset_t __user *uset;
-
- uset = (sigset_t __user *) regs.regs[4];
- if (copy_from_user(&newset, uset, sizeof(sigset_t)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
+ return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif
-asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
-{
- sigset_t newset;
- sigset_t __user *unewset;
- size_t sigsetsize;
-
- /* XXX Don't preclude handling different sized sigset_t's. */
- sigsetsize = regs.regs[5];
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- unewset = (sigset_t __user *) regs.regs[4];
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
#ifdef CONFIG_TRAD_SIGNALS
-asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
- struct sigaction __user *oact)
+SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
+ struct sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
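
The hunk above begins the removal of the on-stack signal trampoline: install_sigtramp() used to write "li v0, __NR_*sigreturn; syscall" into the signal frame, which forces an executable stack and a flush_cache_sigtramp() on every delivery. The return address is instead pointed at a fixed trampoline in the VDSO page, as the reworked handle_signal()/setup_*frame() below do; in essence:

	/* sketch: where the signal return address now comes from */
	void *vdso = current->mm->context.vdso;

	/* rt_signal_return_offset is
	 * offsetof(struct mips_vdso, rt_signal_trampoline) */
	regs->regs[31] = (unsigned long)vdso + abi->rt_signal_return_offset;
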
@@ -378,15 +354,6 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
}
#endif
-asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
-{
- const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
- stack_t __user *uoss = (stack_t __user *) regs.regs[5];
- unsigned long usp = regs.regs[29];
-
- return do_sigaltstack(uss, uoss, usp);
-}
-
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
@@ -400,11 +367,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
goto badframe;
- sigdelsetmask(&blocked, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = blocked;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&blocked);
sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0)
@@ -431,7 +394,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
- stack_t st;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
@@ -440,11 +402,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
@@ -452,11 +410,8 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
else if (sig)
force_sig(sig, current);
- if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
+ if (restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
/*
* Don't let your children do this ...
@@ -473,8 +428,8 @@ badframe:
}
#ifdef CONFIG_TRAD_SIGNALS
-static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
- int signr, sigset_t *set)
+static int setup_frame(void *sig_return, struct k_sigaction *ka,
+ struct pt_regs *regs, int signr, sigset_t *set)
{
struct sigframe __user *frame;
int err = 0;
@@ -483,8 +438,6 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- err |= install_sigtramp(frame->sf_code, __NR_sigreturn);
-
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
if (err)
@@ -504,7 +457,7 @@ static int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) frame->sf_code;
+ regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -518,8 +471,9 @@ give_sigsegv:
}
#endif
-static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
- int signr, sigset_t *set, siginfo_t *info)
+static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
+ struct pt_regs *regs, int signr, sigset_t *set,
+ siginfo_t *info)
{
struct rt_sigframe __user *frame;
int err = 0;
@@ -528,20 +482,13 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- err |= install_sigtramp(frame->rs_code, __NR_rt_sigreturn);
-
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
- err |= __put_user((void __user *)current->sas_ss_sp,
- &frame->rs_uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[29]),
- &frame->rs_uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size,
- &frame->rs_uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
@@ -562,7 +509,7 @@ static int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) frame->rs_code;
+ regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -579,114 +526,101 @@ give_sigsegv:
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
.setup_frame = setup_frame,
+ .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
- .setup_rt_frame = setup_rt_frame,
+ .setup_rt_frame = setup_rt_frame,
+ .rt_signal_return_offset =
+ offsetof(struct mips_vdso, rt_signal_trampoline),
.restart = __NR_restart_syscall
};
-static int handle_signal(unsigned long sig, siginfo_t *info,
- struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
+static void handle_signal(unsigned long sig, siginfo_t *info,
+ struct k_sigaction *ka, struct pt_regs *regs)
{
+ sigset_t *oldset = sigmask_to_save();
int ret;
+ struct mips_abi *abi = current->thread.abi;
+#ifdef CONFIG_CPU_MICROMIPS
+ void *vdso;
+ unsigned int tmp = (unsigned int)current->mm->context.vdso;
+
+ set_isa16_mode(tmp);
+ vdso = (void *)tmp;
+#else
+ void *vdso = current->mm->context.vdso;
+#endif
- switch(regs->regs[0]) {
- case ERESTART_RESTARTBLOCK:
- case ERESTARTNOHAND:
- regs->regs[2] = EINTR;
- break;
- case ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
+ if (regs->regs[0]) {
+		switch (regs->regs[2]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
+ case ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->regs[2] = EINTR;
+ break;
+ }
+ /* fallthrough */
+ case ERESTARTNOINTR:
+ regs->regs[7] = regs->regs[26];
+ regs->regs[2] = regs->regs[0];
+ regs->cp0_epc -= 4;
}
- /* fallthrough */
- case ERESTARTNOINTR: /* Userland will reload $v0. */
- regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
- }
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
if (sig_uses_siginfo(ka))
- ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
+ ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
+ ka, regs, sig, oldset, info);
else
- ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
+ ret = abi->setup_frame(vdso + abi->signal_return_offset,
+ ka, regs, sig, oldset);
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ if (ret)
+ return;
- return ret;
+ signal_delivered(sig, info, ka, regs, 0);
}
static void do_signal(struct pt_regs *regs)
{
struct k_sigaction ka;
- sigset_t *oldset;
siginfo_t info;
int signr;
- /*
- * We want the common case to go fast, which is why we may in certain
- * cases get here from kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
- /*
- * A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
-
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, regs);
return;
}
- /*
- * Who's code doesn't conform to the restartable syscall convention
- * dies here!!! The li instruction, a single machine instruction,
- * must directly be followed by the syscall instruction.
- */
if (regs->regs[0]) {
- if (regs->regs[2] == ERESTARTNOHAND ||
- regs->regs[2] == ERESTARTSYS ||
- regs->regs[2] == ERESTARTNOINTR) {
+ switch (regs->regs[2]) {
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
- regs->cp0_epc -= 8;
- }
- if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
+ regs->cp0_epc -= 4;
+ break;
+
+ case ERESTART_RESTARTBLOCK:
regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
+ break;
}
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
+ restore_saved_sigmask();
}
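
The restart handling above re-arms an interrupted syscall in place: $v0 is
reloaded from the syscall number the entry path stashed in regs[0], $a3 is
restored from its entry-time copy in regs[26], and cp0_epc steps back 4 bytes
so only the syscall instruction re-executes (the removed code stepped back 8
to also re-run the preceding li). A toy user-space simulation of that decision
table, with illustrative error codes standing in for the kernel's ERESTART*
values:

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's restart error codes */
	enum { EINTR_ = 4, ERESTARTSYS_ = 512, ERESTARTNOINTR_ = 513 };

	struct toy_regs { long v0, a3, epc, saved_nr, saved_a3; };

	/* Mirror the switch above: fail with EINTR or rewind the syscall */
	static void restart_check(struct toy_regs *r, int sa_restart)
	{
		switch (r->v0) {
		case ERESTARTSYS_:
			if (!sa_restart) {
				r->v0 = EINTR_;
				break;
			}
			/* fallthrough */
		case ERESTARTNOINTR_:
			r->v0 = r->saved_nr;	/* reload the syscall number */
			r->a3 = r->saved_a3;	/* restore the error flag reg */
			r->epc -= 4;		/* re-execute the syscall insn */
		}
	}

	int main(void)
	{
		struct toy_regs r = { ERESTARTSYS_, 0, 0x1004, 4003, 0 };

		restart_check(&r, 1);
		printf("v0=%ld epc=%#lx\n", r.v0, (unsigned long)r.epc);
		return 0;
	}
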
/*
@@ -696,7 +630,62 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
+ local_irq_enable();
+
+ user_exit();
+
/* deal with pending signal delivery */
- if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+ if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+
+ user_enter();
}
+
+#ifdef CONFIG_SMP
+#ifndef CONFIG_EVA
+static int smp_save_fp_context(struct sigcontext __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? _save_fp_context(sc)
+ : copy_fp_to_sigcontext(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? _restore_fp_context(sc)
+ : copy_fp_from_sigcontext(sc);
+}
+#endif /* CONFIG_EVA */
+#endif
+
+static int signal_setup(void)
+{
+#ifndef CONFIG_EVA
+#ifdef CONFIG_SMP
+ /* For now just do the cpu_has_fpu check when the functions are invoked */
+ save_fp_context = smp_save_fp_context;
+ restore_fp_context = smp_restore_fp_context;
+#else
+ if (cpu_has_fpu) {
+ save_fp_context = _save_fp_context;
+ restore_fp_context = _restore_fp_context;
+ } else {
+		save_fp_context = copy_fp_to_sigcontext;
+		restore_fp_context = copy_fp_from_sigcontext;
+ }
+#endif /* CONFIG_SMP */
+#else
+	save_fp_context = copy_fp_to_sigcontext;
+	restore_fp_context = copy_fp_from_sigcontext;
+#endif
+
+ return 0;
+}
+
+arch_initcall(signal_setup);
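
signal_setup() resolves the FP save/restore helpers once at boot instead of
re-testing CPU features on every signal. A minimal user-space sketch of the
same resolve-once dispatch pattern (all names here are hypothetical):

	#include <stdio.h>

	/* Hypothetical stand-ins for _save_fp_context() and
	 * copy_fp_to_sigcontext(): a hardware path and a software fallback. */
	static int save_hw(void *sc)   { printf("hardware save\n"); return 0; }
	static int save_soft(void *sc) { printf("software copy\n"); return 0; }

	/* Resolved once, like the arch_initcall() above */
	static int (*save_fp)(void *sc);

	static void setup(int have_fpu)
	{
		save_fp = have_fpu ? save_hw : save_soft;
	}

	int main(void)
	{
		setup(1);
		return save_fp(NULL);	/* callers never re-test the feature */
	}
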
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 572c610db1b..bae2e6ee210 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -29,85 +29,86 @@
#include <asm/cacheflush.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
-#include <asm/system.h>
#include <asm/fpu.h>
#include <asm/war.h>
+#include <asm/vdso.h>
+#include <asm/dsp.h>
#include "signal-common.h"
+static int (*save_fp_context32)(struct sigcontext32 __user *sc);
+static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
+
+extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
+extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
+
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
-#define __NR_O32_sigreturn 4119
-#define __NR_O32_rt_sigreturn 4193
-#define __NR_O32_restart_syscall 4253
+#define __NR_O32_restart_syscall 4253
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
-struct sigaction32 {
- unsigned int sa_flags;
- __sighandler32_t sa_handler;
- compat_sigset_t sa_mask;
-};
-
-/* IRIX compatible stack_t */
-typedef struct sigaltstack32 {
- s32 ss_sp;
- compat_size_t ss_size;
- int ss_flags;
-} stack32_t;
-
struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
- stack32_t uc_stack;
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
-/*
- * Horribly complicated - with the bloody RM9000 workarounds enabled
- * the signal trampolines is moving to the end of the structure so we can
- * increase the alignment without breaking software compatibility.
- */
-#if ICACHE_REFILLS_WORKAROUND_WAR == 0
-
struct sigframe32 {
u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_code[2]; /* signal trampoline */
+ u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext32 sf_sc;
compat_sigset_t sf_mask;
};
struct rt_sigframe32 {
u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_code[2]; /* signal trampoline */
+ u32 rs_pad[2]; /* Was: signal trampoline */
compat_siginfo_t rs_info;
struct ucontext32 rs_uc;
};
-#else /* ICACHE_REFILLS_WORKAROUND_WAR */
+/*
+ * Thread saved context copy to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
+{
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-struct sigframe32 {
- u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_pad[2];
- struct sigcontext32 sf_sc; /* hw context */
- compat_sigset_t sf_mask;
- u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
-};
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &sc->sc_fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-struct rt_sigframe32 {
- u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_pad[2];
- compat_siginfo_t rs_info;
- struct ucontext32 rs_uc;
- u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */
-};
+ return err;
+}
+
+static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
+{
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
+ return err;
+}
/*
* sigcontext handlers
@@ -117,9 +118,13 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
int err;
while (1) {
lock_fpu_owner();
- own_fpu_inatomic(1);
- err = save_fp_context32(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = save_fp_context32(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_to_sigcontext32(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -134,12 +139,16 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
{
- int err, tmp;
+ int err, tmp __maybe_unused;
while (1) {
lock_fpu_owner();
- own_fpu_inatomic(0);
- err = restore_fp_context32(sc); /* this might fail */
- unlock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = restore_fp_context32(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_from_sigcontext32(sc);
+ }
if (likely(!err))
break;
/* touch the sigcontext and try again */
@@ -299,58 +308,13 @@ static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
* Atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
+asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset)
{
- compat_sigset_t __user *uset;
- sigset_t newset;
-
- uset = (compat_sigset_t __user *) regs.regs[4];
- if (get_sigset(&newset, uset))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
+ return compat_sys_rt_sigsuspend(uset, sizeof(compat_sigset_t));
}
-asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
-{
- compat_sigset_t __user *uset;
- sigset_t newset;
- size_t sigsetsize;
-
- /* XXX Don't preclude handling different sized sigset_t's. */
- sigsetsize = regs.regs[5];
- if (sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
-
- uset = (compat_sigset_t __user *) regs.regs[4];
- if (get_sigset(&newset, uset))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-asmlinkage int sys32_sigaction(int sig, const struct sigaction32 __user *act,
- struct sigaction32 __user *oact)
+SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, act,
+ struct compat_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
@@ -379,7 +343,7 @@ asmlinkage int sys32_sigaction(int sig, const struct sigaction32 __user *act,
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
- &oact->sa_handler);
+ &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
@@ -391,46 +355,7 @@ asmlinkage int sys32_sigaction(int sig, const struct sigaction32 __user *act,
return ret;
}
-asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
-{
- const stack32_t __user *uss = (const stack32_t __user *) regs.regs[4];
- stack32_t __user *uoss = (stack32_t __user *) regs.regs[5];
- unsigned long usp = regs.regs[29];
- stack_t kss, koss;
- int ret, err = 0;
- mm_segment_t old_fs = get_fs();
- s32 sp;
-
- if (uss) {
- if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
- return -EFAULT;
- err |= __get_user(sp, &uss->ss_sp);
- kss.ss_sp = (void __user *) (long) sp;
- err |= __get_user(kss.ss_size, &uss->ss_size);
- err |= __get_user(kss.ss_flags, &uss->ss_flags);
- if (err)
- return -EFAULT;
- }
-
- set_fs(KERNEL_DS);
- ret = do_sigaltstack(uss ? (stack_t __user *)&kss : NULL,
- uoss ? (stack_t __user *)&koss : NULL, usp);
- set_fs(old_fs);
-
- if (!ret && uoss) {
- if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
- return -EFAULT;
- sp = (int) (unsigned long) koss.ss_sp;
- err |= __put_user(sp, &uoss->ss_sp);
- err |= __put_user(koss.ss_size, &uoss->ss_size);
- err |= __put_user(koss.ss_flags, &uoss->ss_flags);
- if (err)
- return -EFAULT;
- }
- return ret;
-}
-
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -482,6 +407,18 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
return err;
}
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+ memset(to, 0, sizeof *to);
+
+ if (copy_from_user(to, from, 3*sizeof(int)) ||
+ copy_from_user(to->_sifields._pad,
+ from->_sifields._pad, SI_PAD_SIZE32))
+ return -EFAULT;
+
+ return 0;
+}
+
asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe32 __user *frame;
@@ -494,11 +431,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
goto badframe;
- sigdelsetmask(&blocked, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = blocked;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&blocked);
sig = restore_sigcontext32(&regs, &frame->sf_sc);
if (sig < 0)
@@ -523,10 +456,7 @@ badframe:
asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe32 __user *frame;
- mm_segment_t old_fs;
sigset_t set;
- stack_t st;
- s32 sp;
int sig;
frame = (struct rt_sigframe32 __user *) regs.regs[29];
@@ -535,11 +465,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
@@ -547,21 +473,8 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
else if (sig)
force_sig(sig, current);
- /* The ucontext contains a stack32_t, so we must convert! */
- if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
- goto badframe;
- st.ss_sp = (void __user *)(long) sp;
- if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size))
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
- if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags))
- goto badframe;
-
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
- set_fs(old_fs);
/*
* Don't let your children do this ...
@@ -577,8 +490,8 @@ badframe:
force_sig(SIGSEGV, current);
}
-static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
- int signr, sigset_t *set)
+static int setup_frame_32(void *sig_return, struct k_sigaction *ka,
+ struct pt_regs *regs, int signr, sigset_t *set)
{
struct sigframe32 __user *frame;
int err = 0;
@@ -587,8 +500,6 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn);
-
err |= setup_sigcontext32(regs, &frame->sf_sc);
err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
@@ -609,7 +520,7 @@ static int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) frame->sf_code;
+ regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -623,32 +534,24 @@ give_sigsegv:
return -EFAULT;
}
-static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
- int signr, sigset_t *set, siginfo_t *info)
+static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
+ struct pt_regs *regs, int signr, sigset_t *set,
+ siginfo_t *info)
{
struct rt_sigframe32 __user *frame;
int err = 0;
- s32 sp;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- err |= install_sigtramp(frame->rs_code, __NR_O32_rt_sigreturn);
-
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
- sp = (int) (long) current->sas_ss_sp;
- err |= __put_user(sp,
- &frame->rs_uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[29]),
- &frame->rs_uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size,
- &frame->rs_uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
@@ -669,7 +572,7 @@ static int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) frame->rs_code;
+ regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -688,131 +591,25 @@ give_sigsegv:
*/
struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
- .setup_rt_frame = setup_rt_frame_32,
+ .signal_return_offset =
+ offsetof(struct mips_vdso, o32_signal_trampoline),
+ .setup_rt_frame = setup_rt_frame_32,
+ .rt_signal_return_offset =
+ offsetof(struct mips_vdso, o32_rt_signal_trampoline),
.restart = __NR_O32_restart_syscall
};
-asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
- struct sigaction32 __user *oact,
- unsigned int sigsetsize)
+static int signal32_init(void)
{
- struct k_sigaction new_sa, old_sa;
- int ret = -EINVAL;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- goto out;
-
- if (act) {
- s32 handler;
- int err = 0;
-
- if (!access_ok(VERIFY_READ, act, sizeof(*act)))
- return -EFAULT;
- err |= __get_user(handler, &act->sa_handler);
- new_sa.sa.sa_handler = (void __user *)(s64)handler;
- err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags);
- err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask);
- if (err)
- return -EFAULT;
- }
-
- ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
-
- if (!ret && oact) {
- int err = 0;
-
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
- return -EFAULT;
-
- err |= __put_user((u32)(u64)old_sa.sa.sa_handler,
- &oact->sa_handler);
- err |= __put_user(old_sa.sa.sa_flags, &oact->sa_flags);
- err |= put_sigset(&old_sa.sa.sa_mask, &oact->sa_mask);
- if (err)
- return -EFAULT;
+ if (cpu_has_fpu) {
+ save_fp_context32 = _save_fp_context32;
+ restore_fp_context32 = _restore_fp_context32;
+ } else {
+ save_fp_context32 = copy_fp_to_sigcontext32;
+ restore_fp_context32 = copy_fp_from_sigcontext32;
}
-out:
- return ret;
-}
-
-asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
- compat_sigset_t __user *oset, unsigned int sigsetsize)
-{
- sigset_t old_set, new_set;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- if (set && get_sigset(&new_set, set))
- return -EFAULT;
-
- set_fs(KERNEL_DS);
- ret = sys_rt_sigprocmask(how, set ? (sigset_t __user *)&new_set : NULL,
- oset ? (sigset_t __user *)&old_set : NULL,
- sigsetsize);
- set_fs(old_fs);
-
- if (!ret && oset && put_sigset(&old_set, oset))
- return -EFAULT;
-
- return ret;
-}
-
-asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
- unsigned int sigsetsize)
-{
- int ret;
- sigset_t set;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret = sys_rt_sigpending((sigset_t __user *)&set, sigsetsize);
- set_fs(old_fs);
-
- if (!ret && put_sigset(&set, uset))
- return -EFAULT;
- return ret;
-}
-
-asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
-{
- siginfo_t info;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- if (copy_from_user(&info, uinfo, 3*sizeof(int)) ||
- copy_from_user(info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
- return -EFAULT;
- set_fs(KERNEL_DS);
- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
- set_fs(old_fs);
- return ret;
+ return 0;
}
-asmlinkage long
-sys32_waitid(int which, compat_pid_t pid,
- compat_siginfo_t __user *uinfo, int options,
- struct compat_rusage __user *uru)
-{
- siginfo_t info;
- struct rusage ru;
- long ret;
- mm_segment_t old_fs = get_fs();
-
- info.si_signo = 0;
- set_fs(KERNEL_DS);
- ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
- uru ? (struct rusage __user *) &ru : NULL);
- set_fs(old_fs);
-
- if (ret < 0 || info.si_signo == 0)
- return ret;
-
- if (uru && (ret = put_compat_rusage(&ru, uru)))
- return ret;
-
- BUG_ON(info.si_code & __SI_MASK);
- info.si_code |= __SI_CHLD;
- return copy_siginfo_to_user32(uinfo, &info);
-}
+arch_initcall(signal32_init);
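
copy_fp_to_sigcontext32() and its counterpart above walk the FP register file
with a stride of 2 when TIF_32BIT_FPREGS is set, so only the even-numbered
64-bit entries carry state for an o32 task. A standalone sketch of that stride
(array size and names are illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_REGS 32

	/* Stand-in for the task's FP register file */
	static uint64_t fpr[NUM_REGS] = { 11, 22, 33, 44 };

	/* Copy every register in 64-bit-FPR mode, every second (even) one
	 * in 32-bit-FPR mode, mirroring the inc = 2-or-1 loop above. */
	static void copy_fp_out(uint64_t *dst, int fp32_mode)
	{
		int inc = fp32_mode ? 2 : 1;

		for (int i = 0; i < NUM_REGS; i += inc)
			dst[i] = fpr[i];
	}

	int main(void)
	{
		uint64_t sc[NUM_REGS] = { 0 };

		copy_fp_out(sc, 1);	/* fpr[1] and fpr[3] are skipped */
		printf("%llu %llu %llu %llu\n",
		       (unsigned long long)sc[0], (unsigned long long)sc[1],
		       (unsigned long long)sc[2], (unsigned long long)sc[3]);
		return 0;
	}
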
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index bb277e82d42..b2241bb9cac 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -35,97 +35,40 @@
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
-#include <asm/system.h>
#include <asm/fpu.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
+#include <asm/vdso.h>
#include "signal-common.h"
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
-#define __NR_N32_rt_sigreturn 6211
#define __NR_N32_restart_syscall 6214
extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
-
-/* IRIX compatible stack_t */
-typedef struct sigaltstack32 {
- s32 ss_sp;
- compat_size_t ss_size;
- int ss_flags;
-} stack32_t;
-
struct ucontextn32 {
- u32 uc_flags;
- s32 uc_link;
- stack32_t uc_stack;
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
struct sigcontext uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-#if ICACHE_REFILLS_WORKAROUND_WAR == 0
-
-struct rt_sigframe_n32 {
- u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_code[2]; /* signal trampoline */
- struct compat_siginfo rs_info;
- struct ucontextn32 rs_uc;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
-#else /* ICACHE_REFILLS_WORKAROUND_WAR */
-
struct rt_sigframe_n32 {
u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_pad[2];
+ u32 rs_pad[2]; /* Was: signal trampoline */
struct compat_siginfo rs_info;
struct ucontextn32 rs_uc;
- u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
};
-#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
-
-extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat);
-
-asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
-{
- compat_sigset_t __user *unewset;
- compat_sigset_t uset;
- size_t sigsetsize;
- sigset_t newset;
-
- /* XXX Don't preclude handling different sized sigset_t's. */
- sigsetsize = regs.regs[5];
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- unewset = (compat_sigset_t __user *) regs.regs[4];
- if (copy_from_user(&uset, unewset, sizeof(uset)))
- return -EFAULT;
- sigset_from_compat(&newset, &uset);
- sigdelsetmask(&newset, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe_n32 __user *frame;
sigset_t set;
- stack_t st;
- s32 sp;
int sig;
frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
@@ -134,11 +77,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
@@ -146,19 +85,9 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
else if (sig)
force_sig(sig, current);
- /* The ucontext contains a stack32_t, so we must convert! */
- if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
- goto badframe;
- st.ss_sp = (void __user *)(long) sp;
- if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size))
- goto badframe;
- if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags))
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack((stack_t __user *)&st, NULL, regs.regs[29]);
-
/*
* Don't let your children do this ...
*/
@@ -173,32 +102,23 @@ badframe:
force_sig(SIGSEGV, current);
}
-static int setup_rt_frame_n32(struct k_sigaction * ka,
+static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe_n32 __user *frame;
int err = 0;
- s32 sp;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn);
-
/* Create siginfo. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
- sp = (int) (long) current->sas_ss_sp;
- err |= __put_user(sp,
- &frame->rs_uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[29]),
- &frame->rs_uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size,
- &frame->rs_uc.uc_stack.ss_size);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
@@ -219,7 +139,7 @@ static int setup_rt_frame_n32(struct k_sigaction * ka,
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) frame->rs_code;
+ regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
@@ -234,6 +154,8 @@ give_sigsegv:
}
struct mips_abi mips_abi_n32 = {
- .setup_rt_frame = setup_rt_frame_n32,
+ .setup_rt_frame = setup_rt_frame_n32,
+ .rt_signal_return_offset =
+ offsetof(struct mips_vdso, n32_rt_signal_trampoline),
.restart = __NR_N32_restart_syscall
};
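
All three ABIs now return through a stub in the VDSO rather than code written
to the user stack: each mips_abi records its stub's offset inside struct
mips_vdso via offsetof(), and handle_signal() adds that offset to the
mapping's base to form the handler's return address. A small sketch of the
arithmetic (the struct layout below is invented for illustration):

	#include <stddef.h>
	#include <stdio.h>

	/* Invented layout; the real struct mips_vdso lives in
	 * arch/mips/include/asm/vdso.h. */
	struct fake_vdso {
		char signal_trampoline[32];
		char rt_signal_trampoline[32];
	};

	int main(void)
	{
		struct fake_vdso vdso;		/* per-process mapping */
		char *base = (char *)&vdso;

		/* ra for the handler = mapping base + recorded offset */
		char *ret = base + offsetof(struct fake_vdso,
					    rt_signal_trampoline);

		printf("stub at base + %zu\n", (size_t)(ret - base));
		return 0;
	}
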
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
new file mode 100644
index 00000000000..df9e2bd9b2c
--- /dev/null
+++ b/arch/mips/kernel/smp-bmips.c
@@ -0,0 +1,535 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * SMP support for BMIPS
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <asm/time.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/bootinfo.h>
+#include <asm/pmon.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+#include <asm/traps.h>
+#include <asm/barrier.h>
+
+static int __maybe_unused max_cpus = 1;
+
+/* these may be configured by the platform code */
+int bmips_smp_enabled = 1;
+int bmips_cpu_offset;
+cpumask_t bmips_booted_mask;
+
+#ifdef CONFIG_SMP
+
+/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
+unsigned long bmips_smp_boot_sp;
+unsigned long bmips_smp_boot_gp;
+
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
+static void bmips5000_send_ipi_single(int cpu, unsigned int action);
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
+
+/* SW interrupts 0,1 are used for interprocessor signaling */
+#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
+#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
+
+#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
+#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
+
+static void __init bmips_smp_setup(void)
+{
+ int i, cpu = 1, boot_cpu = 0;
+ int cpu_hw_intr;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* arbitration priority */
+ clear_c0_brcm_cmt_ctrl(0x30);
+
+ /* NBK and weak order flags */
+ set_c0_brcm_config_0(0x30000);
+
+ /* Find out if we are running on TP0 or TP1 */
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+
+ /*
+ * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
+ * thread
+ * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
+ * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
+ */
+ if (boot_cpu == 0)
+ cpu_hw_intr = 0x02;
+ else
+ cpu_hw_intr = 0x1d;
+
+ change_c0_brcm_cmt_intr(0xf8018000,
+ (cpu_hw_intr << 27) | (0x03 << 15));
+
+ /* single core, 2 threads (2 pipelines) */
+ max_cpus = 2;
+
+ break;
+ case CPU_BMIPS5000:
+ /* enable raceless SW interrupts */
+ set_c0_brcm_config(0x03 << 22);
+
+ /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
+ change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
+
+ /* N cores, 2 threads per core */
+ max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
+
+ /* clear any pending SW interrupts */
+ for (i = 0; i < max_cpus; i++) {
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
+ }
+
+ break;
+ default:
+ max_cpus = 1;
+ }
+
+ if (!bmips_smp_enabled)
+ max_cpus = 1;
+
+ /* this can be overridden by the BSP */
+ if (!board_ebase_setup)
+ board_ebase_setup = &bmips_ebase_setup;
+
+ __cpu_number_map[boot_cpu] = 0;
+ __cpu_logical_map[0] = boot_cpu;
+
+ for (i = 0; i < max_cpus; i++) {
+ if (i != boot_cpu) {
+ __cpu_number_map[i] = cpu;
+ __cpu_logical_map[cpu] = i;
+ cpu++;
+ }
+ set_cpu_possible(i, 1);
+ set_cpu_present(i, 1);
+ }
+}
+
+/*
+ * IPI IRQ setup - runs on CPU0
+ */
+static void bmips_prepare_cpus(unsigned int max_cpus)
+{
+ irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
+ break;
+ case CPU_BMIPS5000:
+ bmips_ipi_interrupt = bmips5000_ipi_interrupt;
+ break;
+ default:
+ return;
+ }
+
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+ "smp_ipi0", NULL))
+ panic("Can't request IPI0 interrupt");
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+ "smp_ipi1", NULL))
+ panic("Can't request IPI1 interrupt");
+}
+
+/*
+ * Tell the hardware to boot CPUx - runs on CPU0
+ */
+static void bmips_boot_secondary(int cpu, struct task_struct *idle)
+{
+ bmips_smp_boot_sp = __KSTK_TOS(idle);
+ bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
+ mb();
+
+ /*
+ * Initial boot sequence for secondary CPU:
+ * bmips_reset_nmi_vec @ a000_0000 ->
+ * bmips_smp_entry ->
+ * plat_wired_tlb_setup (cached function call; optional) ->
+ * start_secondary (cached jump)
+ *
+ * Warm restart sequence:
+ * play_dead WAIT loop ->
+ * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
+ * eret to play_dead ->
+ * bmips_secondary_reentry ->
+ * start_secondary
+ */
+
+ pr_info("SMP: Booting CPU%d...\n", cpu);
+
+ if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ bmips43xx_send_ipi_single(cpu, 0);
+ break;
+ case CPU_BMIPS5000:
+ bmips5000_send_ipi_single(cpu, 0);
+ break;
+ }
+	} else {
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* Reset slave TP1 if booting from TP0 */
+ if (cpu_logical_map(cpu) == 1)
+ set_c0_brcm_cmt_ctrl(0x01);
+ break;
+ case CPU_BMIPS5000:
+ if (cpu & 0x01)
+ write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
+ else {
+ /*
+ * core N thread 0 was already booted; just
+ * pulse the NMI line
+ */
+ bmips_write_zscm_reg(0x210, 0xc0000000);
+ udelay(10);
+ bmips_write_zscm_reg(0x210, 0x00);
+ }
+ break;
+ }
+ cpumask_set_cpu(cpu, &bmips_booted_mask);
+ }
+}
+
+/*
+ * Early setup - runs on secondary CPU after cache probe
+ */
+static void bmips_init_secondary(void)
+{
+ /* move NMI vector to kseg0, in case XKS01 is enabled */
+
+ void __iomem *cbr;
+ unsigned long old_vec;
+ unsigned long relo_vector;
+ int boot_cpu;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ cbr = BMIPS_GET_CBR();
+
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+ BMIPS_RELO_VECTOR_CONTROL_1;
+
+ old_vec = __raw_readl(cbr + relo_vector);
+ __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
+
+ clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
+ break;
+ case CPU_BMIPS5000:
+ write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
+ (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
+
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+ break;
+ }
+}
+
+/*
+ * Late setup - runs on secondary CPU before entering the idle loop
+ */
+static void bmips_smp_finish(void)
+{
+ pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+ /* make sure there won't be a timer interrupt for a little while */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+ irq_enable_hazard();
+ set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+ irq_enable_hazard();
+}
+
+/*
+ * BMIPS5000 raceless IPIs
+ *
+ * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
+ * IPI0 is used for SMP_RESCHEDULE_YOURSELF
+ * IPI1 is used for SMP_CALL_FUNCTION
+ */
+
+static void bmips5000_send_ipi_single(int cpu, unsigned int action)
+{
+ write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
+}
+
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
+{
+ int action = irq - IPI0_IRQ;
+
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
+
+ if (action == 0)
+ scheduler_ipi();
+ else
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void bmips5000_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips5000_send_ipi_single(i, action);
+}
+
+/*
+ * BMIPS43xx racy IPIs
+ *
+ * We use one inbound SW IRQ for each CPU.
+ *
+ * A spinlock must be held in order to keep CPUx from accidentally clearing
+ * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
+ * same spinlock is used to protect the action masks.
+ */
+
+static DEFINE_SPINLOCK(ipi_lock);
+static DEFINE_PER_CPU(int, ipi_action_mask);
+
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ set_c0_cause(cpu ? C_SW1 : C_SW0);
+ per_cpu(ipi_action_mask, cpu) |= action;
+ irq_enable_hazard();
+ spin_unlock_irqrestore(&ipi_lock, flags);
+}
+
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ int action, cpu = irq - IPI0_IRQ;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ action = __get_cpu_var(ipi_action_mask);
+ per_cpu(ipi_action_mask, cpu) = 0;
+ clear_c0_cause(cpu ? C_SW1 : C_SW0);
+ spin_unlock_irqrestore(&ipi_lock, flags);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+ if (action & SMP_CALL_FUNCTION)
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips43xx_send_ipi_single(i, action);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int bmips_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EBUSY;
+
+ pr_info("SMP: CPU%d is offline\n", cpu);
+
+ set_cpu_online(cpu, false);
+ cpu_clear(cpu, cpu_callin_map);
+
+ local_flush_tlb_all();
+ local_flush_icache_range(0, ~0);
+
+ return 0;
+}
+
+static void bmips_cpu_die(unsigned int cpu)
+{
+}
+
+void __ref play_dead(void)
+{
+ idle_task_exit();
+
+ /* flush data cache */
+ _dma_cache_wback_inv(0, ~0);
+
+ /*
+ * Wakeup is on SW0 or SW1; disable everything else
+ * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
+ * IRQ handlers; this clears ST0_IE and returns immediately.
+ */
+ clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
+ change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
+ IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
+ irq_disable_hazard();
+
+ /*
+ * wait for SW interrupt from bmips_boot_secondary(), then jump
+ * back to start_secondary()
+ */
+ __asm__ __volatile__(
+ " wait\n"
+ " j bmips_secondary_reentry\n"
+ : : : "memory");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+struct plat_smp_ops bmips43xx_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .send_ipi_single = bmips43xx_send_ipi_single,
+ .send_ipi_mask = bmips43xx_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+};
+
+struct plat_smp_ops bmips5000_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .send_ipi_single = bmips5000_send_ipi_single,
+ .send_ipi_mask = bmips5000_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+};
+
+#endif /* CONFIG_SMP */
+
+/***********************************************************************
+ * BMIPS vector relocation
+ * This is primarily used for SMP boot, but it is applicable to some
+ * UP BMIPS systems as well.
+ ***********************************************************************/
+
+static void bmips_wr_vec(unsigned long dst, char *start, char *end)
+{
+ memcpy((void *)dst, start, end - start);
+ dma_cache_wback((unsigned long)start, end - start);
+ local_flush_icache_range(dst, dst + (end - start));
+ instruction_hazard();
+}
+
+static inline void bmips_nmi_handler_setup(void)
+{
+ bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
+ &bmips_reset_nmi_vec_end);
+ bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
+ &bmips_smp_int_vec_end);
+}
+
+void bmips_ebase_setup(void)
+{
+ unsigned long new_ebase = ebase;
+ void __iomem __maybe_unused *cbr;
+
+ BUG_ON(ebase != CKSEG0);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ /*
+ * BMIPS4350 cannot relocate the normal vectors, but it
+ * can relocate the BEV=1 vectors. So CPU1 starts up at
+ * the relocated BEV=1, IV=0 general exception vector @
+ * 0xa000_0380.
+ *
+ * set_uncached_handler() is used here because:
+ * - CPU1 will run this from uncached space
+ * - None of the cacheflush functions are set up yet
+ */
+ set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
+ &bmips_smp_int_vec, 0x80);
+ __sync();
+ return;
+ case CPU_BMIPS4380:
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_0400: normal vectors
+ */
+ new_ebase = 0x80000400;
+ cbr = BMIPS_GET_CBR();
+ __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+ __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ break;
+ case CPU_BMIPS5000:
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_1000: normal vectors
+ */
+ new_ebase = 0x80001000;
+ write_c0_brcm_bootvec(0xa0088008);
+ write_c0_ebase(new_ebase);
+ if (max_cpus > 2)
+ bmips_write_zscm_reg(0xa0, 0xa008a008);
+ break;
+ default:
+ return;
+ }
+
+ board_nmi_handler_setup = &bmips_nmi_handler_setup;
+ ebase = new_ebase;
+}
+
+asmlinkage void __weak plat_wired_tlb_setup(void)
+{
+ /*
+ * Called when starting/restarting a secondary CPU.
+ * Kernel stacks and other important data might only be accessible
+ * once the wired entries are present.
+ */
+}
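
The BMIPS43xx path above folds multiple IPI reasons into one per-CPU action
mask so a single software interrupt can carry several requests, with one lock
ordering senders against the handler. A user-space sketch of that
accumulate-and-drain pattern using POSIX threads (the action bits are
hypothetical; build with -pthread):

	#include <pthread.h>
	#include <stdio.h>

	#define ACT_RESCHEDULE		(1 << 0)	/* hypothetical bits */
	#define ACT_CALL_FUNCTION	(1 << 1)

	static pthread_mutex_t ipi_lock = PTHREAD_MUTEX_INITIALIZER;
	static int action_mask;

	/* Sender: set a bit under the lock, as in bmips43xx_send_ipi_single() */
	static void send_ipi(int action)
	{
		pthread_mutex_lock(&ipi_lock);
		action_mask |= action;
		pthread_mutex_unlock(&ipi_lock);
	}

	/* Handler: snapshot and clear the mask, then act on every bit */
	static void handle_ipi(void)
	{
		pthread_mutex_lock(&ipi_lock);
		int action = action_mask;
		action_mask = 0;
		pthread_mutex_unlock(&ipi_lock);

		if (action & ACT_RESCHEDULE)
			printf("reschedule\n");
		if (action & ACT_CALL_FUNCTION)
			printf("call function\n");
	}

	int main(void)
	{
		send_ipi(ACT_RESCHEDULE);
		send_ipi(ACT_CALL_FUNCTION);
		handle_ipi();	/* one "interrupt" drains both requests */
		return 0;
	}
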
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 00000000000..fc8a5155342
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,159 @@
+/*
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/amon.h>
+#include <asm/gic.h>
+
+static void cmp_init_secondary(void)
+{
+ struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
+
+ /* Assume GIC is present */
+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
+ STATUSF_IP7);
+
+ /* Enable per-cpu interrupts: platform specific */
+
+#ifdef CONFIG_MIPS_MT_SMP
+ if (cpu_has_mipsmt)
+ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+ TCBIND_CURVPE;
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+ /* CDFIXME: remove this? */
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+/*
+ * Set up the PC, SP, and GP of a secondary processor and start it running
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ */
+static void cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct thread_info *gp = task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
+ unsigned long pc = (unsigned long)&smp_bootstrap;
+ unsigned long a0 = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+ __func__, cpu);
+
+#if 0
+ /* Needed? */
+ flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+ amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+ int i;
+ int ncpu = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ for (i = 1; i < NR_CPUS; i++) {
+ if (amon_cpu_avail(i)) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++ncpu;
+ __cpu_logical_map[ncpu] = i;
+ }
+ }
+
+ if (cpu_has_mipsmt) {
+ unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int mvpconf0 = read_c0_mvpconf0();
+
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#endif
+ smp_num_siblings = nvpe;
+ }
+ pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+ pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+ smp_processor_id(), __func__, max_cpus);
+
+#ifdef CONFIG_MIPS_MT
+ /*
+ * FIXME: some of these options are per-system, some per-core and
+ * some per-cpu
+ */
+ mips_mt_set_cpuoptions();
+#endif
+
+}
+
+struct plat_smp_ops cmp_smp_ops = {
+ .send_ipi_single = gic_send_ipi_single,
+ .send_ipi_mask = gic_send_ipi_mask,
+ .init_secondary = cmp_init_secondary,
+ .smp_finish = cmp_smp_finish,
+ .boot_secondary = cmp_boot_secondary,
+ .smp_setup = cmp_smp_setup,
+ .prepare_cpus = cmp_prepare_cpus,
+};
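
cmp_smp_setup() above hands out consecutive logical CPU numbers to whichever
physical CPUs the boot monitor reports available, always keeping logical 0 for
the boot processor. A sketch of the two-way map it fills in (availability is
faked here):

	#include <stdio.h>

	#define NR_CPUS 8

	static int number_map[NR_CPUS];		/* physical -> logical */
	static int logical_map[NR_CPUS];	/* logical -> physical */

	/* Stand-in for amon_cpu_avail(): pretend CPUs 0, 2 and 3 exist */
	static int cpu_avail(int i)
	{
		return i == 0 || i == 2 || i == 3;
	}

	int main(void)
	{
		int ncpu = 0;

		number_map[0] = 0;		/* boot CPU is logical 0 */
		logical_map[0] = 0;

		for (int i = 1; i < NR_CPUS; i++) {
			if (!cpu_avail(i))
				continue;
			number_map[i] = ++ncpu;
			logical_map[ncpu] = i;
		}

		for (int i = 0; i <= ncpu; i++)
			printf("logical %d -> physical %d\n",
			       i, logical_map[i]);
		return 0;
	}
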
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644
index 00000000000..949f2c6827a
--- /dev/null
+++ b/arch/mips/kernel/smp-cps.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/gic.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mips_mt.h>
+#include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
+#include <asm/smp-cps.h>
+#include <asm/time.h>
+#include <asm/uasm.h>
+
+static DECLARE_BITMAP(core_power, NR_CPUS);
+
+struct core_boot_config *mips_cps_core_bootcfg;
+
+static unsigned core_vpe_count(unsigned core)
+{
+ unsigned cfg;
+
+ if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ return 1;
+
+ write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+ cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+ return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+}
+
+static void __init cps_smp_setup(void)
+{
+ unsigned int ncores, nvpes, core_vpes;
+ int c, v;
+
+ /* Detect & record VPE topology */
+ ncores = mips_cm_numcores();
+ pr_info("VPE topology ");
+ for (c = nvpes = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(c);
+ pr_cont("%c%u", c ? ',' : '{', core_vpes);
+
+ /* Use the number of VPEs in core 0 for smp_num_siblings */
+ if (!c)
+ smp_num_siblings = core_vpes;
+
+ for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+ cpu_data[nvpes + v].core = c;
+#ifdef CONFIG_MIPS_MT_SMP
+ cpu_data[nvpes + v].vpe_id = v;
+#endif
+ }
+
+ nvpes += core_vpes;
+ }
+ pr_cont("} total %u\n", nvpes);
+
+ /* Indicate present CPUs (CPU being synonymous with VPE) */
+ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
+ set_cpu_possible(v, true);
+ set_cpu_present(v, true);
+ __cpu_number_map[v] = v;
+ __cpu_logical_map[v] = v;
+ }
+
+ /* Set a coherent default CCA (CWB) */
+ change_c0_config(CONF_CM_CMASK, 0x5);
+
+ /* Core 0 is powered up (we're running on it) */
+ bitmap_set(core_power, 0, 1);
+
+ /* Initialise core 0 */
+ mips_cps_core_init();
+
+ /* Make core 0 coherent with everything */
+ write_gcr_cl_coherence(0xff);
+}
+
+static void __init cps_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned ncores, core_vpes, c, cca;
+ bool cca_unsuitable;
+ u32 *entry_code;
+
+ mips_mt_set_cpuoptions();
+
+ /* Detect whether the CCA is unsuited to multi-core SMP */
+ cca = read_c0_config() & CONF_CM_CMASK;
+ switch (cca) {
+ case 0x4: /* CWBE */
+ case 0x5: /* CWB */
+ /* The CCA is coherent, multi-core is fine */
+ cca_unsuitable = false;
+ break;
+
+ default:
+ /* CCA is not coherent, multi-core is not usable */
+ cca_unsuitable = true;
+ }
+
+ /* Warn the user if the CCA prevents multi-core */
+ ncores = mips_cm_numcores();
+ if (cca_unsuitable && ncores > 1) {
+ pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
+ cca);
+
+ for_each_present_cpu(c) {
+ if (cpu_data[c].core)
+ set_cpu_present(c, false);
+ }
+ }
+
+ /*
+ * Patch the start of mips_cps_core_entry to provide:
+ *
+ * v0 = CM base address
+ * s0 = kseg0 CCA
+ */
+ entry_code = (u32 *)&mips_cps_core_entry;
+ UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
+ uasm_i_addiu(&entry_code, 16, 0, cca);
+ dma_cache_wback_inv((unsigned long)&mips_cps_core_entry,
+ (void *)entry_code - (void *)&mips_cps_core_entry);
+
+ /* Allocate core boot configuration structs */
+ mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg) {
+ pr_err("Failed to allocate boot config for %u cores\n", ncores);
+ goto err_out;
+ }
+
+ /* Allocate VPE boot configuration structs */
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(c);
+ mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+ sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg[c].vpe_config) {
+ pr_err("Failed to allocate %u VPE boot configs\n",
+ core_vpes);
+ goto err_out;
+ }
+ }
+
+ /* Mark this CPU as booted */
+ atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
+ 1 << cpu_vpe_id(&current_cpu_data));
+
+ return;
+err_out:
+ /* Clean up allocations */
+ if (mips_cps_core_bootcfg) {
+ for (c = 0; c < ncores; c++)
+ kfree(mips_cps_core_bootcfg[c].vpe_config);
+ kfree(mips_cps_core_bootcfg);
+ mips_cps_core_bootcfg = NULL;
+ }
+
+ /* Effectively disable SMP by declaring CPUs not present */
+ for_each_possible_cpu(c) {
+ if (c == 0)
+ continue;
+ set_cpu_present(c, false);
+ }
+}
+
+static void boot_core(unsigned core)
+{
+ u32 access;
+
+ /* Select the appropriate core */
+ write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF);
+
+ /* Set its reset vector */
+ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+
+ /* Ensure its coherency is disabled */
+ write_gcr_co_coherence(0);
+
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_access();
+ access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
+ write_gcr_access(access);
+
+ if (mips_cpc_present()) {
+ /* Reset the core */
+ mips_cpc_lock_other(core);
+ write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+ mips_cpc_unlock_other();
+ } else {
+ /* Take the core out of reset */
+ write_gcr_co_reset_release(0);
+ }
+
+ /* The core is now powered up */
+ bitmap_set(core_power, core, 1);
+}
+
+static void remote_vpe_boot(void *dummy)
+{
+ mips_cps_boot_vpes();
+}
+
+static void cps_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned core = cpu_data[cpu].core;
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
+ unsigned int remote;
+ int err;
+
+ vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+ vpe_cfg->sp = __KSTK_TOS(idle);
+ vpe_cfg->gp = (unsigned long)task_thread_info(idle);
+
+ atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+ preempt_disable();
+
+ if (!test_bit(core, core_power)) {
+ /* Boot a VPE on a powered down core */
+ boot_core(core);
+ goto out;
+ }
+
+ if (core != current_cpu_data.core) {
+ /* Boot a VPE on another powered up core */
+ for (remote = 0; remote < NR_CPUS; remote++) {
+ if (cpu_data[remote].core != core)
+ continue;
+ if (cpu_online(remote))
+ break;
+ }
+ BUG_ON(remote >= NR_CPUS);
+
+ err = smp_call_function_single(remote, remote_vpe_boot,
+ NULL, 1);
+ if (err)
+ panic("Failed to call remote CPU\n");
+ goto out;
+ }
+
+ BUG_ON(!cpu_has_mipsmt);
+
+ /* Boot a VPE on this core */
+ mips_cps_boot_vpes();
+out:
+ preempt_enable();
+}
+
+static void cps_init_secondary(void)
+{
+ /* Disable MT - we only want to run 1 TC per VPE */
+ if (cpu_has_mipsmt)
+ dmt();
+
+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void cps_smp_finish(void)
+{
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int cps_cpu_disable(void)
+{
+ unsigned cpu = smp_processor_id();
+ struct core_boot_config *core_cfg;
+
+ if (!cpu)
+ return -EBUSY;
+
+ if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+ return -EINVAL;
+
+ core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+ atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+ smp_mb__after_atomic();
+ set_cpu_online(cpu, false);
+ cpu_clear(cpu, cpu_callin_map);
+
+ return 0;
+}
+
+static DECLARE_COMPLETION(cpu_death_chosen);
+static unsigned cpu_death_sibling;
+static enum {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+} cpu_death;
+
+void play_dead(void)
+{
+ unsigned cpu, core;
+
+ local_irq_disable();
+ idle_task_exit();
+ cpu = smp_processor_id();
+ cpu_death = CPU_DEATH_POWER;
+
+ if (cpu_has_mipsmt) {
+ core = cpu_data[cpu].core;
+
+ /* Look for another online VPE within the core */
+ for_each_online_cpu(cpu_death_sibling) {
+ if (cpu_data[cpu_death_sibling].core != core)
+ continue;
+
+ /*
+ * There is an online VPE within the core. Just halt
+ * this TC and leave the core alone.
+ */
+ cpu_death = CPU_DEATH_HALT;
+ break;
+ }
+ }
+
+ /* This CPU has chosen its way out */
+ complete(&cpu_death_chosen);
+
+ if (cpu_death == CPU_DEATH_HALT) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else {
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+
+ /* This should never be reached */
+ panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+	unsigned cpu = (unsigned long)ptr_cpu;
+ unsigned vpe_id = cpu_data[cpu].vpe_id;
+ unsigned halted;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+ settc(vpe_id);
+ halted = read_tc_c0_tchalt();
+ local_irq_restore(flags);
+ } while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+ unsigned core = cpu_data[cpu].core;
+ unsigned stat;
+ int err;
+
+	/* Wait for the CPU to choose its way out */
+ if (!wait_for_completion_timeout(&cpu_death_chosen,
+ msecs_to_jiffies(5000))) {
+ pr_err("CPU%u: didn't offline\n", cpu);
+ return;
+ }
+
+ /*
+	 * Now wait for the CPU to actually go offline. Without this wait,
+	 * the offlining may race with one or more of:
+	 *
+	 * - Onlining the CPU again.
+	 * - Powering down the core if another VPE within it is offlined.
+	 * - A sibling VPE entering a non-coherent state.
+	 *
+	 * In the non-MT halt case (i.e. the infinite loop) the CPU is doing
+	 * nothing with which we could race, so do nothing.
+ */
+ if (cpu_death == CPU_DEATH_POWER) {
+ /*
+		 * Wait for the core to enter a powered-down or clock-gated
+		 * state; the latter happens when a JTAG probe is connected,
+		 * in which case the CPC will refuse to power down the core.
+ */
+ do {
+ mips_cpc_lock_other(core);
+ stat = read_cpc_co_stat_conf();
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ mips_cpc_unlock_other();
+ } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
+ stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
+ stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
+
+ /* Indicate the core is powered off */
+ bitmap_clear(core_power, core, 1);
+ } else if (cpu_has_mipsmt) {
+ /*
+		 * Have a CPU with access to the offlined CPU's registers wait
+ * for its TC to halt.
+ */
+ err = smp_call_function_single(cpu_death_sibling,
+ wait_for_sibling_halt,
+ (void *)cpu, 1);
+ if (err)
+ panic("Failed to call remote sibling CPU\n");
+ }
+}
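
Editor's note on the power-down poll in cps_cpu_die() above: the loop is unbounded, on the expectation that the CPC eventually reports D0, D2 or U2. A bounded variant is sketched below purely as an alternative shape, using only interfaces already shown in this file; the retry limit and the warning are invented for illustration and are not kernel code:

	/* Sketch only - the in-tree loop above is deliberately unbounded. */
	unsigned stat, tries = 1000000;	/* arbitrary illustrative bound */

	do {
		mips_cpc_lock_other(core);
		stat = read_cpc_co_stat_conf() & CPC_Cx_STAT_CONF_SEQSTATE_MSK;
		mips_cpc_unlock_other();
	} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
		 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
		 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2 &&
		 --tries);

	if (!tries)
		pr_warn("CPU%u: core %u never powered down\n", cpu, core);
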
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static struct plat_smp_ops cps_smp_ops = {
+ .smp_setup = cps_smp_setup,
+ .prepare_cpus = cps_prepare_cpus,
+ .boot_secondary = cps_boot_secondary,
+ .init_secondary = cps_init_secondary,
+ .smp_finish = cps_smp_finish,
+ .send_ipi_single = gic_send_ipi_single,
+ .send_ipi_mask = gic_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cps_cpu_disable,
+ .cpu_die = cps_cpu_die,
+#endif
+};
+
+bool mips_cps_smp_in_use(void)
+{
+ extern struct plat_smp_ops *mp_ops;
+ return mp_ops == &cps_smp_ops;
+}
+
+int register_cps_smp_ops(void)
+{
+ if (!mips_cm_present()) {
+ pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
+ return -ENODEV;
+ }
+
+ /* check we have a GIC - we need one for IPIs */
+ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
+ pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
+ return -ENODEV;
+ }
+
+ register_smp_ops(&cps_smp_ops);
+ return 0;
+}
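
Editor's note: a plausible call site for register_cps_smp_ops(), shown only as a sketch. Platform setup code can try the CPS ops first and fall back to the MT SMVP ops when no CM or GIC is available; the function name and calling context below are assumptions, while vsmp_smp_ops is the ops table from smp-mt.c further down in this patch:

	/* Sketch of SMP-ops selection in early platform setup (assumed). */
	static void plat_select_smp_ops(void)
	{
		if (!register_cps_smp_ops())
			return;		/* CM + GIC found; CPS SMP in use */

		register_smp_ops(&vsmp_smp_ops);	/* fall back to SMVP */
	}
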
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
new file mode 100644
index 00000000000..3b21a96d1cc
--- /dev/null
+++ b/arch/mips/kernel/smp-gic.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * Based on smp-cmp.c:
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Author: Chris Dearman (chris@mips.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/printk.h>
+
+#include <asm/gic.h>
+#include <asm/mips-cpc.h>
+#include <asm/smp-ops.h>
+
+void gic_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+ unsigned int intr;
+ unsigned int core = cpu_data[cpu].core;
+
+ pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
+ smp_processor_id(), __func__, cpu, action, read_c0_status());
+
+ local_irq_save(flags);
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ intr = plat_ipi_call_int_xlate(cpu);
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ intr = plat_ipi_resched_int_xlate(cpu);
+ break;
+
+ default:
+ BUG();
+ }
+
+ gic_send_ipi(intr);
+
+ if (mips_cpc_present() && (core != current_cpu_data.core)) {
+ while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cpc_lock_other(core);
+ write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+ mips_cpc_unlock_other();
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ gic_send_ipi_single(i, action);
+}
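
Editor's note: these two functions are plugged into cps_smp_ops above as .send_ipi_single and .send_ipi_mask, so generic code reaches them only through the mp_ops table. A sketch of that dispatch, not a quote of any specific call site:

	/* Sketch: an IPI request fanning out through the ops table. */
	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
	mp_ops->send_ipi_mask(cpu_online_mask, SMP_CALL_FUNCTION);
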
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 89e6f6aa516..3babf6e4f89 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -24,122 +24,19 @@
#include <linux/compiler.h>
#include <linux/smp.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
+#include <asm/gic.h>
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
- printk("vpe %d tc %d\n", vpe, tc);
-
- settc(tc);
-
- printk(" c0 status 0x%lx\n", read_vpe_c0_status());
- printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
- printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
- printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
- printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
- printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
- printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-void __init sanitize_tlb_entries(void)
-{
- int i, tlbsiz;
- unsigned long mvpconf0, ncpu;
-
- if (!cpu_has_mipsmt)
- return;
-
- /* Enable VPC */
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- back_to_back_c0_hazard();
-
- /* Disable TLB sharing */
- clear_c0_mvpcontrol(MVPCONTROL_STLB);
-
- mvpconf0 = read_c0_mvpconf0();
-
- printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
- (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
- (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
-
- tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
- ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
- printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
-
- if (tlbsiz > 0) {
- /* share them out across the vpe's */
- tlbsiz /= ncpu;
-
- printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
-
- for (i = 0; i < ncpu; i++) {
- settc(i);
-
- if (i == 0)
- write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
- else
- write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
- (tlbsiz << 25));
- }
- }
-
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
-}
-
-static void ipi_resched_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- smp_call_function_interrupt();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_DISABLED|IRQF_PERCPU,
- .name = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_DISABLED|IRQF_PERCPU,
- .name = "IPI_call"
-};
-
-static void __init smp_copy_vpe_config(void)
+static void __init smvp_copy_vpe_config(void)
{
write_vpe_c0_status(
(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -156,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
write_vpe_c0_count(read_c0_count());
}
-static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
unsigned int ncpu)
{
if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -173,21 +70,22 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
write_vpe_c0_vpeconf0(tmp);
/* Record this as available CPU */
- cpu_set(tc, phys_cpu_present_map);
+ set_cpu_possible(tc, true);
+ set_cpu_present(tc, true);
__cpu_number_map[tc] = ++ncpu;
- __cpu_logical_map[ncpu] = tc;
+ __cpu_logical_map[ncpu] = tc;
}
/* Disable multi-threading with TC's */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
if (tc != 0)
- smp_copy_vpe_config();
+ smvp_copy_vpe_config();
return ncpu;
}
-static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
unsigned long tmp;
@@ -221,9 +119,15 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
unsigned long flags;
int vpflags;
+#ifdef CONFIG_IRQ_GIC
+ if (gic_present) {
+ gic_send_ipi_single(cpu, action);
+ return;
+ }
+#endif
local_irq_save(flags);
- vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */
+ vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
switch (action) {
case SMP_CALL_FUNCTION:
@@ -244,25 +148,30 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
-static void __cpuinit vsmp_init_secondary(void)
+static void vsmp_init_secondary(void)
{
- /* Enable per-cpu interrupts */
-
- /* This is Malta specific: IPI,performance and timer inetrrupts */
- write_c0_status((read_c0_status() & ~ST0_IM ) |
- (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+#ifdef CONFIG_IRQ_GIC
+	/* This is Malta specific: IPI, performance and timer interrupts */
+ if (gic_present)
+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP6 | STATUSF_IP7);
+ else
+#endif
+ change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+ STATUSF_IP6 | STATUSF_IP7);
}
-static void __cpuinit vsmp_smp_finish(void)
+static void vsmp_smp_finish(void)
{
+ /* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
#ifdef CONFIG_MIPS_MT_FPAFF
@@ -274,10 +183,6 @@ static void __cpuinit vsmp_smp_finish(void)
local_irq_enable();
}
-static void vsmp_cpus_done(void)
-{
-}
-
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
@@ -286,7 +191,7 @@ static void vsmp_cpus_done(void)
* (unsigned long)idle->thread_info the gp
* assumes a 1:1 mapping of TC => VPE
*/
-static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
+static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
dvpe();
@@ -312,7 +217,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp,
- (unsigned long)(gp + sizeof(struct thread_info)));
+ (unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -323,7 +228,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
/*
* Common setup before any secondaries are started
* Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
+ * secondaries
*/
static void __init vsmp_smp_setup(void)
{
@@ -356,8 +261,8 @@ static void __init vsmp_smp_setup(void)
for (tc = 0; tc <= ntc; tc++) {
settc(tc);
- smp_tc_init(tc, mvpconf0);
- ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+ smvp_tc_init(tc, mvpconf0);
+ ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
}
/* Release config state */
@@ -371,21 +276,6 @@ static void __init vsmp_smp_setup(void)
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
mips_mt_set_cpuoptions();
-
- /* set up ipi interrupts */
- if (cpu_has_vint) {
- set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
- set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
- }
-
- cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
- cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
- setup_irq(cpu_ipi_resched_irq, &irq_resched);
- setup_irq(cpu_ipi_call_irq, &irq_call);
-
- set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
- set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}
struct plat_smp_ops vsmp_smp_ops = {
@@ -393,8 +283,29 @@ struct plat_smp_ops vsmp_smp_ops = {
.send_ipi_mask = vsmp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
- .cpus_done = vsmp_cpus_done,
.boot_secondary = vsmp_boot_secondary,
.smp_setup = vsmp_smp_setup,
.prepare_cpus = vsmp_prepare_cpus,
};
+
+static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
+ unsigned long action_unused, void *data)
+{
+ struct proc_cpuinfo_notifier_args *pcn = data;
+ struct seq_file *m = pcn->m;
+ unsigned long n = pcn->n;
+
+ if (!cpu_has_mipsmt)
+ return NOTIFY_OK;
+
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+
+ return NOTIFY_OK;
+}
+
+static int __init proc_cpuinfo_notifier_init(void)
+{
+ return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
+}
+
+subsys_initcall(proc_cpuinfo_notifier_init);
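
Editor's note: with this notifier registered, each processor block in /proc/cpuinfo on an MT-enabled kernel gains one extra line, produced by the seq_printf() above. An illustrative excerpt (values depend on which VPE the logical CPU is bound to):

	processor		: 1
	VPE			: 1
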
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index ead6c30eeb1..17878d71ef2 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -13,12 +13,13 @@
/*
* Send inter-processor interrupt
*/
-void up_send_ipi_single(int cpu, unsigned int action)
+static void up_send_ipi_single(int cpu, unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
@@ -27,41 +28,51 @@ static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
-void __cpuinit up_init_secondary(void)
+static void up_init_secondary(void)
{
}
-void __cpuinit up_smp_finish(void)
-{
-}
-
-/* Hook for after all CPUs are online */
-void up_cpus_done(void)
+static void up_smp_finish(void)
{
}
/*
* Firmware CPU startup hook
*/
-void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle)
+static void up_boot_secondary(int cpu, struct task_struct *idle)
+{
+}
+
+static void __init up_smp_setup(void)
+{
+}
+
+static void __init up_prepare_cpus(unsigned int max_cpus)
{
}
-void __init up_smp_setup(void)
+#ifdef CONFIG_HOTPLUG_CPU
+static int up_cpu_disable(void)
{
+ return -ENOSYS;
}
-void __init up_prepare_cpus(unsigned int max_cpus)
+static void up_cpu_die(unsigned int cpu)
{
+ BUG();
}
+#endif
struct plat_smp_ops up_smp_ops = {
.send_ipi_single = up_send_ipi_single,
.send_ipi_mask = up_send_ipi_mask,
.init_secondary = up_init_secondary,
.smp_finish = up_smp_finish,
- .cpus_done = up_cpus_done,
.boot_secondary = up_boot_secondary,
.smp_setup = up_smp_setup,
.prepare_cpus = up_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = up_cpu_disable,
+ .cpu_die = up_cpu_die,
+#endif
};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab90a8..9bad52ede90 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
@@ -31,28 +32,24 @@
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
+#include <linux/ftrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/idle.h>
+#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
+#include <asm/setup.h>
-#ifdef CONFIG_MIPS_MT_SMTC
-#include <asm/mipsmtregs.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
-cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
-int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
-int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
-EXPORT_SYMBOL(phys_cpu_present_map);
-EXPORT_SYMBOL(cpu_online_map);
+int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
-extern void cpu_idle(void);
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
@@ -65,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_coherent_mask;
+
static inline void set_cpu_sibling_map(int cpu)
{
int i;
@@ -83,11 +82,12 @@ static inline void set_cpu_sibling_map(int cpu)
}
struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
-__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+void register_smp_ops(struct plat_smp_ops *ops)
{
- if (ops)
- printk(KERN_WARNING "Overriding previous set SMP ops\n");
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
mp_ops = ops;
}
@@ -96,17 +96,13 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
* First C code run on the secondary CPUs after being started up by
* the master.
*/
-asmlinkage __cpuinit void start_secondary(void)
+asmlinkage void start_secondary(void)
{
unsigned int cpu;
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Only do cpu_probe for first TC of CPU */
- if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
-#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
- per_cpu_trap_init();
+ per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
@@ -120,153 +116,35 @@ asmlinkage __cpuinit void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
- mp_ops->smp_finish();
- set_cpu_sibling_map(cpu);
+ cpu_set(cpu, cpu_coherent_mask);
+ notify_cpu_starting(cpu);
- cpu_set(cpu, cpu_callin_map);
+ set_cpu_online(cpu, true);
- cpu_idle();
-}
+ set_cpu_sibling_map(cpu);
-DEFINE_SPINLOCK(smp_call_lock);
+ cpu_set(cpu, cpu_callin_map);
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- * <mask> cpuset_t of all processors to run the function on.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <retry> If true, keep retrying until ready.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A CPU B
- * Disable interrupts
- * smp_call_function()
- * Take call_lock
- * Send IPIs
- * Wait for all cpus to acknowledge IPI
- * CPU A has not responded, spin waiting
- * for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
- void *info, int retry, int wait)
-{
- struct call_data_struct data;
- int cpu = smp_processor_id();
- int cpus;
+ synchronise_count_slave(cpu);
/*
- * Can die spectacularly if this CPU isn't yet marked online
+	 * IRQs will be enabled in ->smp_finish(); enabling them too early
+	 * is dangerous.
*/
- BUG_ON(!cpu_online(cpu));
-
- cpu_clear(cpu, mask);
- cpus = cpus_weight(mask);
- if (!cpus)
- return 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&smp_call_lock);
- call_data = &data;
- smp_mb();
-
- /* Send a message to all other CPUs and wait for them to respond */
- mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
- /* Wait for response */
- /* FIXME: lock-up detection, backtrace on lock-up */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- call_data = NULL;
- spin_unlock(&smp_call_lock);
-
- return 0;
-}
+ WARN_ON_ONCE(!irqs_disabled());
+ mp_ops->smp_finish();
-int smp_call_function(void (*func) (void *info), void *info, int retry,
- int wait)
-{
- return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+ cpu_startup_entry(CPUHP_ONLINE);
}
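
Editor's note: the bring-up handshake above pairs with __cpu_up() later in this file - the secondary sets its bit in cpu_callin_map while the master spins until the bit appears. A minimal user-space analogue of that handshake, with an atomic flag standing in for cpu_callin_map (all names here are illustrative):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int callin;		/* stands in for cpu_callin_map */

	static void *secondary(void *arg)
	{
		/* ...per-CPU init would happen here... */
		atomic_store(&callin, 1);	/* cpu_set(cpu, cpu_callin_map) */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, secondary, NULL);
		while (!atomic_load(&callin))	/* __cpu_up() spinning */
			usleep(100);
		puts("secondary checked in");
		pthread_join(t, NULL);
		return 0;
	}
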
-void smp_call_function_interrupt(void)
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
+void __irq_entry smp_call_function_interrupt(void)
{
- void (*func) (void *info) = call_data->func;
- void *info = call_data->info;
- int wait = call_data->wait;
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function.
- */
- smp_mb();
- atomic_inc(&call_data->started);
-
- /*
- * At this point the info structure may be out of scope unless wait==1.
- */
irq_enter();
- (*func)(info);
+ generic_smp_call_function_interrupt();
irq_exit();
-
- if (wait) {
- smp_mb();
- atomic_inc(&call_data->finished);
- }
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int retry, int wait)
-{
- int ret, me;
-
- /*
- * Can die spectacularly if this CPU isn't yet marked online
- */
- if (!cpu_online(cpu))
- return 0;
-
- me = get_cpu();
- BUG_ON(!cpu_online(me));
-
- if (cpu == me) {
- local_irq_disable();
- func(info);
- local_irq_enable();
- put_cpu();
- return 0;
- }
-
- ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
- wait);
-
- put_cpu();
- return 0;
}
static void stop_this_cpu(void *dummy)
@@ -274,19 +152,20 @@ static void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
- cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_enable(); /* May need to service _machine_restart IPI */
- for (;;); /* Wait if available. */
+ set_cpu_online(smp_processor_id(), false);
+ for (;;) {
+ if (cpu_wait)
+ (*cpu_wait)(); /* Wait if available. */
+ }
}
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
- mp_ops->cpus_done();
}
/* called from main before smp_init() */
@@ -297,43 +176,22 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(cpu_possible_mask);
#endif
+ cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
-void __devinit smp_prepare_boot_cpu(void)
+void smp_prepare_boot_cpu(void)
{
- /*
- * This assumes that bootup is always handled by the processor
- * with the logic and physical number 0.
- */
- __cpu_number_map[0] = 0;
- __cpu_logical_map[0] = 0;
- cpu_set(0, phys_cpu_present_map);
- cpu_set(0, cpu_online_map);
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
cpu_set(0, cpu_callin_map);
}
-/*
- * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set. Note: cpu is
- * physical, not logical.
- */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- struct task_struct *idle;
-
- /*
- * Processor goes to start_secondary(), sets online flag
- * The following code is purely to make sure
- * Linux can schedule processes on this slave.
- */
- idle = fork_idle(cpu);
- if (IS_ERR(idle))
- panic(KERN_ERR "Fork failed for CPU %d", cpu);
-
- mp_ops->boot_secondary(cpu, idle);
+ mp_ops->boot_secondary(cpu, tidle);
/*
* Trust is futile. We should really have timeouts ...
@@ -341,8 +199,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
- cpu_set(cpu, cpu_online_map);
-
+ synchronise_count_master(cpu);
return 0;
}
@@ -359,7 +216,7 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -374,13 +231,10 @@ static void flush_tlb_mm_ipi(void *mm)
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
- * o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
*/
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
-#ifndef CONFIG_MIPS_MT_SMTC
- smp_call_function(func, info, 1, 1);
-#endif
+ smp_call_function(func, info, 1);
}
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
@@ -413,13 +267,12 @@ void flush_tlb_mm(struct mm_struct *mm)
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
- cpumask_t mask = cpu_online_map;
unsigned int cpu;
- cpu_clear(smp_processor_id(), mask);
- for_each_cpu_mask(cpu, mask)
- if (cpu_context(cpu, mm))
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
+ }
}
local_flush_tlb_mm(mm);
@@ -453,13 +306,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
- cpumask_t mask = cpu_online_map;
unsigned int cpu;
- cpu_clear(smp_processor_id(), mask);
- for_each_cpu_mask(cpu, mask)
- if (cpu_context(cpu, mm))
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
+ }
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
@@ -479,7 +331,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
.addr2 = end,
};
- on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
@@ -500,13 +352,12 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
} else {
- cpumask_t mask = cpu_online_map;
unsigned int cpu;
- cpu_clear(smp_processor_id(), mask);
- for_each_cpu_mask(cpu, mask)
- if (cpu_context(cpu, vma->vm_mm))
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
cpu_context(cpu, vma->vm_mm) = 0;
+ }
}
local_flush_tlb_page(vma, page);
preempt_enable();
@@ -526,3 +377,63 @@ void flush_tlb_one(unsigned long vaddr)
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
+
+#if defined(CONFIG_KEXEC)
+void (*dump_ipi_function_ptr)(void *) = NULL;
+void dump_send_ipi(void (*dump_ipi_callback)(void *))
+{
+ int i;
+ int cpu = smp_processor_id();
+
+ dump_ipi_function_ptr = dump_ipi_callback;
+ smp_mb();
+ for_each_online_cpu(i)
+ if (i != cpu)
+ mp_ops->send_ipi_single(i, SMP_DUMP);
+}
+EXPORT_SYMBOL(dump_send_ipi);
+#endif
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
+static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+
+void tick_broadcast(const struct cpumask *mask)
+{
+ atomic_t *count;
+ struct call_single_data *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ count = &per_cpu(tick_broadcast_count, cpu);
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+
+ if (atomic_inc_return(count) == 1)
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+static void tick_broadcast_callee(void *info)
+{
+ int cpu = smp_processor_id();
+ tick_receive_broadcast();
+ atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
+}
+
+static int __init tick_broadcast_init(void)
+{
+ struct call_single_data *csd;
+ int cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+ csd->func = tick_broadcast_callee;
+ }
+
+ return 0;
+}
+early_initcall(tick_broadcast_init);
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
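
Editor's note: the atomic_inc_return(count) == 1 test in tick_broadcast() above is what coalesces redundant broadcast IPIs - only the zero-to-one transition schedules the CSD, and tick_broadcast_callee() resets the count once the tick is handled. A user-space sketch of the same gating idea (names are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pending;	/* per-CPU tick_broadcast_count stand-in */

	/* Returns 1 only for the request that must actually send the IPI. */
	static int should_send(void)
	{
		return atomic_fetch_add(&pending, 1) == 0;
	}

	/* What the receiving CPU does after handling the tick. */
	static void handled(void)
	{
		atomic_store(&pending, 0);
	}

	int main(void)
	{
		printf("%d %d %d\n", should_send(), should_send(), should_send());
		/* prints "1 0 0": later requests piggyback on the first IPI */
		handled();
		printf("%d\n", should_send());	/* "1" again after handling */
		return 0;
	}
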
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
deleted file mode 100644
index 20938a4cb52..00000000000
--- a/arch/mips/kernel/smtc-asm.S
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Assembly Language Functions for MIPS MT SMTC support
- */
-
-/*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
-
-#include <asm/regdef.h>
-#include <asm/asmmacro.h>
-#include <asm/stackframe.h>
-#include <asm/irqflags.h>
-
-/*
- * "Software Interrupt" linkage.
- *
- * This is invoked when an "Interrupt" is sent from one TC to another,
- * where the TC to be interrupted is halted, has it's Restart address
- * and Status values saved by the "remote control" thread, then modified
- * to cause execution to begin here, in kenel mode. This code then
- * disguises the TC state as that of an exception and transfers
- * control to the general exception or vectored interrupt handler.
- */
- .set noreorder
-
-/*
-The __smtc_ipi_vector would use k0 and k1 as temporaries and
-1) Set EXL (this is per-VPE, so this can't be done by proxy!)
-2) Restore the K/CU and IXMT bits to the pre "exception" state
- (EXL means no interrupts and access to the kernel map).
-3) Set EPC to be the saved value of TCRestart.
-4) Jump to the exception handler entry point passed by the sender.
-
-CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
-*/
-
-/*
- * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
- * state of pre-halt thread, then save everything and call
- * thought some function pointer to imaginary_exception, which
- * will parse a register value or memory message queue to
- * deliver things like interprocessor interrupts. On return
- * from that function, jump to the global ret_from_irq code
- * to invoke the scheduler and return as appropriate.
- */
-
-#define PT_PADSLOT4 (PT_R0-8)
-#define PT_PADSLOT5 (PT_R0-4)
-
- .text
- .align 5
-FEXPORT(__smtc_ipi_vector)
- .set noat
- /* Disable thread scheduling to make Status update atomic */
- DMT 27 # dmt k1
- _ehb
- /* Set EXL */
- mfc0 k0,CP0_STATUS
- ori k0,k0,ST0_EXL
- mtc0 k0,CP0_STATUS
- _ehb
- /* Thread scheduling now inhibited by EXL. Restore TE state. */
- andi k1,k1,VPECONTROL_TE
- beqz k1,1f
- emt
-1:
- /*
- * The IPI sender has put some information on the anticipated
- * kernel stack frame. If we were in user mode, this will be
- * built above the saved kernel SP. If we were already in the
- * kernel, it will be built above the current CPU SP.
- *
- * Were we in kernel mode, as indicated by CU0?
- */
- sll k1,k0,3
- .set noreorder
- bltz k1,2f
- move k1,sp
- .set reorder
- /*
- * If previously in user mode, set CU0 and use kernel stack.
- */
- li k1,ST0_CU0
- or k1,k1,k0
- mtc0 k1,CP0_STATUS
- _ehb
- get_saved_sp
- /* Interrupting TC will have pre-set values in slots in the new frame */
-2: subu k1,k1,PT_SIZE
- /* Load TCStatus Value */
- lw k0,PT_TCSTATUS(k1)
- /* Write it to TCStatus to restore CU/KSU/IXMT state */
- mtc0 k0,$2,1
- _ehb
- lw k0,PT_EPC(k1)
- mtc0 k0,CP0_EPC
- /* Save all will redundantly recompute the SP, but use it for now */
- SAVE_ALL
- CLI
- TRACE_IRQS_OFF
- /* Function to be invoked passed stack pad slot 5 */
- lw t0,PT_PADSLOT5(sp)
- /* Argument from sender passed in stack pad slot 4 */
- lw a0,PT_PADSLOT4(sp)
- LONG_L s0, TI_REGS($28)
- LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- jr t0
-
-/*
- * Called from idle loop to provoke processing of queued IPIs
- * First IPI message in queue passed as argument.
- */
-
-LEAF(self_ipi)
- /* Before anything else, block interrupts */
- mfc0 t0,CP0_TCSTATUS
- ori t1,t0,TCSTATUS_IXMT
- mtc0 t1,CP0_TCSTATUS
- _ehb
- /* We know we're in kernel mode, so prepare stack frame */
- subu t1,sp,PT_SIZE
- sw ra,PT_EPC(t1)
- sw a0,PT_PADSLOT4(t1)
- la t2,ipi_decode
- sw t2,PT_PADSLOT5(t1)
- /* Save pre-disable value of TCStatus */
- sw t0,PT_TCSTATUS(t1)
- j __smtc_ipi_vector
- nop
-END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
deleted file mode 100644
index fe256559c99..00000000000
--- a/arch/mips/kernel/smtc-proc.c
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * /proc hooks for SMTC kernel
- * Copyright (C) 2005 Mips Technologies, Inc
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/mipsregs.h>
-#include <asm/cacheflush.h>
-#include <linux/proc_fs.h>
-
-#include <asm/smtc_proc.h>
-
-/*
- * /proc diagnostic and statistics hooks
- */
-
-/*
- * Statistics gathered
- */
-unsigned long selfipis[NR_CPUS];
-
-struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
-
-static struct proc_dir_entry *smtc_stats;
-
-atomic_t smtc_fpu_recoveries;
-
-static int proc_read_smtc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int totalen = 0;
- int len;
- int i;
- extern unsigned long ebase;
-
- len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
- totalen += len;
- page += len;
- len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
- totalen += len;
- page += len;
- len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
- totalen += len;
- page += len;
- len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
- totalen += len;
- page += len;
- for (i=0; i < NR_CPUS; i++) {
- len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
- totalen += len;
- page += len;
- }
- len = sprintf(page, "Self-IPIs by CPU:\n");
- totalen += len;
- page += len;
- for(i = 0; i < NR_CPUS; i++) {
- len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
- totalen += len;
- page += len;
- }
- len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
- atomic_read(&smtc_fpu_recoveries));
- totalen += len;
- page += len;
-
- return totalen;
-}
-
-void init_smtc_stats(void)
-{
- int i;
-
- for (i=0; i<NR_CPUS; i++) {
- smtc_cpu_stats[i].timerints = 0;
- smtc_cpu_stats[i].selfipis = 0;
- }
-
- atomic_set(&smtc_fpu_recoveries, 0);
-
- smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
- proc_read_smtc, NULL);
-}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
deleted file mode 100644
index b42e71c7111..00000000000
--- a/arch/mips/kernel/smtc.c
+++ /dev/null
@@ -1,1424 +0,0 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
-
-#include <linux/clockchips.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/hazards.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/mipsregs.h>
-#include <asm/cacheflush.h>
-#include <asm/time.h>
-#include <asm/addrspace.h>
-#include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
-#include <asm/smtc_proc.h>
-
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-
-#define LOCK_MT_PRA() \
- local_irq_save(flags); \
- mtflags = dmt()
-
-#define UNLOCK_MT_PRA() \
- emt(mtflags); \
- local_irq_restore(flags)
-
-#define LOCK_CORE_PRA() \
- local_irq_save(flags); \
- mtflags = dvpe()
-
-#define UNLOCK_CORE_PRA() \
- evpe(mtflags); \
- local_irq_restore(flags)
-
-/*
- * Data structures purely associated with SMTC parallelism
- */
-
-
-/*
- * Table for tracking ASIDs whose lifetime is prolonged.
- */
-
-asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
-
-/*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];
-
-/*
- * Number of InterProcessor Interrupt (IPI) message buffers to allocate
- */
-
-#define IPIBUF_PER_CPU 4
-
-static struct smtc_ipi_q IPIQ[NR_CPUS];
-static struct smtc_ipi_q freeIPIq;
-
-
-/* Forward declarations */
-
-void ipi_decode(struct smtc_ipi *);
-static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
-static void setup_cross_vpe_interrupts(unsigned int nvpe);
-void init_smtc_stats(void);
-
-/* Global SMTC Status */
-
-unsigned int smtc_status = 0;
-
-/* Boot command line configuration overrides */
-
-static int vpe0limit;
-static int ipibuffers = 0;
-static int nostlb = 0;
-static int asidmask = 0;
-unsigned long smtc_asid_mask = 0xff;
-
-static int __init vpe0tcs(char *str)
-{
- get_option(&str, &vpe0limit);
-
- return 1;
-}
-
-static int __init ipibufs(char *str)
-{
- get_option(&str, &ipibuffers);
- return 1;
-}
-
-static int __init stlb_disable(char *s)
-{
- nostlb = 1;
- return 1;
-}
-
-static int __init asidmask_set(char *str)
-{
- get_option(&str, &asidmask);
- switch (asidmask) {
- case 0x1:
- case 0x3:
- case 0x7:
- case 0xf:
- case 0x1f:
- case 0x3f:
- case 0x7f:
- case 0xff:
- smtc_asid_mask = (unsigned long)asidmask;
- break;
- default:
- printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
- }
- return 1;
-}
-
-__setup("vpe0tcs=", vpe0tcs);
-__setup("ipibufs=", ipibufs);
-__setup("nostlb", stlb_disable);
-__setup("asidmask=", asidmask_set);
-
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
-
-static int hang_trig = 0;
-
-static int __init hangtrig_enable(char *s)
-{
- hang_trig = 1;
- return 1;
-}
-
-
-__setup("hangtrig", hangtrig_enable);
-
-#define DEFAULT_BLOCKED_IPI_LIMIT 32
-
-static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
-
-static int __init tintq(char *str)
-{
- get_option(&str, &timerq_limit);
- return 1;
-}
-
-__setup("tintq=", tintq);
-
-static int imstuckcount[2][8];
-/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-static int vpemask[2][8] = {
- {0, 0, 1, 0, 0, 0, 0, 1},
- {0, 0, 0, 0, 0, 0, 0, 1}
-};
-int tcnoprog[NR_CPUS];
-static atomic_t idle_hook_initialized = {0};
-static int clock_hang_reported[NR_CPUS];
-
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-
-/* Initialize shared TLB - the should probably migrate to smtc_setup_cpus() */
-
-void __init sanitize_tlb_entries(void)
-{
- printk("Deprecated sanitize_tlb_entries() invoked\n");
-}
-
-
-/*
- * Configure shared TLB - VPC configuration bit must be set by caller
- */
-
-static void smtc_configure_tlb(void)
-{
- int i, tlbsiz, vpes;
- unsigned long mvpconf0;
- unsigned long config1val;
-
- /* Set up ASID preservation table */
- for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
- for(i = 0; i < MAX_SMTC_ASIDS; i++) {
- smtc_live_asid[vpes][i] = 0;
- }
- }
- mvpconf0 = read_c0_mvpconf0();
-
- if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
- >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
- /* If we have multiple VPEs, try to share the TLB */
- if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
- /*
- * If TLB sizing is programmable, shared TLB
- * size is the total available complement.
- * Otherwise, we have to take the sum of all
- * static VPE TLB entries.
- */
- if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
- >> MVPCONF0_PTLBE_SHIFT)) == 0) {
- /*
- * If there's more than one VPE, there had better
- * be more than one TC, because we need one to bind
- * to each VPE in turn to be able to read
- * its configuration state!
- */
- settc(1);
- /* Stop the TC from doing anything foolish */
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- /* No need to un-Halt - that happens later anyway */
- for (i=0; i < vpes; i++) {
- write_tc_c0_tcbind(i);
- /*
- * To be 100% sure we're really getting the right
- * information, we exit the configuration state
- * and do an IHB after each rebinding.
- */
- write_c0_mvpcontrol(
- read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
- mips_ihb();
- /*
- * Only count if the MMU Type indicated is TLB
- */
- if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
- config1val = read_vpe_c0_config1();
- tlbsiz += ((config1val >> 25) & 0x3f) + 1;
- }
-
- /* Put core back in configuration state */
- write_c0_mvpcontrol(
- read_c0_mvpcontrol() | MVPCONTROL_VPC );
- mips_ihb();
- }
- }
- write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
- ehb();
-
- /*
- * Setup kernel data structures to use software total,
- * rather than read the per-VPE Config1 value. The values
- * for "CPU 0" gets copied to all the other CPUs as part
- * of their initialization in smtc_cpu_setup().
- */
-
- /* MIPS32 limits TLB indices to 64 */
- if (tlbsiz > 64)
- tlbsiz = 64;
- cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
- smtc_status |= SMTC_TLB_SHARED;
- local_flush_tlb_all();
-
- printk("TLB of %d entry pairs shared by %d VPEs\n",
- tlbsiz, vpes);
- } else {
- printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
- }
- }
-}
-
-
-/*
- * Incrementally build the CPU map out of constituent MIPS MT cores,
- * using the specified available VPEs and TCs. Plaform code needs
- * to ensure that each MIPS MT core invokes this routine on reset,
- * one at a time(!).
- *
- * This version of the build_cpu_map and prepare_cpus routines assumes
- * that *all* TCs of a MIPS MT core will be used for Linux, and that
- * they will be spread across *all* available VPEs (to minimise the
- * loss of efficiency due to exception service serialization).
- * An improved version would pick up configuration information and
- * possibly leave some TCs/VPEs as "slave" processors.
- *
- * Use c0_MVPConf0 to find out how many TCs are available, setting up
- * phys_cpu_present_map and the logical/physical mappings.
- */
-
-int __init mipsmt_build_cpu_map(int start_cpu_slot)
-{
- int i, ntcs;
-
- /*
- * The CPU map isn't actually used for anything at this point,
- * so it's not clear what else we should do apart from set
- * everything up so that "logical" = "physical".
- */
- ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
- cpu_set(i, phys_cpu_present_map);
- __cpu_number_map[i] = i;
- __cpu_logical_map[i] = i;
- }
-#ifdef CONFIG_MIPS_MT_FPAFF
- /* Initialize map of CPUs with FPUs */
- cpus_clear(mt_fpu_cpumask);
-#endif
-
- /* One of those TC's is the one booting, and not a secondary... */
- printk("%i available secondary CPU TC(s)\n", i - 1);
-
- return i;
-}
-
-/*
- * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
- * secondaries.
- *
- * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
- * as possible across the available VPEs.
- */
-
-static void smtc_tc_setup(int vpe, int tc, int cpu)
-{
- settc(tc);
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- write_tc_c0_tcstatus((read_tc_c0_tcstatus()
- & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
- | TCSTATUS_A);
- write_tc_c0_tccontext(0);
- /* Bind tc to vpe */
- write_tc_c0_tcbind(vpe);
- /* In general, all TCs should have the same cpu_data indications */
- memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
- /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
- if (cpu_data[0].cputype == CPU_34K)
- cpu_data[cpu].options &= ~MIPS_CPU_FPU;
- cpu_data[cpu].vpe_id = vpe;
- cpu_data[cpu].tc_id = tc;
-}
-
-
-void mipsmt_prepare_cpus(void)
-{
- int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
- unsigned long flags;
- unsigned long val;
- int nipi;
- struct smtc_ipi *pipi;
-
- /* disable interrupts so we can disable MT */
- local_irq_save(flags);
- /* disable MT so we can configure */
- dvpe();
- dmt();
-
- spin_lock_init(&freeIPIq.lock);
-
- /*
- * We probably don't have as many VPEs as we do SMP "CPUs",
- * but it's possible - and in any case we'll never use more!
- */
- for (i=0; i<NR_CPUS; i++) {
- IPIQ[i].head = IPIQ[i].tail = NULL;
- spin_lock_init(&IPIQ[i].lock);
- IPIQ[i].depth = 0;
- atomic_set(&ipi_timer_latch[i], 0);
- }
-
- /* cpu_data index starts at zero */
- cpu = 0;
- cpu_data[cpu].vpe_id = 0;
- cpu_data[cpu].tc_id = 0;
- cpu++;
-
- /* Report on boot-time options */
- mips_mt_set_cpuoptions();
- if (vpelimit > 0)
- printk("Limit of %d VPEs set\n", vpelimit);
- if (tclimit > 0)
- printk("Limit of %d TCs set\n", tclimit);
- if (nostlb) {
- printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
- }
- if (asidmask)
- printk("ASID mask value override to 0x%x\n", asidmask);
-
- /* Temporary */
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- if (hang_trig)
- printk("Logic Analyser Trigger on suspected TC hang\n");
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-
- /* Put MVPE's into 'configuration state' */
- write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
-
- val = read_c0_mvpconf0();
- nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
- if (vpelimit > 0 && nvpe > vpelimit)
- nvpe = vpelimit;
- ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- if (ntc > NR_CPUS)
- ntc = NR_CPUS;
- if (tclimit > 0 && ntc > tclimit)
- ntc = tclimit;
- slop = ntc % nvpe;
- for (i = 0; i < nvpe; i++) {
- tcpervpe[i] = ntc / nvpe;
- if (slop) {
- if((slop - i) > 0) tcpervpe[i]++;
- }
- }
- /* Handle command line override for VPE0 */
- if (vpe0limit > ntc) vpe0limit = ntc;
- if (vpe0limit > 0) {
- int slopslop;
- if (vpe0limit < tcpervpe[0]) {
- /* Reducing TC count - distribute to others */
- slop = tcpervpe[0] - vpe0limit;
- slopslop = slop % (nvpe - 1);
- tcpervpe[0] = vpe0limit;
- for (i = 1; i < nvpe; i++) {
- tcpervpe[i] += slop / (nvpe - 1);
- if(slopslop && ((slopslop - (i - 1) > 0)))
- tcpervpe[i]++;
- }
- } else if (vpe0limit > tcpervpe[0]) {
- /* Increasing TC count - steal from others */
- slop = vpe0limit - tcpervpe[0];
- slopslop = slop % (nvpe - 1);
- tcpervpe[0] = vpe0limit;
- for (i = 1; i < nvpe; i++) {
- tcpervpe[i] -= slop / (nvpe - 1);
- if(slopslop && ((slopslop - (i - 1) > 0)))
- tcpervpe[i]--;
- }
- }
- }
-
- /* Set up shared TLB */
- smtc_configure_tlb();
-
- for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
- /*
- * Set the MVP bits.
- */
- settc(tc);
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
- if (vpe != 0)
- printk(", ");
- printk("VPE %d: TC", vpe);
- for (i = 0; i < tcpervpe[vpe]; i++) {
- /*
- * TC 0 is bound to VPE 0 at reset,
- * and is presumably executing this
- * code. Leave it alone!
- */
- if (tc != 0) {
- smtc_tc_setup(vpe, tc, cpu);
- cpu++;
- }
- printk(" %d", tc);
- tc++;
- }
- if (vpe != 0) {
- /*
- * Clear any stale software interrupts from VPE's Cause
- */
- write_vpe_c0_cause(0);
-
- /*
- * Clear ERL/EXL of VPEs other than 0
- * and set restricted interrupt enable/mask.
- */
- write_vpe_c0_status((read_vpe_c0_status()
- & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
- | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
- | ST0_IE));
- /*
- * set config to be the same as vpe0,
- * particularly kseg0 coherency alg
- */
- write_vpe_c0_config(read_c0_config());
- /* Clear any pending timer interrupt */
- write_vpe_c0_compare(0);
- /* Propagate Config7 */
- write_vpe_c0_config7(read_c0_config7());
- write_vpe_c0_count(read_c0_count());
- }
- /* enable multi-threading within VPE */
- write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
- /* enable the VPE */
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
- }
-
- /*
- * Pull any physically present but unused TCs out of circulation.
- */
- while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
- cpu_clear(tc, phys_cpu_present_map);
- cpu_clear(tc, cpu_present_map);
- tc++;
- }
-
- /* release config state */
- write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
-
- printk("\n");
-
- /* Set up coprocessor affinity CPU mask(s) */
-
-#ifdef CONFIG_MIPS_MT_FPAFF
- for (tc = 0; tc < ntc; tc++) {
- if (cpu_data[tc].options & MIPS_CPU_FPU)
- cpu_set(tc, mt_fpu_cpumask);
- }
-#endif
-
- /* set up ipi interrupts... */
-
- /* If we have multiple VPEs running, set up the cross-VPE interrupt */
-
- setup_cross_vpe_interrupts(nvpe);
-
- /* Set up queue of free IPI "messages". */
- nipi = NR_CPUS * IPIBUF_PER_CPU;
- if (ipibuffers > 0)
- nipi = ipibuffers;
-
- pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
- if (pipi == NULL)
- panic("kmalloc of IPI message buffers failed\n");
- else
- printk("IPI buffer pool of %d buffers\n", nipi);
- for (i = 0; i < nipi; i++) {
- smtc_ipi_nq(&freeIPIq, pipi);
- pipi++;
- }
-
- /* Arm multithreading and enable other VPEs - but all TCs are Halted */
- emt(EMT_ENABLE);
- evpe(EVPE_ENABLE);
- local_irq_restore(flags);
- /* Initialize SMTC /proc statistics/diagnostics */
- init_smtc_stats();
-}
-
-
-/*
- * Setup the PC, SP, and GP of a secondary processor and start it
- * running!
- * smp_bootstrap is the place to resume from
- * __KSTK_TOS(idle) is apparently the stack pointer
- * (unsigned long)idle->thread_info the gp
- *
- */
-void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
-{
- extern u32 kernelsp[NR_CPUS];
- long flags;
- int mtflags;
-
- LOCK_MT_PRA();
- if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- dvpe();
- }
- settc(cpu_data[cpu].tc_id);
-
- /* pc */
- write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
-
- /* stack pointer */
- kernelsp[cpu] = __KSTK_TOS(idle);
- write_tc_gpr_sp(__KSTK_TOS(idle));
-
- /* global pointer */
- write_tc_gpr_gp((unsigned long)task_thread_info(idle));
-
- smtc_status |= SMTC_MTC_ACTIVE;
- write_tc_c0_tchalt(0);
- if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- evpe(EVPE_ENABLE);
- }
- UNLOCK_MT_PRA();
-}
-
-void smtc_init_secondary(void)
-{
- /*
- * Start timer on secondary VPEs if necessary.
- * plat_timer_setup has already have been invoked by init/main
- * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
- * SMTC init code assigns TCs consdecutively and in ascending order
- * to across available VPEs.
- */
- if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
- ((read_c0_tcbind() & TCBIND_CURVPE)
- != cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
- }
-
- local_irq_enable();
-}
-
-void smtc_smp_finish(void)
-{
- printk("TC %d going on-line as CPU %d\n",
- cpu_data[smp_processor_id()].tc_id, smp_processor_id());
-}
-
-void smtc_cpus_done(void)
-{
-}
-
-/*
- * Support for SMTC-optimized driver IRQ registration
- */
-
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-
-int setup_irq_smtc(unsigned int irq, struct irqaction * new,
- unsigned long hwmask)
-{
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- unsigned int vpe = current_cpu_data.vpe_id;
-
- vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
-#endif
- irq_hwmask[irq] = hwmask;
-
- return setup_irq(irq, new);
-}
-
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-/*
- * Support for IRQ affinity to TCs
- */
-
-void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
-{
- /*
- * If a "fast path" cache of quickly decodable affinity state
- * is maintained, this is where it gets done, on a call up
- * from the platform affinity code.
- */
-}
-
-void smtc_forward_irq(unsigned int irq)
-{
- int target;
-
- /*
- * OK wise guy, now figure out how to get the IRQ
- * to be serviced on an authorized "CPU".
- *
- * Ideally, to handle the situation where an IRQ has multiple
- * eligible CPUS, we would maintain state per IRQ that would
- * allow a fair distribution of service requests. Since the
- * expected use model is any-or-only-one, for simplicity
- * and efficiency, we just pick the easiest one to find.
- */
-
- target = first_cpu(irq_desc[irq].affinity);
-
- /*
- * We depend on the platform code to have correctly processed
- * IRQ affinity change requests to ensure that the IRQ affinity
- * mask has been purged of bits corresponding to nonexistent and
- * offline "CPUs", and to TCs bound to VPEs other than the VPE
- * connected to the physical interrupt input for the interrupt
- * in question. Otherwise we have a nasty problem with interrupt
- * mask management. This is best handled in non-performance-critical
- * platform IRQ affinity setting code, to minimize interrupt-time
- * checks.
- */
-
- /* If no one is eligible, service locally */
- if (target >= NR_CPUS) {
- do_IRQ_no_affinity(irq);
- return;
- }
-
- smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
-}
-
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
-
-/*
- * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
- * Within a VPE one TC can interrupt another by different approaches.
- * The easiest to get right would probably be to make all TCs except
- * the target IXMT and set a software interrupt, but an IXMT-based
- * scheme requires that a handler must run before a new IPI could
- * be sent, which would break the "broadcast" loops in MIPS MT.
- * A more gonzo approach within a VPE is to halt the TC, extract
- * its Restart, Status, and a couple of GPRs, and program the Restart
- * address to emulate an interrupt.
- *
- * Within a VPE, one can be confident that the target TC isn't in
- * a critical EXL state when halted, since the write to the Halt
- * register could not have issued on the writing thread if the
- * halting thread had EXL set. So k0 and k1 of the target TC
- * can be used by the injection code. Across VPEs, one can't
- * be certain that the target TC isn't in a critical exception
- * state. So we try a two-step process of sending a software
- * interrupt to the target VPE, which either handles the event
- * itself (if it was the target) or injects the event within
- * the VPE.
- */
-
-static void smtc_ipi_qdump(void)
-{
- int i;
-
- for (i = 0; i < NR_CPUS ;i++) {
- printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
- i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
- IPIQ[i].depth);
- }
-}
-
-/*
- * The standard atomic.h primitives don't quite do what we want
- * here: We need an atomic add-and-return-previous-value (which
- * could be done with atomic_add_return and a decrement) and an
- * atomic set/zero-and-return-previous-value (which can't really
- * be done with the atomic.h primitives). And since this is
- * MIPS MT, we can assume that we have LL/SC.
- */
-static inline int atomic_postincrement(atomic_t *v)
-{
- unsigned long result;
-
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ll %0, %2 \n"
- " addu %1, %0, 1 \n"
- " sc %1, %2 \n"
- " beqz %1, 1b \n"
- __WEAK_LLSC_MB
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "m" (v->counter)
- : "memory");
-
- return result;
-}
-
-void smtc_send_ipi(int cpu, int type, unsigned int action)
-{
- int tcstatus;
- struct smtc_ipi *pipi;
- long flags;
- int mtflags;
-
- if (cpu == smp_processor_id()) {
- printk("Cannot Send IPI to self!\n");
- return;
- }
- /* Set up a descriptor, to be delivered either promptly or queued */
- pipi = smtc_ipi_dq(&freeIPIq);
- if (pipi == NULL) {
- bust_spinlocks(1);
- mips_mt_regdump(dvpe());
- panic("IPI Msg. Buffers Depleted\n");
- }
- pipi->type = type;
- pipi->arg = (void *)action;
- pipi->dest = cpu;
- if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
- if (type == SMTC_CLOCK_TICK)
- atomic_inc(&ipi_timer_latch[cpu]);
- /* If not on same VPE, enqueue and send cross-VPE interrupt */
- smtc_ipi_nq(&IPIQ[cpu], pipi);
- LOCK_CORE_PRA();
- settc(cpu_data[cpu].tc_id);
- write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
- UNLOCK_CORE_PRA();
- } else {
- /*
- * Not sufficient to do a LOCK_MT_PRA (dmt) here,
- * since ASID shootdown on the other VPE may
- * collide with this operation.
- */
- LOCK_CORE_PRA();
- settc(cpu_data[cpu].tc_id);
- /* Halt the targeted TC */
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
-
- /*
- * Inspect TCStatus - if IXMT is set, we have to queue
- * a message. Otherwise, we set up the "interrupt"
- * of the other TC
- */
- tcstatus = read_tc_c0_tcstatus();
-
- if ((tcstatus & TCSTATUS_IXMT) != 0) {
- /*
- * Spin-waiting here can deadlock,
- * so we queue the message for the target TC.
- */
- write_tc_c0_tchalt(0);
- UNLOCK_CORE_PRA();
- /* Try to reduce redundant timer interrupt messages */
- if (type == SMTC_CLOCK_TICK) {
- if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
- smtc_ipi_nq(&freeIPIq, pipi);
- return;
- }
- }
- smtc_ipi_nq(&IPIQ[cpu], pipi);
- } else {
- if (type == SMTC_CLOCK_TICK)
- atomic_inc(&ipi_timer_latch[cpu]);
- post_direct_ipi(cpu, pipi);
- write_tc_c0_tchalt(0);
- UNLOCK_CORE_PRA();
- }
- }
-}
-
-/*
- * Send IPI message to Halted TC, TargTC/TargVPE already having been set
- */
-static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
-{
- struct pt_regs *kstack;
- unsigned long tcstatus;
- unsigned long tcrestart;
- extern u32 kernelsp[NR_CPUS];
- extern void __smtc_ipi_vector(void);
-//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
-
- /* Extract Status, EPC from halted TC */
- tcstatus = read_tc_c0_tcstatus();
- tcrestart = read_tc_c0_tcrestart();
- /* If TCRestart indicates a WAIT instruction, advance the PC */
- if ((tcrestart & 0x80000000)
- && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
- tcrestart += 4;
- }
- /*
- * Save on TC's future kernel stack
- *
- * CU bit of Status is indicator that TC was
- * already running on a kernel stack...
- */
- if (tcstatus & ST0_CU0) {
- /* Note that this "- 1" is pointer arithmetic */
- kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
- } else {
- kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
- }
-
- kstack->cp0_epc = (long)tcrestart;
- /* Save TCStatus */
- kstack->cp0_tcstatus = tcstatus;
- /* Pass token of operation to be performed in kernel stack pad area */
- kstack->pad0[4] = (unsigned long)pipi;
- /* Pass address of function to be called likewise */
- kstack->pad0[5] = (unsigned long)&ipi_decode;
- /* Set interrupt exempt and kernel mode */
- tcstatus |= TCSTATUS_IXMT;
- tcstatus &= ~TCSTATUS_TKSU;
- write_tc_c0_tcstatus(tcstatus);
- ehb();
- /* Set TC Restart address to be SMTC IPI vector */
- write_tc_c0_tcrestart(__smtc_ipi_vector);
-}
-
-static void ipi_resched_interrupt(void)
-{
- /* Return from interrupt should be enough to cause scheduler check */
-}
-
-
-static void ipi_call_interrupt(void)
-{
- /* Invoke generic function invocation code in smp.c */
- smp_call_function_interrupt();
-}
-
-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-void ipi_decode(struct smtc_ipi *pipi)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- void *arg_copy = pipi->arg;
- int type_copy = pipi->type;
- int ticks;
-
- smtc_ipi_nq(&freeIPIq, pipi);
- switch (type_copy) {
- case SMTC_CLOCK_TICK:
- irq_enter();
- kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
- ticks = atomic_read(&ipi_timer_latch[cpu]);
- atomic_sub(ticks, &ipi_timer_latch[cpu]);
- while (ticks) {
- cd->event_handler(cd);
- ticks--;
- }
- irq_exit();
- break;
-
- case LINUX_SMP_IPI:
- switch ((int)arg_copy) {
- case SMP_RESCHEDULE_YOURSELF:
- ipi_resched_interrupt();
- break;
- case SMP_CALL_FUNCTION:
- ipi_call_interrupt();
- break;
- default:
- printk("Impossible SMTC IPI Argument 0x%x\n",
- (int)arg_copy);
- break;
- }
- break;
-#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
- case IRQ_AFFINITY_IPI:
- /*
- * Accept a "forwarded" interrupt that was initially
- * taken by a TC who doesn't have affinity for the IRQ.
- */
- do_IRQ_no_affinity((int)arg_copy);
- break;
-#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
- default:
- printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
- break;
- }
-}
-
-void deferred_smtc_ipi(void)
-{
- struct smtc_ipi *pipi;
- unsigned long flags;
-/* DEBUG */
- int q = smp_processor_id();
-
- /*
- * Test is not atomic, but much faster than a dequeue,
- * and the vast majority of invocations will have a null queue.
- */
- if (IPIQ[q].head != NULL) {
- while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
- /* ipi_decode() should be called with interrupts off */
- local_irq_save(flags);
- ipi_decode(pipi);
- local_irq_restore(flags);
- }
- }
-}
-
-/*
- * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
- * set via cross-VPE MTTR manipulation of the Cause register. It would be
- * in some regards preferable to have external logic for "doorbell" hardware
- * interrupts.
- */
-
-static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
-
-static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
-{
- int my_vpe = cpu_data[smp_processor_id()].vpe_id;
- int my_tc = cpu_data[smp_processor_id()].tc_id;
- int cpu;
- struct smtc_ipi *pipi;
- unsigned long tcstatus;
- int sent;
- long flags;
- unsigned int mtflags;
- unsigned int vpflags;
-
- /*
- * So long as cross-VPE interrupts are done via
- * MFTR/MTTR read-modify-writes of Cause, we need
- * to stop other VPEs whenever the local VPE does
- * anything similar.
- */
- local_irq_save(flags);
- vpflags = dvpe();
- clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
- set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
- irq_enable_hazard();
- evpe(vpflags);
- local_irq_restore(flags);
-
- /*
- * Cross-VPE Interrupt handler: Try to directly deliver IPIs
- * queued for TCs on this VPE other than the current one.
- * Return-from-interrupt should cause us to drain the queue
- * for the current TC, so we ought not to have to do it explicitly here.
- */
-
- for_each_online_cpu(cpu) {
- if (cpu_data[cpu].vpe_id != my_vpe)
- continue;
-
- pipi = smtc_ipi_dq(&IPIQ[cpu]);
- if (pipi != NULL) {
- if (cpu_data[cpu].tc_id != my_tc) {
- sent = 0;
- LOCK_MT_PRA();
- settc(cpu_data[cpu].tc_id);
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- tcstatus = read_tc_c0_tcstatus();
- if ((tcstatus & TCSTATUS_IXMT) == 0) {
- post_direct_ipi(cpu, pipi);
- sent = 1;
- }
- write_tc_c0_tchalt(0);
- UNLOCK_MT_PRA();
- if (!sent) {
- smtc_ipi_nq(&IPIQ[cpu], pipi);
- }
- } else {
- /*
- * ipi_decode() should be called
- * with interrupts off
- */
- local_irq_save(flags);
- ipi_decode(pipi);
- local_irq_restore(flags);
- }
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static void ipi_irq_dispatch(void)
-{
- do_IRQ(cpu_ipi_irq);
-}
-
-static struct irqaction irq_ipi = {
- .handler = ipi_interrupt,
- .flags = IRQF_DISABLED | IRQF_PERCPU,
- .name = "SMTC_IPI"
-};
-
-static void setup_cross_vpe_interrupts(unsigned int nvpe)
-{
- if (nvpe < 1)
- return;
-
- if (!cpu_has_vint)
- panic("SMTC Kernel requires Vectored Interrupt support");
-
- set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
-
- setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
-
- set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
-}
-
-/*
- * SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled. We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
- */
-
-static void __smtc_ipi_replay(void)
-{
- unsigned int cpu = smp_processor_id();
-
- /*
- * To the extent that we've ever turned interrupts off,
- * we may have accumulated deferred IPIs. This is subtle.
- * If we use the smtc_ipi_qdepth() macro, we'll get an
- * exact number - but we'll also disable interrupts
- * and create a window of failure where a new IPI gets
- * queued after we test the depth but before we re-enable
- * interrupts. So long as IXMT never gets set, however,
- * we should be OK: If we pick up something and dispatch
- * it here, that's great. If we see nothing, but concurrent
- * with this operation, another TC sends us an IPI, IXMT
- * is clear, and we'll handle it as a real pseudo-interrupt
- * and not a pseudo-pseudo interrupt.
- */
- if (IPIQ[cpu].depth > 0) {
- while (1) {
- struct smtc_ipi_q *q = &IPIQ[cpu];
- struct smtc_ipi *pipi;
- extern void self_ipi(struct smtc_ipi *);
-
- spin_lock(&q->lock);
- pipi = __smtc_ipi_dq(q);
- spin_unlock(&q->lock);
- if (!pipi)
- break;
-
- self_ipi(pipi);
- smtc_cpu_stats[cpu].selfipis++;
- }
- }
-}
-
-void smtc_ipi_replay(void)
-{
- raw_local_irq_disable();
- __smtc_ipi_replay();
-}
-
-EXPORT_SYMBOL(smtc_ipi_replay);
-
-void smtc_idle_loop_hook(void)
-{
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- int im;
- int flags;
- int mtflags;
- int bit;
- int vpe;
- int tc;
- int hook_ntcs;
- /*
- * printk within DMT-protected regions can deadlock,
- * so buffer diagnostic messages for later output.
- */
- char *pdb_msg;
- char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
-
- if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
- if (atomic_add_return(1, &idle_hook_initialized) == 1) {
- int mvpconf0;
- /* Tedious stuff to just do once */
- mvpconf0 = read_c0_mvpconf0();
- hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
- if (hook_ntcs > NR_CPUS)
- hook_ntcs = NR_CPUS;
- for (tc = 0; tc < hook_ntcs; tc++) {
- tcnoprog[tc] = 0;
- clock_hang_reported[tc] = 0;
- }
- for (vpe = 0; vpe < 2; vpe++)
- for (im = 0; im < 8; im++)
- imstuckcount[vpe][im] = 0;
- printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
- atomic_set(&idle_hook_initialized, 1000);
- } else {
- /* Someone else is initializing in parallel - let 'em finish */
- while (atomic_read(&idle_hook_initialized) < 1000)
- ;
- }
- }
-
- /* Have we stupidly left IXMT set somewhere? */
- if (read_c0_tcstatus() & 0x400) {
- write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
- ehb();
- printk("Dangling IXMT in cpu_idle()\n");
- }
-
- /* Have we stupidly left an IM bit turned off? */
-#define IM_LIMIT 2000
- local_irq_save(flags);
- mtflags = dmt();
- pdb_msg = &id_ho_db_msg[0];
- im = read_c0_status();
- vpe = current_cpu_data.vpe_id;
- for (bit = 0; bit < 8; bit++) {
- /*
- * In current prototype, I/O interrupts
- * are masked for VPE > 0
- */
- if (vpemask[vpe][bit]) {
- if (!(im & (0x100 << bit)))
- imstuckcount[vpe][bit]++;
- else
- imstuckcount[vpe][bit] = 0;
- if (imstuckcount[vpe][bit] > IM_LIMIT) {
- set_c0_status(0x100 << bit);
- ehb();
- imstuckcount[vpe][bit] = 0;
- pdb_msg += sprintf(pdb_msg,
- "Dangling IM %d fixed for VPE %d\n", bit,
- vpe);
- }
- }
- }
-
- /*
- * Now that we limit outstanding timer IPIs, check for hung TC
- */
- for (tc = 0; tc < NR_CPUS; tc++) {
- /* Don't check ourself - we'll dequeue IPIs just below */
- if ((tc != smp_processor_id()) &&
- atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
- if (clock_hang_reported[tc] == 0) {
- pdb_msg += sprintf(pdb_msg,
- "TC %d looks hung with timer latch at %d\n",
- tc, atomic_read(&ipi_timer_latch[tc]));
- clock_hang_reported[tc]++;
- }
- }
- }
- emt(mtflags);
- local_irq_restore(flags);
- if (pdb_msg != &id_ho_db_msg[0])
- printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-
- /*
- * Replay any accumulated deferred IPIs. If "Instant Replay"
- * is in use, there should never be any.
- */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
- {
- unsigned long flags;
-
- local_irq_save(flags);
- __smtc_ipi_replay();
- local_irq_restore(flags);
- }
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
-}
-
-void smtc_soft_dump(void)
-{
- int i;
-
- printk("Counter Interrupts taken per CPU (TC)\n");
- for (i = 0; i < NR_CPUS; i++) {
- printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
- }
- printk("Self-IPI invocations:\n");
- for (i = 0; i < NR_CPUS; i++) {
- printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
- }
- smtc_ipi_qdump();
- printk("Timer IPI Backlogs:\n");
- for (i = 0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
- }
- printk("%d Recoveries of \"stolen\" FPU\n",
- atomic_read(&smtc_fpu_recoveries));
-}
-
-
-/*
- * TLB management routines special to SMTC
- */
-
-void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
-{
- unsigned long flags, mtflags, tcstat, prevhalt, asid;
- int tlb, i;
-
- /*
- * It would be nice to be able to use a spinlock here,
- * but this is invoked from within TLB flush routines
- * that protect themselves with DVPE, so if a lock is
- * held by another TC, it'll never be freed.
- *
- * DVPE/DMT must not be done with interrupts enabled,
- * so even so most callers will already have disabled
- * them, let's be really careful...
- */
-
- local_irq_save(flags);
- if (smtc_status & SMTC_TLB_SHARED) {
- mtflags = dvpe();
- tlb = 0;
- } else {
- mtflags = dmt();
- tlb = cpu_data[cpu].vpe_id;
- }
- asid = asid_cache(cpu);
-
- do {
- if (!((asid += ASID_INC) & ASID_MASK)) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
- /* Traverse all online CPUs (hack requires contiguous range) */
- for_each_online_cpu(i) {
- /*
- * We don't need to worry about our own CPU, nor those of
- * CPUs who don't share our TLB.
- */
- if ((i != smp_processor_id()) &&
- ((smtc_status & SMTC_TLB_SHARED) ||
- (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
- settc(cpu_data[i].tc_id);
- prevhalt = read_tc_c0_tchalt() & TCHALT_H;
- if (!prevhalt) {
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
- }
- tcstat = read_tc_c0_tcstatus();
- smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
- if (!prevhalt)
- write_tc_c0_tchalt(0);
- }
- }
- if (!asid) /* fix version if needed */
- asid = ASID_FIRST_VERSION;
- local_flush_tlb_all(); /* start new asid cycle */
- }
- } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
-
- /*
- * SMTC shares the TLB within VPEs and possibly across all VPEs.
- */
- for_each_online_cpu(i) {
- if ((smtc_status & SMTC_TLB_SHARED) ||
- (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
- cpu_context(i, mm) = asid_cache(i) = asid;
- }
-
- if (smtc_status & SMTC_TLB_SHARED)
- evpe(mtflags);
- else
- emt(mtflags);
- local_irq_restore(flags);
-}
-
-/*
- * Invoked from macros defined in mmu_context.h
- * which must already have disabled interrupts
- * and done a DVPE or DMT as appropriate.
- */
-
-void smtc_flush_tlb_asid(unsigned long asid)
-{
- int entry;
- unsigned long ehi;
-
- entry = read_c0_wired();
-
- /* Traverse all non-wired entries */
- while (entry < current_cpu_data.tlbsize) {
- write_c0_index(entry);
- ehb();
- tlb_read();
- ehb();
- ehi = read_c0_entryhi();
- if ((ehi & ASID_MASK) == asid) {
- /*
- * Invalidate only entries with specified ASID,
- * making sure all entries differ.
- */
- write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
- write_c0_entrylo0(0);
- write_c0_entrylo1(0);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- }
- entry++;
- }
- write_c0_index(PARKED_INDEX);
- tlbw_use_hazard();
-}
-
-/*
- * Support for single-threading cache flush operations.
- */
-
-static int halt_state_save[NR_CPUS];
-
-/*
- * To really, really be sure that nothing is being done
- * by other TCs, halt them all. This code assumes that
- * a DVPE has already been done, so while their Halted
- * state is theoretically architecturally unstable, in
- * practice, it's not going to change while we're looking
- * at it.
- */
-
-void smtc_cflush_lockdown(void)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id()) {
- settc(cpu_data[cpu].tc_id);
- halt_state_save[cpu] = read_tc_c0_tchalt();
- write_tc_c0_tchalt(TCHALT_H);
- }
- }
- mips_ihb();
-}
-
-/* It would be cheating to change the cpu_online states during a flush! */
-
-void smtc_cflush_release(void)
-{
- int cpu;
-
- /*
- * Start with a hazard barrier to ensure
- * that all CACHE ops have played through.
- */
- mips_ihb();
-
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id()) {
- settc(cpu_data[cpu].tc_id);
- write_tc_c0_tchalt(halt_state_save[cpu]);
- }
- }
- mips_ihb();
-}
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c
new file mode 100644
index 00000000000..39f7ab7b042
--- /dev/null
+++ b/arch/mips/kernel/spinlock_test.c
@@ -0,0 +1,141 @@
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+
+
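+/*
+ * "spin_single": time 1,000,000 uncontended lock/unlock pairs on a
+ * stack-local raw spinlock and report the elapsed time in microseconds.
+ */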
+static int ss_get(void *data, u64 *val)
+{
+ ktime_t start, finish;
+ int loops;
+ int cont;
+ DEFINE_RAW_SPINLOCK(ss_spin);
+
+ loops = 1000000;
+ cont = 1;
+
+ start = ktime_get();
+
+ while (cont) {
+ raw_spin_lock(&ss_spin);
+ loops--;
+ if (loops == 0)
+ cont = 0;
+ raw_spin_unlock(&ss_spin);
+ }
+
+ finish = ktime_get();
+
+ *val = ktime_us_delta(finish, start);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
+
+
+
+struct spin_multi_state {
+ raw_spinlock_t lock;
+ atomic_t start_wait;
+ atomic_t enter_wait;
+ atomic_t exit_wait;
+ int loops;
+};
+
+struct spin_multi_per_thread {
+ struct spin_multi_state *state;
+ ktime_t start;
+};
+
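+/*
+ * Worker for "spin_multi": the two threads rendezvous on enter_wait,
+ * record a start timestamp, rendezvous again on start_wait, then hammer
+ * the shared lock; exit_wait keeps both threads alive until each has
+ * finished its loop.
+ */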
+static int multi_other(void *data)
+{
+ int loops;
+ int cont;
+ struct spin_multi_per_thread *pt = data;
+ struct spin_multi_state *s = pt->state;
+
+ loops = s->loops;
+ cont = 1;
+
+ atomic_dec(&s->enter_wait);
+
+ while (atomic_read(&s->enter_wait))
+ ; /* spin */
+
+ pt->start = ktime_get();
+
+ atomic_dec(&s->start_wait);
+
+ while (atomic_read(&s->start_wait))
+ ; /* spin */
+
+ while (cont) {
+ raw_spin_lock(&s->lock);
+ loops--;
+ if (loops == 0)
+ cont = 0;
+ raw_spin_unlock(&s->lock);
+ }
+
+ atomic_dec(&s->exit_wait);
+ while (atomic_read(&s->exit_wait))
+ ; /* spin */
+ return 0;
+}
+
+static int multi_get(void *data, u64 *val)
+{
+ ktime_t finish;
+ struct spin_multi_state ms;
+ struct spin_multi_per_thread t1, t2;
+
+ ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
+ ms.loops = 1000000;
+
+ atomic_set(&ms.start_wait, 2);
+ atomic_set(&ms.enter_wait, 2);
+ atomic_set(&ms.exit_wait, 2);
+ t1.state = &ms;
+ t2.state = &ms;
+
+ kthread_run(multi_other, &t2, "multi_get");
+
+ multi_other(&t1);
+
+ finish = ktime_get();
+
+ *val = ktime_us_delta(finish, t1.start);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
+
+
+extern struct dentry *mips_debugfs_dir;
+static int __init spinlock_test(void)
+{
+ struct dentry *d;
+
+ if (!mips_debugfs_dir)
+ return -ENODEV;
+
+ d = debugfs_create_file("spin_single", S_IRUGO,
+ mips_debugfs_dir, NULL,
+ &fops_ss);
+ if (!d)
+ return -ENOMEM;
+
+ d = debugfs_create_file("spin_multi", S_IRUGO,
+ mips_debugfs_dir, NULL,
+ &fops_multi);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+device_initcall(spinlock_test);
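+
+/*
+ * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
+ * mips_debugfs_dir is the arch's "mips" directory there):
+ *
+ * cat /sys/kernel/debug/mips/spin_single
+ * cat /sys/kernel/debug/mips/spin_multi
+ *
+ * Each read re-runs the benchmark and prints the elapsed microseconds.
+ */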
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
new file mode 100644
index 00000000000..67f2495def1
--- /dev/null
+++ b/arch/mips/kernel/spram.c
@@ -0,0 +1,221 @@
+/*
+ * MIPS SPRAM support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/r4kcache.h>
+#include <asm/hazards.h>
+
+/*
+ * These definitions are correct for the 24K/34K/74K SPRAM sample
+ * implementation. The 4KS interpreted the tags differently...
+ */
+#define SPRAM_TAG0_ENABLE 0x00000080
+#define SPRAM_TAG0_PA_MASK 0xfffff000
+#define SPRAM_TAG1_SIZE_MASK 0xfffff000
+
+#define SPRAM_TAG_STRIDE 8
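+
+/*
+ * Each SPRAM region is described by a pair of tags SPRAM_TAG_STRIDE
+ * bytes apart: tag0 carries the enable bit and physical base address,
+ * tag1 carries the region size. probe_spram() below walks these pairs
+ * at offsets 0, 16, 32, ... until it sees a zero size or a repeated tag.
+ */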
+
+#define ERRCTL_SPRAM (1 << 28)
+
+/* errctl access */
+#define read_c0_errctl(x) read_c0_ecc(x)
+#define write_c0_errctl(x) write_c0_ecc(x)
+
+/*
+ * Different semantics to the set_c0_* function built by __BUILD_SET_C0
+ */
+static unsigned int bis_c0_errctl(unsigned int set)
+{
+ unsigned int res;
+ res = read_c0_errctl();
+ write_c0_errctl(res | set);
+ return res;
+}
+
+static void ispram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+
+ write_c0_taglo(data);
+ ehb();
+
+ cache_op(Index_Store_Tag_I, CKSEG0|offset);
+ ehb();
+
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+
+static unsigned int ispram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_I, CKSEG0 | offset);
+ ehb();
+ data = read_c0_taglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static void dspram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ write_c0_dtaglo(data);
+ ehb();
+ cache_op(Index_Store_Tag_D, CKSEG0 | offset);
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+
+static unsigned int dspram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_D, CKSEG0 | offset);
+ ehb();
+ data = read_c0_dtaglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static void probe_spram(char *type,
+ unsigned int base,
+ unsigned int (*read)(unsigned int),
+ void (*write)(unsigned int, unsigned int))
+{
+ unsigned int firstsize = 0, lastsize = 0;
+ unsigned int firstpa = 0, lastpa = 0, pa = 0;
+ unsigned int offset = 0;
+ unsigned int size, tag0, tag1;
+ unsigned int enabled;
+ int i;
+
+ /*
+ * The limit is arbitrary but avoids the loop running away if
+ * the SPRAM tags are implemented differently
+ */
+
+ for (i = 0; i < 8; i++) {
+ tag0 = read(offset);
+ tag1 = read(offset+SPRAM_TAG_STRIDE);
+ pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
+ type, i, tag0, tag1);
+
+ size = tag1 & SPRAM_TAG1_SIZE_MASK;
+
+ if (size == 0)
+ break;
+
+ if (i != 0) {
+ /* tags may repeat... */
+ if ((pa == firstpa && size == firstsize) ||
+ (pa == lastpa && size == lastsize))
+ break;
+ }
+
+ /* Align base with size */
+ base = (base + size - 1) & ~(size-1);
+
+ /* reprogram the base address and enable */
+ tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
+ write(offset, tag0);
+
+ base += size;
+
+ /* reread the tag */
+ tag0 = read(offset);
+ pa = tag0 & SPRAM_TAG0_PA_MASK;
+ enabled = tag0 & SPRAM_TAG0_ENABLE;
+
+ if (i == 0) {
+ firstpa = pa;
+ firstsize = size;
+ }
+
+ lastpa = pa;
+ lastsize = size;
+
+ if (strcmp(type, "DSPRAM") == 0) {
+ unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
+ unsigned int v;
+#define TDAT 0x5a5aa5a5
+ vp[0] = TDAT;
+ vp[1] = ~TDAT;
+
+ mb();
+
+ v = vp[0];
+ if (v != TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp, TDAT, v);
+ v = vp[1];
+ if (v != ~TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp+1, ~TDAT, v);
+ }
+
+ pr_info("%s%d: PA=%08x,Size=%08x%s\n",
+ type, i, pa, size, enabled ? ",enabled" : "");
+ offset += 2 * SPRAM_TAG_STRIDE;
+ }
+}
+
+void spram_config(void)
+{
+ unsigned int config0;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ config0 = read_c0_config();
+ /* FIXME: addresses are Malta specific */
+ if (config0 & (1<<24)) {
+ probe_spram("ISPRAM", 0x1c000000,
+ &ispram_load_tag, &ispram_store_tag);
+ }
+ if (config0 & (1<<23))
+ probe_spram("DSPRAM", 0x1c100000,
+ &dspram_load_tag, &dspram_store_tag);
+ }
+}
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index ebd9db8d1ec..1ba775d24d3 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -1,12 +1,11 @@
/*
- * arch/mips/kernel/stacktrace.c
- *
* Stack trace management functions
*
* Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include <linux/export.h>
#include <asm/stacktrace.h>
/*
@@ -31,7 +30,8 @@ static void save_raw_context_stack(struct stack_trace *trace,
}
}
-static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs)
+static void save_context_stack(struct stack_trace *trace,
+ struct task_struct *tsk, struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
@@ -40,7 +40,7 @@ static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs)
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
- (unsigned long)task_stack_page(current);
+ (unsigned long)task_stack_page(tsk);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_context_stack(trace, sp);
@@ -53,7 +53,7 @@ static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs)
trace->entries[trace->nr_entries++] = pc;
if (trace->nr_entries >= trace->max_entries)
break;
- pc = unwind_stack(current, &sp, pc, &ra);
+ pc = unwind_stack(tsk, &sp, pc, &ra);
} while (pc);
#else
save_raw_context_stack(trace, sp);
@@ -65,11 +65,23 @@ static void save_context_stack(struct stack_trace *trace, struct pt_regs *regs)
*/
void save_stack_trace(struct stack_trace *trace)
{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
struct pt_regs dummyregs;
struct pt_regs *regs = &dummyregs;
WARN_ON(trace->nr_entries || !trace->max_entries);
- prepare_frametrace(regs);
- save_context_stack(trace, regs);
+ if (tsk != current) {
+ regs->regs[29] = tsk->thread.reg29;
+ regs->regs[31] = 0;
+ regs->cp0_epc = tsk->thread.reg31;
+ } else
+ prepare_frametrace(regs);
+ save_context_stack(trace, tsk, regs);
}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 00000000000..2242bdd4370
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,132 @@
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 next time
+ * value. This can cause a small timewarp for CPU0. All other CPUs should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+
+#include <asm/r4k-timer.h>
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/mipsregs.h>
+
+static atomic_t count_start_flag = ATOMIC_INIT(0);
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+static atomic_t count_reference = ATOMIC_INIT(0);
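+
+/*
+ * Handshake, one slave at a time: the master publishes its Count in
+ * count_reference and writes the slave's cpu number to count_start_flag;
+ * count_count_start and count_count_stop then act as two-way barriers
+ * (master waits for 1, slave waits for 2) around each write of the
+ * Count register.
+ */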
+
+#define COUNTON 100
+#define NR_LOOPS 5
+
+void synchronise_count_master(int cpu)
+{
+ int i;
+ unsigned long flags;
+ unsigned int initcount;
+
+ printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
+
+ local_irq_save(flags);
+
+ /*
+ * Notify the slaves that it's time to start
+ */
+ atomic_set(&count_reference, read_c0_count());
+ atomic_set(&count_start_flag, cpu);
+ smp_wmb();
+
+ /* Count will be initialised to current timer for all CPUs */
+ initcount = read_c0_count();
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronised and
+ * the master and slaves each set their cycle counters to a known
+ * value all at once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
+ mb();
+ atomic_set(&count_count_stop, 0);
+ smp_wmb();
+
+ /* this lets the slaves write their count register */
+ atomic_inc(&count_count_start);
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ /*
+ * Wait for all slaves to leave the synchronization point:
+ */
+ while (atomic_read(&count_count_stop) != 1)
+ mb();
+ atomic_set(&count_count_start, 0);
+ smp_wmb();
+ atomic_inc(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+ atomic_set(&count_start_flag, 0);
+
+ local_irq_restore(flags);
+
+ /*
+ * i386 code reported the skew here, but the
+ * count registers were almost certainly out of sync
+ * so no point in alarming people
+ */
+ printk("done.\n");
+}
+
+void synchronise_count_slave(int cpu)
+{
+ int i;
+ unsigned int initcount;
+
+ /*
+ * Not every cpu is online at the time this gets called,
+ * so we first wait for the master to say everyone is ready
+ */
+
+ while (atomic_read(&count_start_flag) != cpu)
+ mb();
+
+ /* Count will be initialised to next expire for all CPUs */
+ initcount = atomic_read(&count_reference);
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&count_count_start);
+ while (atomic_read(&count_count_start) != 2)
+ mb();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ atomic_inc(&count_count_stop);
+ while (atomic_read(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+}
+#undef NR_LOOPS
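+
+/*
+ * A sketch of how the two halves pair up during SMP bring-up (exact
+ * call sites depend on the kernel version):
+ *
+ * boot CPU: synchronise_count_master(cpu), once per secondary CPU;
+ * new CPU: synchronise_count_slave(smp_processor_id()), from its
+ * early start-up path before it enters the idle loop.
+ */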
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index af1bdc89748..4a4f9dda565 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -7,29 +7,27 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
-#include <linux/a.out.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
-#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
-#include <linux/mman.h>
#include <linux/ptrace.h>
-#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
-#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
-#include <linux/module.h>
#include <linux/ipc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/elf.h>
+#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -39,116 +37,28 @@
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>
+#include <asm/switch_to.h>
-asmlinkage int sys_pipe(nabi_no_regargs volatile struct pt_regs regs)
+/*
+ * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
+ * convention. It returns results in registers $v0 / $v1 which means there
+ * is no need for it to verify the validity of a userspace pointer
+ * argument. Historically that used to be expensive in Linux. These days
+ * the performance advantage is negligible.
+ */
+asmlinkage int sysm_pipe(void)
{
int fd[2];
- int error, res;
-
- error = do_pipe(fd);
- if (error) {
- res = error;
- goto out;
- }
- regs.regs[3] = fd[1];
- res = fd[0];
-out:
- return res;
-}
-
-unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-
-EXPORT_SYMBOL(shm_align_mask);
-
-#define COLOUR_ALIGN(addr,pgoff) \
- ((((addr) + shm_align_mask) & ~shm_align_mask) + \
- (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct vm_area_struct * vmm;
- int do_color_align;
- unsigned long task_size;
-
- task_size = STACK_TOP;
-
- if (len > task_size)
- return -ENOMEM;
-
- if (flags & MAP_FIXED) {
- /* Even MAP_FIXED mappings must reside within task_size. */
- if (task_size - len < addr)
- return -EINVAL;
-
- /*
- * We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
- if ((flags & MAP_SHARED) && (addr & shm_align_mask))
- return -EINVAL;
- return addr;
- }
-
- do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
- if (addr) {
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
- vmm = find_vma(current->mm, addr);
- if (task_size - len >= addr &&
- (!vmm || addr + len <= vmm->vm_start))
- return addr;
- }
- addr = TASK_UNMAPPED_BASE;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (task_size - len < addr)
- return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
- return addr;
- addr = vmm->vm_end;
- if (do_color_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- }
-}
-
-/* common code for old and new mmaps */
-static inline unsigned long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
- unsigned long error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
+ int error = do_pipe_flags(fd, 0);
+ if (error)
+ return error;
+ current_pt_regs()->regs[3] = fd[1];
+ return fd[0];
}
-asmlinkage unsigned long
-old_mmap(unsigned long addr, unsigned long len, int prot,
- int flags, int fd, off_t offset)
+SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long,
+ fd, off_t, offset)
{
unsigned long result;
@@ -156,140 +66,142 @@ old_mmap(unsigned long addr, unsigned long len, int prot,
if (offset & ~PAGE_MASK)
goto out;
- result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
out:
return result;
}
-asmlinkage unsigned long
-sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
+SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
{
if (pgoff & (~PAGE_MASK >> 12))
return -EINVAL;
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
save_static_function(sys_fork);
-static int __used noinline
-_sys_fork(nabi_no_regargs struct pt_regs regs)
-{
- return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
-}
-
save_static_function(sys_clone);
-static int __used noinline
-_sys_clone(nabi_no_regargs struct pt_regs regs)
-{
- unsigned long clone_flags;
- unsigned long newsp;
- int __user *parent_tidptr, *child_tidptr;
-
- clone_flags = regs.regs[4];
- newsp = regs.regs[5];
- if (!newsp)
- newsp = regs.regs[29];
- parent_tidptr = (int __user *) regs.regs[6];
-#ifdef CONFIG_32BIT
- /* We need to fetch the fifth argument off the stack. */
- child_tidptr = NULL;
- if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
- int __user *__user *usp = (int __user *__user *) regs.regs[29];
- if (regs.regs[2] == __NR_syscall) {
- if (get_user (child_tidptr, &usp[5]))
- return -EFAULT;
- }
- else if (get_user (child_tidptr, &usp[4]))
- return -EFAULT;
- }
-#else
- child_tidptr = (int __user *) regs.regs[8];
-#endif
- return do_fork(clone_flags, newsp, &regs, 0,
- parent_tidptr, child_tidptr);
-}
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
- int error;
- char * filename;
+ struct thread_info *ti = task_thread_info(current);
- filename = getname((char __user *) (long)regs.regs[4]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
- (char __user *__user *) (long)regs.regs[6], &regs);
- putname(filename);
+ ti->tp_value = addr;
+ if (cpu_has_userlocal)
+ write_c0_userlocal(addr);
-out:
- return error;
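+ /*
+ * Userland can then fetch the TLS pointer cheaply with
+ * RDHWR $reg, $29 (the UserLocal hardware register); on CPUs
+ * without UserLocal the RDHWR traps and is emulated from tp_value.
+ */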
+ return 0;
}
-/*
- * Compacrapability ...
- */
-asmlinkage int sys_uname(struct old_utsname __user * name)
+static inline int mips_atomic_set(unsigned long addr, unsigned long new)
{
- if (name && !copy_to_user(name, utsname(), sizeof (*name)))
- return 0;
- return -EFAULT;
-}
+ unsigned long old, tmp;
+ struct pt_regs *regs;
+ unsigned int err;
-/*
- * Compacrapability ...
- */
-asmlinkage int sys_olduname(struct oldold_utsname __user * name)
-{
- int error;
+ if (unlikely(addr & 3))
+ return -EINVAL;
- if (!name)
- return -EFAULT;
- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
- return -EFAULT;
+ if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+ return -EINVAL;
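+
+ /*
+ * Three variants of the same atomic exchange: LL/SC with the
+ * branch-likely workaround for R10000 errata, plain LL/SC, and a
+ * software emulation via ll_bit/ll_task for CPUs without LL/SC.
+ * Faults on the user address are routed to the fixup section and
+ * reported as -EFAULT.
+ */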
- error = __copy_to_user(&name->sysname, &utsname()->sysname,
- __OLD_UTS_LEN);
- error -= __put_user(0, name->sysname + __OLD_UTS_LEN);
- error -= __copy_to_user(&name->nodename, &utsname()->nodename,
- __OLD_UTS_LEN);
- error -= __put_user(0, name->nodename + __OLD_UTS_LEN);
- error -= __copy_to_user(&name->release, &utsname()->release,
- __OLD_UTS_LEN);
- error -= __put_user(0, name->release + __OLD_UTS_LEN);
- error -= __copy_to_user(&name->version, &utsname()->version,
- __OLD_UTS_LEN);
- error -= __put_user(0, name->version + __OLD_UTS_LEN);
- error -= __copy_to_user(&name->machine, &utsname()->machine,
- __OLD_UTS_LEN);
- error = __put_user(0, name->machine + __OLD_UTS_LEN);
- error = error ? -EFAULT : 0;
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__ (
+ " .set arch=r4000 \n"
+ " li %[err], 0 \n"
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+ " beqzl %[tmp], 1b \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR)" 1b, 4b \n"
+ " "STR(PTR)" 2b, 4b \n"
+ " .previous \n"
+ " .set mips0 \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__ (
+ " .set arch=r4000 \n"
+ " li %[err], 0 \n"
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+ " bnez %[tmp], 4f \n"
+ "3: \n"
+ " .subsection 2 \n"
+ "4: b 1b \n"
+ " .previous \n"
+ " \n"
+ " .section .fixup,\"ax\" \n"
+ "5: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR)" 1b, 5b \n"
+ " "STR(PTR)" 2b, 5b \n"
+ " .previous \n"
+ " .set mips0 \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else {
+ do {
+ preempt_disable();
+ ll_bit = 1;
+ ll_task = current;
+ preempt_enable();
+
+ err = __get_user(old, (unsigned int *) addr);
+ err |= __put_user(new, (unsigned int *) addr);
+ if (err)
+ break;
+ rmb();
+ } while (!ll_bit);
+ }
- return error;
-}
+ if (unlikely(err))
+ return err;
-asmlinkage int sys_set_thread_area(unsigned long addr)
-{
- struct thread_info *ti = task_thread_info(current);
+ regs = current_pt_regs();
+ regs->regs[2] = old;
+ regs->regs[7] = 0; /* No error */
- ti->tp_value = addr;
- if (cpu_has_userlocal)
- write_c0_userlocal(addr);
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ " move $29, %0 \n"
+ " j syscall_exit \n"
+ : /* no outputs */
+ : "r" (regs));
- return 0;
+ /* unreached. Honestly. */
+ unreachable();
}
-asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
+SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
case MIPS_ATOMIC_SET:
- printk(KERN_CRIT "How did I get here?\n");
- return -EINVAL;
+ return mips_atomic_set(arg1, arg2);
case MIPS_FIXADE:
if (arg1 & ~3)
@@ -302,7 +214,7 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
if (arg1 & 2)
set_thread_flag(TIF_LOGADE);
else
- clear_thread_flag(TIF_FIXADE);
+ clear_thread_flag(TIF_LOGADE);
return 0;
@@ -315,97 +227,9 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
}
/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(unsigned int call, int first, int second,
- unsigned long third, void __user *ptr, long fifth)
-{
- int version, ret;
-
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
-
- switch (call) {
- case SEMOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second, NULL);
- case SEMTIMEDOP:
- return sys_semtimedop(first, (struct sembuf __user *)ptr,
- second,
- (const struct timespec __user *)fifth);
- case SEMGET:
- return sys_semget(first, second, third);
- case SEMCTL: {
- union semun fourth;
- if (!ptr)
- return -EINVAL;
- if (get_user(fourth.__pad, (void __user *__user *) ptr))
- return -EFAULT;
- return sys_semctl(first, second, third, fourth);
- }
-
- case MSGSND:
- return sys_msgsnd(first, (struct msgbuf __user *) ptr,
- second, third);
- case MSGRCV:
- switch (version) {
- case 0: {
- struct ipc_kludge tmp;
- if (!ptr)
- return -EINVAL;
-
- if (copy_from_user(&tmp,
- (struct ipc_kludge __user *) ptr,
- sizeof(tmp)))
- return -EFAULT;
- return sys_msgrcv(first, tmp.msgp, second,
- tmp.msgtyp, third);
- }
- default:
- return sys_msgrcv(first,
- (struct msgbuf __user *) ptr,
- second, fifth, third);
- }
- case MSGGET:
- return sys_msgget((key_t) first, second);
- case MSGCTL:
- return sys_msgctl(first, second,
- (struct msqid_ds __user *) ptr);
-
- case SHMAT:
- switch (version) {
- default: {
- unsigned long raddr;
- ret = do_shmat(first, (char __user *) ptr, second,
- &raddr);
- if (ret)
- return ret;
- return put_user(raddr, (unsigned long __user *) third);
- }
- case 1: /* iBCS2 emulator entry point */
- if (!segment_eq(get_fs(), get_ds()))
- return -EINVAL;
- return do_shmat(first, (char __user *) ptr, second,
- (unsigned long *) third);
- }
- case SHMDT:
- return sys_shmdt((char __user *)ptr);
- case SHMGET:
- return sys_shmget(first, second, third);
- case SHMCTL:
- return sys_shmctl(first, second,
- (struct shmid_ds __user *) ptr);
- default:
- return -ENOSYS;
- }
-}
-
-/*
 * Not implemented yet ...
*/
-asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
+SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
return -ENOSYS;
}
@@ -418,32 +242,3 @@ asmlinkage void bad_stack(void)
{
do_exit(SIGSEGV);
}
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- register unsigned long __a0 asm("$4") = (unsigned long) filename;
- register unsigned long __a1 asm("$5") = (unsigned long) argv;
- register unsigned long __a2 asm("$6") = (unsigned long) envp;
- register unsigned long __a3 asm("$7");
- unsigned long __v0;
-
- __asm__ volatile (" \n"
- " .set noreorder \n"
- " li $2, %5 # __NR_execve \n"
- " syscall \n"
- " move %0, $2 \n"
- " .set reorder \n"
- : "=&r" (__v0), "=r" (__a3)
- : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
- "memory");
-
- if (__a3 == 0)
- return __v0;
-
- return -__v0;
-}
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
deleted file mode 100644
index c357762b801..00000000000
--- a/arch/mips/kernel/sysirix.c
+++ /dev/null
@@ -1,2140 +0,0 @@
-/*
- * sysirix.c: IRIX system call emulation.
- *
- * Copyright (C) 1996 David S. Miller
- * Copyright (C) 1997 Miguel de Icaza
- * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle
- */
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/binfmts.h>
-#include <linux/capability.h>
-#include <linux/highuid.h>
-#include <linux/pagemap.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/errno.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/times.h>
-#include <linux/elf.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/utsname.h>
-#include <linux/file.h>
-#include <linux/vfs.h>
-#include <linux/namei.h>
-#include <linux/socket.h>
-#include <linux/security.h>
-#include <linux/syscalls.h>
-#include <linux/resource.h>
-
-#include <asm/ptrace.h>
-#include <asm/page.h>
-#include <asm/uaccess.h>
-#include <asm/inventory.h>
-
-/* 2,191 lines of complete and utter shit coming up... */
-
-extern int max_threads;
-
-/* The sysmp commands supported thus far. */
-#define MP_NPROCS 1 /* # processor in complex */
-#define MP_NAPROCS 2 /* # active processors in complex */
-#define MP_PGSIZE 14 /* Return system page size in v1. */
-
-asmlinkage int irix_sysmp(struct pt_regs *regs)
-{
- unsigned long cmd;
- int base = 0;
- int error = 0;
-
- if(regs->regs[2] == 1000)
- base = 1;
- cmd = regs->regs[base + 4];
- switch(cmd) {
- case MP_PGSIZE:
- error = PAGE_SIZE;
- break;
- case MP_NPROCS:
- case MP_NAPROCS:
- error = num_online_cpus();
- break;
- default:
- printk("SYSMP[%s:%d]: Unsupported opcode %d\n",
- current->comm, current->pid, (int)cmd);
- error = -EINVAL;
- break;
- }
-
- return error;
-}
-
-/* The prctl commands. */
-#define PR_MAXPROCS 1 /* Tasks/user. */
-#define PR_ISBLOCKED 2 /* If blocked, return 1. */
-#define PR_SETSTACKSIZE 3 /* Set largest task stack size. */
-#define PR_GETSTACKSIZE 4 /* Get largest task stack size. */
-#define PR_MAXPPROCS 5 /* Num parallel tasks. */
-#define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */
-#define PR_SETEXITSIG 8 /* When task exit's, set signal. */
-#define PR_RESIDENT 9 /* Make task unswappable. */
-#define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */
-#define PR_DETACHADDR 11 /* Disconnect a vma from a task. */
-#define PR_TERMCHILD 12 /* Kill child if the parent dies. */
-#define PR_GETSHMASK 13 /* Get the sproc() share mask. */
-#define PR_GETNSHARE 14 /* Number of share group members. */
-#define PR_COREPID 15 /* Add task pid to name when it core. */
-#define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */
-#define PR_PTHREADEXIT 17 /* Kill a pthread, only for IRIX 6.[234] */
-
-asmlinkage int irix_prctl(unsigned option, ...)
-{
- va_list args;
- int error = 0;
-
- va_start(args, option);
- switch (option) {
- case PR_MAXPROCS:
- printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n",
- current->comm, current->pid);
- error = max_threads;
- break;
-
- case PR_ISBLOCKED: {
- struct task_struct *task;
-
- printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n",
- current->comm, current->pid);
- read_lock(&tasklist_lock);
- task = find_task_by_vpid(va_arg(args, pid_t));
- error = -ESRCH;
- if (task)
- error = (task->run_list.next != NULL);
- read_unlock(&tasklist_lock);
- /* Can _your_ OS find this out that fast? */
- break;
- }
-
- case PR_SETSTACKSIZE: {
- long value = va_arg(args, long);
-
- printk("irix_prctl[%s:%d]: Wants PR_SETSTACKSIZE<%08lx>\n",
- current->comm, current->pid, (unsigned long) value);
- if (value > RLIM_INFINITY)
- value = RLIM_INFINITY;
- if (capable(CAP_SYS_ADMIN)) {
- task_lock(current->group_leader);
- current->signal->rlim[RLIMIT_STACK].rlim_max =
- current->signal->rlim[RLIMIT_STACK].rlim_cur = value;
- task_unlock(current->group_leader);
- error = value;
- break;
- }
- task_lock(current->group_leader);
- if (value > current->signal->rlim[RLIMIT_STACK].rlim_max) {
- error = -EINVAL;
- task_unlock(current->group_leader);
- break;
- }
- current->signal->rlim[RLIMIT_STACK].rlim_cur = value;
- task_unlock(current->group_leader);
- error = value;
- break;
- }
-
- case PR_GETSTACKSIZE:
- printk("irix_prctl[%s:%d]: Wants PR_GETSTACKSIZE\n",
- current->comm, current->pid);
- error = current->signal->rlim[RLIMIT_STACK].rlim_cur;
- break;
-
- case PR_MAXPPROCS:
- printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n",
- current->comm, current->pid);
- error = 1;
- break;
-
- case PR_UNBLKONEXEC:
- printk("irix_prctl[%s:%d]: Wants PR_UNBLKONEXEC\n",
- current->comm, current->pid);
- error = -EINVAL;
- break;
-
- case PR_SETEXITSIG:
- printk("irix_prctl[%s:%d]: Wants PR_SETEXITSIG\n",
- current->comm, current->pid);
-
- /* We can probably play some game where we set the task
- * exit_code to some non-zero value when this is requested,
- * and check whether exit_code is already set in do_exit().
- */
- error = -EINVAL;
- break;
-
- case PR_RESIDENT:
- printk("irix_prctl[%s:%d]: Wants PR_RESIDENT\n",
- current->comm, current->pid);
- error = 0; /* Compatibility indeed. */
- break;
-
- case PR_ATTACHADDR:
- printk("irix_prctl[%s:%d]: Wants PR_ATTACHADDR\n",
- current->comm, current->pid);
- error = -EINVAL;
- break;
-
- case PR_DETACHADDR:
- printk("irix_prctl[%s:%d]: Wants PR_DETACHADDR\n",
- current->comm, current->pid);
- error = -EINVAL;
- break;
-
- case PR_TERMCHILD:
- printk("irix_prctl[%s:%d]: Wants PR_TERMCHILD\n",
- current->comm, current->pid);
- error = -EINVAL;
- break;
-
- case PR_GETSHMASK:
- printk("irix_prctl[%s:%d]: Wants PR_GETSHMASK\n",
- current->comm, current->pid);
- error = -EINVAL; /* Until I have the sproc() stuff in. */
- break;
-
- case PR_GETNSHARE:
- error = 0; /* Until I have the sproc() stuff in. */
- break;
-
- case PR_COREPID:
- printk("irix_prctl[%s:%d]: Wants PR_COREPID\n",
- current->comm, current->pid);
- error = -EINVAL;
- break;
-
- case PR_ATTACHADDRPERM:
- printk("irix_prctl[%s:%d]: Wants PR_ATTACHADDRPERM\n",
- current->comm, current->pid);
- error = -EINVAL;
- break;
-
- default:
- printk("irix_prctl[%s:%d]: Non-existant opcode %d\n",
- current->comm, current->pid, option);
- error = -EINVAL;
- break;
- }
- va_end(args);
-
- return error;
-}
-
-#undef DEBUG_PROCGRPS
-
-extern unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt);
-extern char *prom_getenv(char *name);
-extern long prom_setenv(char *name, char *value);
-
-/* The syssgi commands supported thus far. */
-#define SGI_SYSID 1 /* Return unique per-machine identifier. */
-#define SGI_INVENT 5 /* Fetch inventory */
-# define SGI_INV_SIZEOF 1
-# define SGI_INV_READ 2
-#define SGI_RDNAME 6 /* Return string name of a process. */
-#define SGI_SETNVRAM 8 /* Set PROM variable. */
-#define SGI_GETNVRAM 9 /* Get PROM variable. */
-#define SGI_SETPGID 21 /* Set process group id. */
-#define SGI_SYSCONF 22 /* POSIX sysconf garbage. */
-#define SGI_PATHCONF 24 /* POSIX sysconf garbage. */
-#define SGI_SETGROUPS 40 /* POSIX sysconf garbage. */
-#define SGI_GETGROUPS 41 /* POSIX sysconf garbage. */
-#define SGI_RUSAGE 56 /* BSD style rusage(). */
-#define SGI_SSYNC 62 /* Synchronous fs sync. */
-#define SGI_GETSID 65 /* SysVr4 get session id. */
-#define SGI_ELFMAP 68 /* Map an elf image. */
-#define SGI_TOSSTSAVE 108 /* Toss saved vma's. */
-#define SGI_FP_BCOPY 129 /* Should FPU bcopy be used on this machine? */
-#define SGI_PHYSP 1011 /* Translate virtual into physical page. */
-
-asmlinkage int irix_syssgi(struct pt_regs *regs)
-{
- unsigned long cmd;
- int retval, base = 0;
-
- if (regs->regs[2] == 1000)
- base = 1;
-
- cmd = regs->regs[base + 4];
- switch(cmd) {
- case SGI_SYSID: {
- char __user *buf = (char __user *) regs->regs[base + 5];
-
- /* XXX Use ethernet addr.... */
- retval = clear_user(buf, 64) ? -EFAULT : 0;
- break;
- }
-#if 0
- case SGI_RDNAME: {
- int pid = (int) regs->regs[base + 5];
- char __user *buf = (char __user *) regs->regs[base + 6];
- struct task_struct *p;
- char tcomm[sizeof(current->comm)];
-
- read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
- if (!p) {
- read_unlock(&tasklist_lock);
- retval = -ESRCH;
- break;
- }
- get_task_comm(tcomm, p);
- read_unlock(&tasklist_lock);
-
- /* XXX Need to check sizes. */
- retval = copy_to_user(buf, tcomm, sizeof(tcomm)) ? -EFAULT : 0;
- break;
- }
-
- case SGI_GETNVRAM: {
- char __user *name = (char __user *) regs->regs[base+5];
- char __user *buf = (char __user *) regs->regs[base+6];
- char *value;
- return -EINVAL; /* til I fix it */
- value = prom_getenv(name); /* PROM lock? */
- if (!value) {
- retval = -EINVAL;
- break;
- }
- /* Do I strlen() for the length? */
- retval = copy_to_user(buf, value, 128) ? -EFAULT : 0;
- break;
- }
-
- case SGI_SETNVRAM: {
- char __user *name = (char __user *) regs->regs[base+5];
- char __user *value = (char __user *) regs->regs[base+6];
- return -EINVAL; /* til I fix it */
- retval = prom_setenv(name, value);
- /* XXX make sure retval conforms to syssgi(2) */
- printk("[%s:%d] setnvram(\"%s\", \"%s\"): retval %d",
- current->comm, current->pid, name, value, retval);
-/* if (retval == PROM_ENOENT)
- retval = -ENOENT; */
- break;
- }
-#endif
-
- case SGI_SETPGID: {
-#ifdef DEBUG_PROCGRPS
- printk("[%s:%d] setpgid(%d, %d) ",
- current->comm, current->pid,
- (int) regs->regs[base + 5], (int)regs->regs[base + 6]);
-#endif
- retval = sys_setpgid(regs->regs[base + 5], regs->regs[base + 6]);
-
-#ifdef DEBUG_PROCGRPS
- printk("retval=%d\n", retval);
-#endif
- }
- break;
-
- case SGI_SYSCONF: {
- switch(regs->regs[base + 5]) {
- case 1:
- retval = (MAX_ARG_PAGES >> 4); /* XXX estimate... */
- goto out;
- case 2:
- retval = max_threads;
- goto out;
- case 3:
- retval = HZ;
- goto out;
- case 4:
- retval = NGROUPS_MAX;
- goto out;
- case 5:
- retval = sysctl_nr_open;
- goto out;
- case 6:
- retval = 1;
- goto out;
- case 7:
- retval = 1;
- goto out;
- case 8:
- retval = 199009;
- goto out;
- case 11:
- retval = PAGE_SIZE;
- goto out;
- case 12:
- retval = 4;
- goto out;
- case 25:
- case 26:
- case 27:
- case 28:
- case 29:
- case 30:
- retval = 0;
- goto out;
- case 31:
- retval = 32;
- goto out;
- default:
- retval = -EINVAL;
- goto out;
- };
- }
-
- case SGI_SETGROUPS:
- retval = sys_setgroups((int) regs->regs[base + 5],
- (gid_t __user *) regs->regs[base + 6]);
- break;
-
- case SGI_GETGROUPS:
- retval = sys_getgroups((int) regs->regs[base + 5],
- (gid_t __user *) regs->regs[base + 6]);
- break;
-
- case SGI_RUSAGE: {
- struct rusage __user *ru = (struct rusage __user *) regs->regs[base + 6];
-
- switch((int) regs->regs[base + 5]) {
- case 0:
- /* rusage self */
- retval = getrusage(current, RUSAGE_SELF, ru);
- goto out;
-
- case -1:
- /* rusage children */
- retval = getrusage(current, RUSAGE_CHILDREN, ru);
- goto out;
-
- default:
- retval = -EINVAL;
- goto out;
- };
- }
-
- case SGI_SSYNC:
- sys_sync();
- retval = 0;
- break;
-
- case SGI_GETSID:
-#ifdef DEBUG_PROCGRPS
- printk("[%s:%d] getsid(%d) ", current->comm, current->pid,
- (int) regs->regs[base + 5]);
-#endif
- retval = sys_getsid(regs->regs[base + 5]);
-#ifdef DEBUG_PROCGRPS
- printk("retval=%d\n", retval);
-#endif
- break;
-
- case SGI_ELFMAP:
- retval = irix_mapelf((int) regs->regs[base + 5],
- (struct elf_phdr __user *) regs->regs[base + 6],
- (int) regs->regs[base + 7]);
- break;
-
- case SGI_TOSSTSAVE:
- /* XXX We don't need to do anything? */
- retval = 0;
- break;
-
- case SGI_FP_BCOPY:
- retval = 0;
- break;
-
- case SGI_PHYSP: {
- unsigned long addr = regs->regs[base + 5];
- int __user *pageno = (int __user *) (regs->regs[base + 6]);
- struct mm_struct *mm = current->mm;
- pgd_t *pgdp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- down_read(&mm->mmap_sem);
- pgdp = pgd_offset(mm, addr);
- pudp = pud_offset(pgdp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
- retval = -EINVAL;
- if (ptep) {
- pte_t pte = *ptep;
-
- if (pte_val(pte) & (_PAGE_VALID | _PAGE_PRESENT)) {
- /* b0rked on 64-bit */
- retval = put_user((pte_val(pte) & PAGE_MASK) >>
- PAGE_SHIFT, pageno);
- }
- }
- up_read(&mm->mmap_sem);
- break;
- }
-
- case SGI_INVENT: {
- int arg1 = (int) regs->regs [base + 5];
- void __user *buffer = (void __user *) regs->regs [base + 6];
- int count = (int) regs->regs [base + 7];
-
- switch (arg1) {
- case SGI_INV_SIZEOF:
- retval = sizeof(inventory_t);
- break;
- case SGI_INV_READ:
- retval = dump_inventory_to_user(buffer, count);
- break;
- default:
- retval = -EINVAL;
- }
- break;
- }
-
- default:
- printk("irix_syssgi: Unsupported command %d\n", (int)cmd);
- retval = -EINVAL;
- break;
- };
-
-out:
- return retval;
-}
-
-asmlinkage int irix_gtime(struct pt_regs *regs)
-{
- return get_seconds();
-}
-
-/*
- * IRIX is completely broken... it returns 0 on success, otherwise
- * ENOMEM.
- */
-asmlinkage int irix_brk(unsigned long brk)
-{
- unsigned long rlim;
- unsigned long newbrk, oldbrk;
- struct mm_struct *mm = current->mm;
- int ret;
-
- down_write(&mm->mmap_sem);
- if (brk < mm->end_code) {
- ret = -ENOMEM;
- goto out;
- }
-
- newbrk = PAGE_ALIGN(brk);
- oldbrk = PAGE_ALIGN(mm->brk);
- if (oldbrk == newbrk) {
- mm->brk = brk;
- ret = 0;
- goto out;
- }
-
- /*
- * Always allow shrinking brk
- */
- if (brk <= mm->brk) {
- mm->brk = brk;
- do_munmap(mm, newbrk, oldbrk-newbrk);
- ret = 0;
- goto out;
- }
- /*
- * Check against rlimit and stack..
- */
- rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
- if (rlim >= RLIM_INFINITY)
- rlim = ~0;
- if (brk - mm->end_code > rlim) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Check against existing mmap mappings.
- */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Ok, looks good - let it rip.
- */
- if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) {
- ret = -ENOMEM;
- goto out;
- }
- mm->brk = brk;
- ret = 0;
-
-out:
- up_write(&mm->mmap_sem);
- return ret;
-}
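The page rounding above is what keeps the grow and shrink paths cheap: both the old and the new break are rounded up to a page boundary, and pages are only mapped or unmapped when that rounded value changes. A minimal sketch of the arithmetic, assuming 4 KiB pages (the EX_ names are illustrative, not kernel API):

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

/* EX_PAGE_ALIGN(0x1234) == 0x2000, EX_PAGE_ALIGN(0x2000) == 0x2000:
 * a brk() that stays inside the current last page only updates
 * mm->brk and maps nothing new. */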
-
-asmlinkage int irix_getpid(struct pt_regs *regs)
-{
- regs->regs[3] = task_pid_vnr(current->real_parent);
- return task_pid_vnr(current);
-}
-
-asmlinkage int irix_getuid(struct pt_regs *regs)
-{
- regs->regs[3] = current->euid;
- return current->uid;
-}
-
-asmlinkage int irix_getgid(struct pt_regs *regs)
-{
- regs->regs[3] = current->egid;
- return current->gid;
-}
-
-asmlinkage int irix_stime(int value)
-{
- int err;
- struct timespec tv;
-
- tv.tv_sec = value;
- tv.tv_nsec = 0;
- err = security_settime(&tv, NULL);
- if (err)
- return err;
-
- write_seqlock_irq(&xtime_lock);
- xtime.tv_sec = value;
- xtime.tv_nsec = 0;
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
-
- return 0;
-}
-
-static inline void jiffiestotv(unsigned long jiffies, struct timeval *value)
-{
- value->tv_usec = (jiffies % HZ) * (1000000 / HZ);
- value->tv_sec = jiffies / HZ;
-}
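A quick worked example of the conversion above, assuming HZ == 100 (10 ms per jiffy):

/* 250 jiffies: tv_sec = 250 / 100 = 2,
 * tv_usec = (250 % 100) * (1000000 / 100) = 50 * 10000 = 500000,
 * i.e. 2.5 seconds. */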
-
-static inline void getitimer_real(struct itimerval *value)
-{
- register unsigned long val, interval;
-
- interval = current->it_real_incr;
- val = 0;
- if (del_timer(&current->real_timer)) {
- unsigned long now = jiffies;
- val = current->real_timer.expires;
- add_timer(&current->real_timer);
- /* look out for negative/zero itimer.. */
- if (val <= now)
- val = now+1;
- val -= now;
- }
- jiffiestotv(val, &value->it_value);
- jiffiestotv(interval, &value->it_interval);
-}
-
-asmlinkage unsigned int irix_alarm(unsigned int seconds)
-{
- return alarm_setitimer(seconds);
-}
-
-asmlinkage int irix_pause(void)
-{
- current->state = TASK_INTERRUPTIBLE;
- schedule();
-
- return -EINTR;
-}
-
-/* XXX need more than this... */
-asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
- unsigned long flags, char __user *type, void __user *data, int datalen)
-{
- printk("[%s:%d] irix_mount(%p,%p,%08lx,%p,%p,%d)\n",
- current->comm, current->pid,
- dev_name, dir_name, flags, type, data, datalen);
-
- return sys_mount(dev_name, dir_name, type, flags, data);
-}
-
-struct irix_statfs {
- short f_type;
- long f_bsize, f_frsize, f_blocks, f_bfree, f_files, f_ffree;
- char f_fname[6], f_fpack[6];
-};
-
-asmlinkage int irix_statfs(const char __user *path,
- struct irix_statfs __user *buf, int len, int fs_type)
-{
- struct nameidata nd;
- struct kstatfs kbuf;
- int error, i;
-
- /* We don't support this feature yet. */
- if (fs_type) {
- error = -EINVAL;
- goto out;
- }
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statfs))) {
- error = -EFAULT;
- goto out;
- }
-
- error = user_path_walk(path, &nd);
- if (error)
- goto out;
-
- error = vfs_statfs(nd.path.dentry, &kbuf);
- if (error)
- goto dput_and_out;
-
- error = __put_user(kbuf.f_type, &buf->f_type);
- error |= __put_user(kbuf.f_bsize, &buf->f_bsize);
- error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
- error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
- error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
- error |= __put_user(kbuf.f_files, &buf->f_files);
- error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
- for (i = 0; i < 6; i++) {
- error |= __put_user(0, &buf->f_fname[i]);
- error |= __put_user(0, &buf->f_fpack[i]);
- }
-
-dput_and_out:
- path_put(&nd.path);
-out:
- return error;
-}
-
-asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf)
-{
- struct kstatfs kbuf;
- struct file *file;
- int error, i;
-
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statfs))) {
- error = -EFAULT;
- goto out;
- }
-
- if (!(file = fget(fd))) {
- error = -EBADF;
- goto out;
- }
-
- error = vfs_statfs(file->f_path.dentry, &kbuf);
- if (error)
- goto out_f;
-
- error = __put_user(kbuf.f_type, &buf->f_type);
- error |= __put_user(kbuf.f_bsize, &buf->f_bsize);
- error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
- error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
- error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
- error |= __put_user(kbuf.f_files, &buf->f_files);
- error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
-
- for (i = 0; i < 6; i++) {
- error |= __put_user(0, &buf->f_fname[i]);
- error |= __put_user(0, &buf->f_fpack[i]);
- }
-
-out_f:
- fput(file);
-out:
- return error;
-}
-
-asmlinkage int irix_setpgrp(int flags)
-{
- int error;
-
-#ifdef DEBUG_PROCGRPS
- printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags);
-#endif
- if (!flags)
- error = task_pgrp_vnr(current);
- else
- error = sys_setsid();
-#ifdef DEBUG_PROCGRPS
- printk("returning %d\n", error);
-#endif
-
- return error;
-}
-
-asmlinkage int irix_times(struct tms __user *tbuf)
-{
- int err = 0;
-
- if (tbuf) {
- if (!access_ok(VERIFY_WRITE, tbuf, sizeof *tbuf))
- return -EFAULT;
-
- err = __put_user(current->utime, &tbuf->tms_utime);
- err |= __put_user(current->stime, &tbuf->tms_stime);
- err |= __put_user(current->signal->cutime, &tbuf->tms_cutime);
- err |= __put_user(current->signal->cstime, &tbuf->tms_cstime);
- }
-
- return err;
-}
-
-asmlinkage int irix_exec(struct pt_regs *regs)
-{
- int error, base = 0;
- char *filename;
-
- if (regs->regs[2] == 1000)
- base = 1;
- filename = getname((char __user *) (long)regs->regs[base + 4]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- return error;
-
- error = do_execve(filename, (char __user * __user *) (long)regs->regs[base + 5],
- NULL, regs);
- putname(filename);
-
- return error;
-}
-
-asmlinkage int irix_exece(struct pt_regs *regs)
-{
- int error, base = 0;
- char *filename;
-
- if (regs->regs[2] == 1000)
- base = 1;
- filename = getname((char __user *) (long)regs->regs[base + 4]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- return error;
- error = do_execve(filename, (char __user * __user *) (long)regs->regs[base + 5],
- (char __user * __user *) (long)regs->regs[base + 6], regs);
- putname(filename);
-
- return error;
-}
-
-asmlinkage unsigned long irix_gethostid(void)
-{
- printk("[%s:%d]: irix_gethostid() called...\n",
- current->comm, current->pid);
-
- return -EINVAL;
-}
-
-asmlinkage unsigned long irix_sethostid(unsigned long val)
-{
- printk("[%s:%d]: irix_sethostid(%08lx) called...\n",
- current->comm, current->pid, val);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_socket(int family, int type, int protocol)
-{
- switch (type) {
- case 1:
- type = SOCK_DGRAM;
- break;
-
- case 2:
- type = SOCK_STREAM;
- break;
-
- case 3:
- type = 9; /* Invalid... */
- break;
-
- case 4:
- type = SOCK_RAW;
- break;
-
- case 5:
- type = SOCK_RDM;
- break;
-
- case 6:
- type = SOCK_SEQPACKET;
- break;
-
- default:
- break;
- }
-
- return sys_socket(family, type, protocol);
-}
-
-asmlinkage int irix_getdomainname(char __user *name, int len)
-{
- int err;
-
- if (len < 0)
- return -EINVAL;
-
- down_read(&uts_sem);
- if (len > __NEW_UTS_LEN)
- len = __NEW_UTS_LEN;
- err = copy_to_user(name, utsname()->domainname, len) ? -EFAULT : 0;
- up_read(&uts_sem);
-
- return err;
-}
-
-asmlinkage unsigned long irix_getpagesize(void)
-{
- return PAGE_SIZE;
-}
-
-asmlinkage int irix_msgsys(int opcode, unsigned long arg0, unsigned long arg1,
- unsigned long arg2, unsigned long arg3,
- unsigned long arg4)
-{
- switch (opcode) {
- case 0:
- return sys_msgget((key_t) arg0, (int) arg1);
- case 1:
- return sys_msgctl((int) arg0, (int) arg1,
- (struct msqid_ds __user *)arg2);
- case 2:
- return sys_msgrcv((int) arg0, (struct msgbuf __user *) arg1,
- (size_t) arg2, (long) arg3, (int) arg4);
- case 3:
- return sys_msgsnd((int) arg0, (struct msgbuf __user *) arg1,
- (size_t) arg2, (int) arg3);
- default:
- return -EINVAL;
- }
-}
-
-asmlinkage int irix_shmsys(int opcode, unsigned long arg0, unsigned long arg1,
- unsigned long arg2, unsigned long arg3)
-{
- switch (opcode) {
- case 0:
- return do_shmat((int) arg0, (char __user *) arg1, (int) arg2,
- (unsigned long *) arg3);
- case 1:
- return sys_shmctl((int)arg0, (int)arg1,
- (struct shmid_ds __user *)arg2);
- case 2:
- return sys_shmdt((char __user *)arg0);
- case 3:
- return sys_shmget((key_t) arg0, (int) arg1, (int) arg2);
- default:
- return -EINVAL;
- }
-}
-
-asmlinkage int irix_semsys(int opcode, unsigned long arg0, unsigned long arg1,
- unsigned long arg2, int arg3)
-{
- switch (opcode) {
- case 0:
- return sys_semctl((int) arg0, (int) arg1, (int) arg2,
- (union semun) arg3);
- case 1:
- return sys_semget((key_t) arg0, (int) arg1, (int) arg2);
- case 2:
- return sys_semop((int) arg0, (struct sembuf __user *)arg1,
- (unsigned int) arg2);
- default:
- return -EINVAL;
- }
-}
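All three entry points above follow the SVR4 convention of multiplexing a whole IPC family through one syscall: the first argument selects the operation and the rest are forwarded verbatim. For example (reconstructed from the dispatch above, not from an IRIX trace):

/* an IRIX libc shmget(key, size, flag) is issued as
 * shmsys(3, key, size, flag) and lands in sys_shmget() above. */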
-
-static inline loff_t llseek(struct file *file, loff_t offset, int origin)
-{
- loff_t (*fn)(struct file *, loff_t, int);
- loff_t retval;
-
- fn = default_llseek;
- if (file->f_op && file->f_op->llseek)
- fn = file->f_op->llseek;
- lock_kernel();
- retval = fn(file, offset, origin);
- unlock_kernel();
-
- return retval;
-}
-
-asmlinkage int irix_lseek64(int fd, int _unused, int offhi, int offlow,
- int origin)
-{
- struct file * file;
- loff_t offset;
- int retval;
-
- retval = -EBADF;
- file = fget(fd);
- if (!file)
- goto bad;
- retval = -EINVAL;
- if (origin > 2)
- goto out_putf;
-
- offset = llseek(file, ((loff_t) offhi << 32) | (u32) offlow, origin);
- retval = (int) offset;
-
-out_putf:
- fput(file);
-bad:
- return retval;
-}
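The notable detail above is the assembly of a 64-bit offset from two 32-bit registers; the u32 cast on the low half matters because a plain int with the sign bit set would sign-extend and smear ones into the high half. A worked example:

/* offhi = 0x00000001, offlow = 0x80000000:
 *   ((loff_t) offhi << 32) | (u32) offlow == 0x0000000180000000
 * without the cast, offlow sign-extends to 0xffffffff80000000 and
 * the OR produces a negative offset. */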
-
-asmlinkage int irix_sginap(int ticks)
-{
- schedule_timeout_interruptible(ticks);
- return 0;
-}
-
-asmlinkage int irix_sgikopt(char __user *istring, char __user *ostring, int len)
-{
- return -EINVAL;
-}
-
-asmlinkage int irix_gettimeofday(struct timeval __user *tv)
-{
- time_t sec;
- long nsec, seq;
- int err;
-
- if (!access_ok(VERIFY_WRITE, tv, sizeof(struct timeval)))
- return -EFAULT;
-
- do {
- seq = read_seqbegin(&xtime_lock);
- sec = xtime.tv_sec;
- nsec = xtime.tv_nsec;
- } while (read_seqretry(&xtime_lock, seq));
-
- err = __put_user(sec, &tv->tv_sec);
- err |= __put_user((nsec / 1000), &tv->tv_usec);
-
- return err;
-}
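The do/while above is the standard seqlock reader idiom: snapshot the sequence count, copy the protected data, and retry if a writer got in between. A minimal generic sketch (read_shared, some lock and value pointers are placeholders, not kernel symbols; assumes linux/seqlock.h):

static u64 read_shared(seqlock_t *lock, const u64 *shared)
{
	unsigned long seq;
	u64 snapshot;

	do {
		seq = read_seqbegin(lock);	/* begin lockless read */
		snapshot = *shared;		/* copy protected data */
	} while (read_seqretry(lock, seq));	/* writer ran: retry */

	return snapshot;
}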
-
-#define IRIX_MAP_AUTOGROW 0x40
-
-asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
- int flags, int fd, off_t offset)
-{
- struct file *file = NULL;
- unsigned long retval;
-
- if (!(flags & MAP_ANONYMOUS)) {
- if (!(file = fget(fd)))
- return -EBADF;
-
- /* Ok, bad-taste hack follows; try to think of something else
- * while reading this. */
- if (flags & IRIX_MAP_AUTOGROW) {
- unsigned long old_pos;
- long max_size = offset + len;
-
- if (max_size > file->f_path.dentry->d_inode->i_size) {
- old_pos = sys_lseek(fd, max_size - 1, 0);
- sys_write(fd, (void __user *) "", 1);
- sys_lseek(fd, old_pos, 0);
- }
- }
- }
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- retval = do_mmap(file, addr, len, prot, flags, offset);
- up_write(&current->mm->mmap_sem);
- if (file)
- fput(file);
-
- return retval;
-}
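The IRIX_MAP_AUTOGROW hack above grows the backing file so the whole mapping is covered: seeking to the last byte of the requested size and writing a single '\0' pushes i_size out before the mapping is set up.

/* e.g. len = 3 * PAGE_SIZE at offset 0 of an empty file:
 * lseek to 3 * PAGE_SIZE - 1, write one byte, and i_size becomes
 * 3 * PAGE_SIZE, so touching any mapped page won't raise SIGBUS. */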
-
-asmlinkage int irix_madvise(unsigned long addr, int len, int behavior)
-{
- printk("[%s:%d] Wheee.. irix_madvise(%08lx,%d,%d)\n",
- current->comm, current->pid, addr, len, behavior);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_pagelock(char __user *addr, int len, int op)
-{
- printk("[%s:%d] Wheee.. irix_pagelock(%p,%d,%d)\n",
- current->comm, current->pid, addr, len, op);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_quotactl(struct pt_regs *regs)
-{
- printk("[%s:%d] Wheee.. irix_quotactl()\n",
- current->comm, current->pid);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_BSDsetpgrp(int pid, int pgrp)
-{
- int error;
-
-#ifdef DEBUG_PROCGRPS
- printk("[%s:%d] BSDsetpgrp(%d, %d) ", current->comm, current->pid,
- pid, pgrp);
-#endif
- if (!pid)
- pid = task_pid_vnr(current);
-
- /* Wheee, weird sysv thing... */
- if ((pgrp == 0) && (pid == task_pid_vnr(current)))
- error = sys_setsid();
- else
- error = sys_setpgid(pid, pgrp);
-
-#ifdef DEBUG_PROCGRPS
- printk("error = %d\n", error);
-#endif
-
- return error;
-}
-
-asmlinkage int irix_systeminfo(int cmd, char __user *buf, int cnt)
-{
- printk("[%s:%d] Wheee.. irix_systeminfo(%d,%p,%d)\n",
- current->comm, current->pid, cmd, buf, cnt);
-
- return -EINVAL;
-}
-
-struct iuname {
- char sysname[257], nodename[257], release[257];
- char version[257], machine[257];
- char m_type[257], base_rel[257];
- char _unused0[257], _unused1[257], _unused2[257];
- char _unused3[257], _unused4[257], _unused5[257];
-};
-
-asmlinkage int irix_uname(struct iuname __user *buf)
-{
- down_read(&uts_sem);
- if (copy_to_user(buf->sysname, utsname()->sysname, 65)
- || copy_to_user(buf->nodename, utsname()->nodename, 65)
- || copy_to_user(buf->release, utsname()->release, 65)
- || copy_to_user(buf->version, utsname()->version, 65)
- || copy_to_user(buf->machine, utsname()->machine, 65)) {
- up_read(&uts_sem);
- return -EFAULT;
- }
- up_read(&uts_sem);
-
- return 1;
-}
-
-#undef DEBUG_XSTAT
-
-static int irix_xstat32_xlate(struct kstat *stat, void __user *ubuf)
-{
- struct xstat32 {
- u32 st_dev, st_pad1[3], st_ino, st_mode, st_nlink, st_uid, st_gid;
- u32 st_rdev, st_pad2[2], st_size, st_pad3;
- u32 st_atime0, st_atime1;
- u32 st_mtime0, st_mtime1;
- u32 st_ctime0, st_ctime1;
- u32 st_blksize, st_blocks;
- char st_fstype[16];
- u32 st_pad4[8];
- } ub;
-
- if (!sysv_valid_dev(stat->dev) || !sysv_valid_dev(stat->rdev))
- return -EOVERFLOW;
- ub.st_dev = sysv_encode_dev(stat->dev);
- ub.st_ino = stat->ino;
- ub.st_mode = stat->mode;
- ub.st_nlink = stat->nlink;
- SET_UID(ub.st_uid, stat->uid);
- SET_GID(ub.st_gid, stat->gid);
- ub.st_rdev = sysv_encode_dev(stat->rdev);
-#if BITS_PER_LONG == 32
- if (stat->size > MAX_NON_LFS)
- return -EOVERFLOW;
-#endif
- ub.st_size = stat->size;
- ub.st_atime0 = stat->atime.tv_sec;
- ub.st_atime1 = stat->atime.tv_nsec;
- ub.st_mtime0 = stat->mtime.tv_sec;
- ub.st_mtime1 = stat->mtime.tv_nsec;
- ub.st_ctime0 = stat->ctime.tv_sec;
- ub.st_ctime1 = stat->ctime.tv_nsec;
- ub.st_blksize = stat->blksize;
- ub.st_blocks = stat->blocks;
- strcpy(ub.st_fstype, "efs");
-
- return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0;
-}
-
-static int irix_xstat64_xlate(struct kstat *stat, void __user *ubuf)
-{
- struct xstat64 {
- u32 st_dev; s32 st_pad1[3];
- unsigned long long st_ino;
- u32 st_mode;
- u32 st_nlink; s32 st_uid; s32 st_gid; u32 st_rdev;
- s32 st_pad2[2];
- long long st_size;
- s32 st_pad3;
- struct { s32 tv_sec, tv_nsec; } st_atime, st_mtime, st_ctime;
- s32 st_blksize;
- long long st_blocks;
- char st_fstype[16];
- s32 st_pad4[8];
- } ks;
-
- if (!sysv_valid_dev(stat->dev) || !sysv_valid_dev(stat->rdev))
- return -EOVERFLOW;
-
- ks.st_dev = sysv_encode_dev(stat->dev);
- ks.st_pad1[0] = ks.st_pad1[1] = ks.st_pad1[2] = 0;
- ks.st_ino = (unsigned long long) stat->ino;
- ks.st_mode = (u32) stat->mode;
- ks.st_nlink = (u32) stat->nlink;
- ks.st_uid = (s32) stat->uid;
- ks.st_gid = (s32) stat->gid;
- ks.st_rdev = sysv_encode_dev(stat->rdev);
- ks.st_pad2[0] = ks.st_pad2[1] = 0;
- ks.st_size = (long long) stat->size;
- ks.st_pad3 = 0;
-
- /* XXX hackety hack... */
- ks.st_atime.tv_sec = (s32) stat->atime.tv_sec;
- ks.st_atime.tv_nsec = stat->atime.tv_nsec;
- ks.st_mtime.tv_sec = (s32) stat->mtime.tv_sec;
- ks.st_mtime.tv_nsec = stat->mtime.tv_nsec;
- ks.st_ctime.tv_sec = (s32) stat->ctime.tv_sec;
- ks.st_ctime.tv_nsec = stat->ctime.tv_nsec;
-
- ks.st_blksize = (s32) stat->blksize;
- ks.st_blocks = (long long) stat->blocks;
- memset(ks.st_fstype, 0, 16);
- ks.st_pad4[0] = ks.st_pad4[1] = ks.st_pad4[2] = ks.st_pad4[3] = 0;
- ks.st_pad4[4] = ks.st_pad4[5] = ks.st_pad4[6] = ks.st_pad4[7] = 0;
-
- /* Now write it all back. */
- return copy_to_user(ubuf, &ks, sizeof(ks)) ? -EFAULT : 0;
-}
-
-asmlinkage int irix_xstat(int version, char __user *filename, struct stat __user *statbuf)
-{
- int retval;
- struct kstat stat;
-
-#ifdef DEBUG_XSTAT
- printk("[%s:%d] Wheee.. irix_xstat(%d,%s,%p) ",
- current->comm, current->pid, version, filename, statbuf);
-#endif
-
- retval = vfs_stat(filename, &stat);
- if (!retval) {
- switch (version) {
- case 2:
- retval = irix_xstat32_xlate(&stat, statbuf);
- break;
- case 3:
- retval = irix_xstat64_xlate(&stat, statbuf);
- break;
- default:
- retval = -EINVAL;
- }
- }
- return retval;
-}
-
-asmlinkage int irix_lxstat(int version, char __user *filename, struct stat __user *statbuf)
-{
- int error;
- struct kstat stat;
-
-#ifdef DEBUG_XSTAT
- printk("[%s:%d] Wheee.. irix_lxstat(%d,%s,%p) ",
- current->comm, current->pid, version, filename, statbuf);
-#endif
-
- error = vfs_lstat(filename, &stat);
-
- if (!error) {
- switch (version) {
- case 2:
- error = irix_xstat32_xlate(&stat, statbuf);
- break;
- case 3:
- error = irix_xstat64_xlate(&stat, statbuf);
- break;
- default:
- error = -EINVAL;
- }
- }
- return error;
-}
-
-asmlinkage int irix_fxstat(int version, int fd, struct stat __user *statbuf)
-{
- int error;
- struct kstat stat;
-
-#ifdef DEBUG_XSTAT
- printk("[%s:%d] Wheee.. irix_fxstat(%d,%d,%p) ",
- current->comm, current->pid, version, fd, statbuf);
-#endif
-
- error = vfs_fstat(fd, &stat);
- if (!error) {
- switch (version) {
- case 2:
- error = irix_xstat32_xlate(&stat, statbuf);
- break;
- case 3:
- error = irix_xstat64_xlate(&stat, statbuf);
- break;
- default:
- error = -EINVAL;
- }
- }
- return error;
-}
-
-asmlinkage int irix_xmknod(int ver, char __user *filename, int mode, unsigned dev)
-{
- int retval;
- printk("[%s:%d] Wheee.. irix_xmknod(%d,%s,%x,%x)\n",
- current->comm, current->pid, ver, filename, mode, dev);
-
- switch (ver) {
- case 2:
- /* shouldn't we convert here as well as on stat()? */
- retval = sys_mknod(filename, mode, dev);
- break;
-
- default:
- retval = -EINVAL;
- break;
- }
-
- return retval;
-}
-
-asmlinkage int irix_swapctl(int cmd, char __user *arg)
-{
- printk("[%s:%d] Wheee.. irix_swapctl(%d,%p)\n",
- current->comm, current->pid, cmd, arg);
-
- return -EINVAL;
-}
-
-struct irix_statvfs {
- u32 f_bsize; u32 f_frsize; u32 f_blocks;
- u32 f_bfree; u32 f_bavail; u32 f_files; u32 f_ffree; u32 f_favail;
- u32 f_fsid; char f_basetype[16];
- u32 f_flag; u32 f_namemax;
- char f_fstr[32]; u32 f_filler[16];
-};
-
-asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf)
-{
- struct nameidata nd;
- struct kstatfs kbuf;
- int error, i;
-
- printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n",
- current->comm, current->pid, fname, buf);
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)))
- return -EFAULT;
-
- error = user_path_walk(fname, &nd);
- if (error)
- goto out;
- error = vfs_statfs(nd.path.dentry, &kbuf);
- if (error)
- goto dput_and_out;
-
- error |= __put_user(kbuf.f_bsize, &buf->f_bsize);
- error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
- error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
- error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
- error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- error |= __put_user(kbuf.f_files, &buf->f_files);
- error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
- error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
-#ifdef __MIPSEB__
- error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
-#else
- error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
-#endif
- for (i = 0; i < 16; i++)
- error |= __put_user(0, &buf->f_basetype[i]);
- error |= __put_user(0, &buf->f_flag);
- error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
- for (i = 0; i < 32; i++)
- error |= __put_user(0, &buf->f_fstr[i]);
-
-dput_and_out:
- path_put(&nd.path);
-out:
- return error;
-}
-
-asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf)
-{
- struct kstatfs kbuf;
- struct file *file;
- int error, i;
-
- printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n",
- current->comm, current->pid, fd, buf);
-
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)))
- return -EFAULT;
-
- if (!(file = fget(fd))) {
- error = -EBADF;
- goto out;
- }
- error = vfs_statfs(file->f_path.dentry, &kbuf);
- if (error)
- goto out_f;
-
- error = __put_user(kbuf.f_bsize, &buf->f_bsize);
- error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
- error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
- error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
- error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- error |= __put_user(kbuf.f_files, &buf->f_files);
- error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
- error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
-#ifdef __MIPSEB__
- error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
-#else
- error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
-#endif
- for (i = 0; i < 16; i++)
- error |= __put_user(0, &buf->f_basetype[i]);
- error |= __put_user(0, &buf->f_flag);
- error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
- error |= __clear_user(&buf->f_fstr, sizeof(buf->f_fstr)) ? -EFAULT : 0;
-
-out_f:
- fput(file);
-out:
- return error;
-}
-
-asmlinkage int irix_priocntl(struct pt_regs *regs)
-{
- printk("[%s:%d] Wheee.. irix_priocntl()\n",
- current->comm, current->pid);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_sigqueue(int pid, int sig, int code, int val)
-{
- printk("[%s:%d] Wheee.. irix_sigqueue(%d,%d,%d,%d)\n",
- current->comm, current->pid, pid, sig, code, val);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_truncate64(char __user *name, int pad, int size1, int size2)
-{
- int retval;
-
- if (size1) {
- retval = -EINVAL;
- goto out;
- }
- retval = sys_truncate(name, size2);
-
-out:
- return retval;
-}
-
-asmlinkage int irix_ftruncate64(int fd, int pad, int size1, int size2)
-{
- int retval;
-
- if (size1) {
- retval = -EINVAL;
- goto out;
- }
- retval = sys_ftruncate(fd, size2);
-
-out:
- return retval;
-}
-
-asmlinkage int irix_mmap64(struct pt_regs *regs)
-{
- int len, prot, flags, fd, off1, off2, error, base = 0;
- unsigned long addr, pgoff, *sp;
- struct file *file = NULL;
- int err;
-
- if (regs->regs[2] == 1000)
- base = 1;
- sp = (unsigned long *) (regs->regs[29] + 16);
- addr = regs->regs[base + 4];
- len = regs->regs[base + 5];
- prot = regs->regs[base + 6];
- if (!base) {
- flags = regs->regs[base + 7];
- if (!access_ok(VERIFY_READ, sp, (4 * sizeof(unsigned long))))
- return -EFAULT;
- fd = sp[0];
- err = __get_user(off1, &sp[1]);
- err |= __get_user(off2, &sp[2]);
- } else {
- if (!access_ok(VERIFY_READ, sp, (5 * sizeof(unsigned long))))
- return -EFAULT;
- err = __get_user(flags, &sp[0]);
- err |= __get_user(fd, &sp[1]);
- err |= __get_user(off1, &sp[2]);
- err |= __get_user(off2, &sp[3]);
- }
-
- if (err)
- return err;
-
- if (off1 & PAGE_MASK)
- return -EOVERFLOW;
-
- pgoff = ((unsigned long) off1 << (32 - PAGE_SHIFT)) | ((u32) off2 >> PAGE_SHIFT);
-
- if (!(flags & MAP_ANONYMOUS)) {
- if (!(file = fget(fd)))
- return -EBADF;
-
- /* Ok, bad-taste hack follows; try to think of something else
- while reading this */
- if (flags & IRIX_MAP_AUTOGROW) {
- unsigned long old_pos;
- long max_size = off2 + len;
-
- if (max_size > file->f_path.dentry->d_inode->i_size) {
- old_pos = sys_lseek(fd, max_size - 1, 0);
- sys_write(fd, (void __user *) "", 1);
- sys_lseek(fd, old_pos, 0);
- }
- }
- }
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-
- return error;
-}
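A worked example of the pgoff assembly above, assuming PAGE_SHIFT == 12 (4 KiB pages):

/* the 64-bit file offset 0x100002000 arrives as off1 = 0x1 (high
 * word) and off2 = 0x2000 (low word):
 *   pgoff = (0x1 << 20) | (0x2000 >> 12) = 0x100002
 * and do_mmap_pgoff() maps from byte offset 0x100002 << 12, i.e.
 * 0x100002000 again. */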
-
-asmlinkage int irix_dmi(struct pt_regs *regs)
-{
- printk("[%s:%d] Wheee.. irix_dmi()\n",
- current->comm, current->pid);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_pread(int fd, char __user *buf, int cnt, int off64,
- int off1, int off2)
-{
- printk("[%s:%d] Wheee.. irix_pread(%d,%p,%d,%d,%d,%d)\n",
- current->comm, current->pid, fd, buf, cnt, off64, off1, off2);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_pwrite(int fd, char __user *buf, int cnt, int off64,
- int off1, int off2)
-{
- printk("[%s:%d] Wheee.. irix_pwrite(%d,%p,%d,%d,%d,%d)\n",
- current->comm, current->pid, fd, buf, cnt, off64, off1, off2);
-
- return -EINVAL;
-}
-
-asmlinkage int irix_sgifastpath(int cmd, unsigned long arg0, unsigned long arg1,
- unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5)
-{
- printk("[%s:%d] Wheee.. irix_fastpath(%d,%08lx,%08lx,%08lx,%08lx,"
- "%08lx,%08lx)\n",
- current->comm, current->pid, cmd, arg0, arg1, arg2,
- arg3, arg4, arg5);
-
- return -EINVAL;
-}
-
-struct irix_statvfs64 {
- u32 f_bsize; u32 f_frsize;
- u64 f_blocks; u64 f_bfree; u64 f_bavail;
- u64 f_files; u64 f_ffree; u64 f_favail;
- u32 f_fsid;
- char f_basetype[16];
- u32 f_flag; u32 f_namemax;
- char f_fstr[32];
- u32 f_filler[16];
-};
-
-asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *buf)
-{
- struct nameidata nd;
- struct kstatfs kbuf;
- int error, i;
-
- printk("[%s:%d] Wheee.. irix_statvfs64(%s,%p)\n",
- current->comm, current->pid, fname, buf);
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs64))) {
- error = -EFAULT;
- goto out;
- }
-
- error = user_path_walk(fname, &nd);
- if (error)
- goto out;
- error = vfs_statfs(nd.path.dentry, &kbuf);
- if (error)
- goto dput_and_out;
-
- error = __put_user(kbuf.f_bsize, &buf->f_bsize);
- error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
- error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
- error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
- error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- error |= __put_user(kbuf.f_files, &buf->f_files);
- error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
- error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
-#ifdef __MIPSEB__
- error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
-#else
- error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
-#endif
- for (i = 0; i < 16; i++)
- error |= __put_user(0, &buf->f_basetype[i]);
- error |= __put_user(0, &buf->f_flag);
- error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
- for (i = 0; i < 32; i++)
- error |= __put_user(0, &buf->f_fstr[i]);
-
-dput_and_out:
- path_put(&nd.path);
-out:
- return error;
-}
-
-asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf)
-{
- struct kstatfs kbuf;
- struct file *file;
- int error, i;
-
- printk("[%s:%d] Wheee.. irix_fstatvfs64(%d,%p)\n",
- current->comm, current->pid, fd, buf);
-
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) {
- error = -EFAULT;
- goto out;
- }
- if (!(file = fget(fd))) {
- error = -EBADF;
- goto out;
- }
- error = vfs_statfs(file->f_path.dentry, &kbuf);
- if (error)
- goto out_f;
-
- error = __put_user(kbuf.f_bsize, &buf->f_bsize);
- error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
- error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
- error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
- error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- error |= __put_user(kbuf.f_files, &buf->f_files);
- error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
- error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
-#ifdef __MIPSEB__
- error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
-#else
- error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
-#endif
- for (i = 0; i < 16; i++)
- error |= __put_user(0, &buf->f_basetype[i]);
- error |= __put_user(0, &buf->f_flag);
- error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
- error |= __clear_user(buf->f_fstr, sizeof(buf->f_fstr)) ? -EFAULT : 0;
-
-out_f:
- fput(file);
-out:
- return error;
-}
-
-asmlinkage int irix_getmountid(char __user *fname, unsigned long __user *midbuf)
-{
- int err;
-
- printk("[%s:%d] irix_getmountid(%s, %p)\n",
- current->comm, current->pid, fname, midbuf);
- if (!access_ok(VERIFY_WRITE, midbuf, (sizeof(unsigned long) * 4)))
- return -EFAULT;
-
- /*
- * The idea with this system call is that when trying to determine
- * 'pwd' and it's a toss-up for some reason, userland can use the
- * fsid of the filesystem to try and make the right decision; we
- * don't have that information, so just return zeros for now. XXX
- */
- err = __put_user(0, &midbuf[0]);
- err |= __put_user(0, &midbuf[1]);
- err |= __put_user(0, &midbuf[2]);
- err |= __put_user(0, &midbuf[3]);
-
- return err;
-}
-
-asmlinkage int irix_nsproc(unsigned long entry, unsigned long mask,
- unsigned long arg, unsigned long sp, int slen)
-{
- printk("[%s:%d] Wheee.. irix_nsproc(%08lx,%08lx,%08lx,%08lx,%d)\n",
- current->comm, current->pid, entry, mask, arg, sp, slen);
-
- return -EINVAL;
-}
-
-#undef DEBUG_GETDENTS
-
-struct irix_dirent32 {
- u32 d_ino;
- u32 d_off;
- unsigned short d_reclen;
- char d_name[1];
-};
-
-struct irix_dirent32_callback {
- struct irix_dirent32 __user *current_dir;
- struct irix_dirent32 __user *previous;
- int count;
- int error;
-};
-
-#define NAME_OFFSET32(de) ((int) ((de)->d_name - (char *) (de)))
-#define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
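For this record layout NAME_OFFSET32() is 10 (two u32 fields plus a u16 ahead of d_name) and ROUND_UP32() pads each record to a multiple of 4, for example:

/* namlen = 5: reclen = ROUND_UP32(10 + 5 + 1) = 16 (already aligned)
 * namlen = 6: reclen = ROUND_UP32(10 + 6 + 1) = 20 (17, padded up) */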
-
-static int irix_filldir32(void *__buf, const char *name,
- int namlen, loff_t offset, u64 ino, unsigned int d_type)
-{
- struct irix_dirent32 __user *dirent;
- struct irix_dirent32_callback *buf = __buf;
- unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1);
- int err = 0;
- u32 d_ino;
-
-#ifdef DEBUG_GETDENTS
- printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]",
- reclen, namlen, buf->count);
-#endif
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
- d_ino = ino;
- if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
- return -EOVERFLOW;
- dirent = buf->previous;
- if (dirent)
- err = __put_user(offset, &dirent->d_off);
- dirent = buf->current_dir;
- buf->previous = dirent;
- err |= __put_user(d_ino, &dirent->d_ino);
- err |= __put_user(reclen, &dirent->d_reclen);
- err |= copy_to_user((char __user *)dirent->d_name, name, namlen) ? -EFAULT : 0;
- err |= __put_user(0, &dirent->d_name[namlen]);
- dirent = (struct irix_dirent32 __user *) ((char __user *) dirent + reclen);
-
- buf->current_dir = dirent;
- buf->count -= reclen;
-
- return err;
-}
-
-asmlinkage int irix_ngetdents(unsigned int fd, void __user * dirent,
- unsigned int count, int __user *eob)
-{
- struct file *file;
- struct irix_dirent32 __user *lastdirent;
- struct irix_dirent32_callback buf;
- int error;
-
-#ifdef DEBUG_GETDENTS
- printk("[%s:%d] ngetdents(%d, %p, %d, %p) ", current->comm,
- current->pid, fd, dirent, count, eob);
-#endif
- error = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
-
- buf.current_dir = (struct irix_dirent32 __user *) dirent;
- buf.previous = NULL;
- buf.count = count;
- buf.error = 0;
-
- error = vfs_readdir(file, irix_filldir32, &buf);
- if (error < 0)
- goto out_putf;
-
- error = buf.error;
- lastdirent = buf.previous;
- if (lastdirent) {
- put_user(file->f_pos, &lastdirent->d_off);
- error = count - buf.count;
- }
-
- if (put_user(0, eob) < 0) {
- error = -EFAULT;
- goto out_putf;
- }
-
-#ifdef DEBUG_GETDENTS
- printk("eob=%d returning %d\n", *eob, count - buf.count);
-#endif
- error = count - buf.count;
-
-out_putf:
- fput(file);
-out:
- return error;
-}
-
-struct irix_dirent64 {
- u64 d_ino;
- u64 d_off;
- unsigned short d_reclen;
- char d_name[1];
-};
-
-struct irix_dirent64_callback {
- struct irix_dirent64 __user *curr;
- struct irix_dirent64 __user *previous;
- int count;
- int error;
-};
-
-#define NAME_OFFSET64(de) ((int) ((de)->d_name - (char *) (de)))
-#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
-
-static int irix_filldir64(void *__buf, const char *name,
- int namlen, loff_t offset, u64 ino, unsigned int d_type)
-{
- struct irix_dirent64 __user *dirent;
- struct irix_dirent64_callback * buf = __buf;
- unsigned short reclen = ROUND_UP64(NAME_OFFSET64(dirent) + namlen + 1);
- int err = 0;
-
- buf->error = -EINVAL; /* only used if we fail.. */
- if (reclen > buf->count)
- return -EINVAL;
- dirent = buf->previous;
- if (dirent)
- err = __put_user(offset, &dirent->d_off);
- dirent = buf->curr;
- buf->previous = dirent;
- err |= __put_user(ino, &dirent->d_ino);
- err |= __put_user(reclen, &dirent->d_reclen);
- err |= __copy_to_user((char __user *)dirent->d_name, name, namlen)
- ? -EFAULT : 0;
- err |= __put_user(0, &dirent->d_name[namlen]);
-
- dirent = (struct irix_dirent64 __user *) ((char __user *) dirent + reclen);
-
- buf->curr = dirent;
- buf->count -= reclen;
-
- return err;
-}
-
-asmlinkage int irix_getdents64(int fd, void __user *dirent, int cnt)
-{
- struct file *file;
- struct irix_dirent64 __user *lastdirent;
- struct irix_dirent64_callback buf;
- int error;
-
-#ifdef DEBUG_GETDENTS
- printk("[%s:%d] getdents64(%d, %p, %d) ", current->comm,
- current->pid, fd, dirent, cnt);
-#endif
- error = -EBADF;
- if (!(file = fget(fd)))
- goto out;
-
- error = -EFAULT;
- if (!access_ok(VERIFY_WRITE, dirent, cnt))
- goto out_f;
-
- error = -EINVAL;
- if (cnt < (sizeof(struct irix_dirent64) + 255))
- goto out_f;
-
- buf.curr = (struct irix_dirent64 __user *) dirent;
- buf.previous = NULL;
- buf.count = cnt;
- buf.error = 0;
- error = vfs_readdir(file, irix_filldir64, &buf);
- if (error < 0)
- goto out_f;
- lastdirent = buf.previous;
- if (!lastdirent) {
- error = buf.error;
- goto out_f;
- }
- if (put_user(file->f_pos, &lastdirent->d_off)) {
- error = -EFAULT;
- goto out_f;
- }
-#ifdef DEBUG_GETDENTS
- printk("returning %d\n", cnt - buf.count);
-#endif
- error = cnt - buf.count;
-
-out_f:
- fput(file);
-out:
- return error;
-}
-
-asmlinkage int irix_ngetdents64(int fd, void __user *dirent, int cnt, int __user *eob)
-{
- struct file *file;
- struct irix_dirent64 __user *lastdirent;
- struct irix_dirent64_callback buf;
- int error;
-
-#ifdef DEBUG_GETDENTS
- printk("[%s:%d] ngetdents64(%d, %p, %d) ", current->comm,
- current->pid, fd, dirent, cnt);
-#endif
- error = -EBADF;
- if (!(file = fget(fd)))
- goto out;
-
- error = -EFAULT;
- if (!access_ok(VERIFY_WRITE, dirent, cnt) ||
- !access_ok(VERIFY_WRITE, eob, sizeof(*eob)))
- goto out_f;
-
- error = -EINVAL;
- if (cnt < (sizeof(struct irix_dirent64) + 255))
- goto out_f;
-
- if (__put_user(0, eob)) {
- error = -EFAULT;
- goto out_f;
- }
- buf.curr = (struct irix_dirent64 __user *) dirent;
- buf.previous = NULL;
- buf.count = cnt;
- buf.error = 0;
- error = vfs_readdir(file, irix_filldir64, &buf);
- if (error < 0)
- goto out_f;
- lastdirent = buf.previous;
- if (!lastdirent) {
- error = buf.error;
- goto out_f;
- }
- if (put_user(file->f_pos, &lastdirent->d_off)) {
- error = -EFAULT;
- goto out_f;
- }
-#ifdef DEBUG_GETDENTS
- printk("eob=%d returning %d\n", *eob, cnt - buf.count);
-#endif
- error = cnt - buf.count;
-
-out_f:
- fput(file);
-out:
- return error;
-}
-
-asmlinkage int irix_uadmin(unsigned long op, unsigned long func, unsigned long arg)
-{
- int retval;
-
- switch (op) {
- case 1:
- /* Reboot */
- printk("[%s:%d] irix_uadmin: Wants to reboot...\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 2:
- /* Shutdown */
- printk("[%s:%d] irix_uadmin: Wants to shutdown...\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 4:
- /* Remount-root */
- printk("[%s:%d] irix_uadmin: Wants to remount root...\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 8:
- /* Kill all tasks. */
- printk("[%s:%d] irix_uadmin: Wants to kill all tasks...\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 256:
- /* Set magic mushrooms... */
- printk("[%s:%d] irix_uadmin: Wants to set magic mushroom[%d]...\n",
- current->comm, current->pid, (int) func);
- retval = -EINVAL;
- goto out;
-
- default:
- printk("[%s:%d] irix_uadmin: Unknown operation [%d]...\n",
- current->comm, current->pid, (int) op);
- retval = -EINVAL;
- goto out;
- }
-
-out:
- return retval;
-}
-
-asmlinkage int irix_utssys(char __user *inbuf, int arg, int type, char __user *outbuf)
-{
- int retval;
-
- switch (type) {
- case 0:
- /* uname() */
- retval = irix_uname((struct iuname __user *)inbuf);
- goto out;
-
- case 2:
- /* ustat() */
- printk("[%s:%d] irix_utssys: Wants to do ustat()\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 3:
- /* fusers() */
- printk("[%s:%d] irix_utssys: Wants to do fusers()\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- default:
- printk("[%s:%d] irix_utssys: Wants to do unknown type[%d]\n",
- current->comm, current->pid, (int) type);
- retval = -EINVAL;
- goto out;
- }
-
-out:
- return retval;
-}
-
-#undef DEBUG_FCNTL
-
-#define IRIX_F_ALLOCSP 10
-
-asmlinkage int irix_fcntl(int fd, int cmd, int arg)
-{
- int retval;
-
-#ifdef DEBUG_FCNTL
- printk("[%s:%d] irix_fcntl(%d, %d, %d) ", current->comm,
- current->pid, fd, cmd, arg);
-#endif
- if (cmd == IRIX_F_ALLOCSP) {
- return 0;
- }
- retval = sys_fcntl(fd, cmd, arg);
-#ifdef DEBUG_FCNTL
- printk("%d\n", retval);
-#endif
- return retval;
-}
-
-asmlinkage int irix_ulimit(int cmd, int arg)
-{
- int retval;
-
- switch (cmd) {
- case 1:
- printk("[%s:%d] irix_ulimit: Wants to get file size limit.\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 2:
- printk("[%s:%d] irix_ulimit: Wants to set file size limit.\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 3:
- printk("[%s:%d] irix_ulimit: Wants to get brk limit.\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- case 4:
-#if 0
- printk("[%s:%d] irix_ulimit: Wants to get fd limit.\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-#endif
- retval = current->signal->rlim[RLIMIT_NOFILE].rlim_cur;
- goto out;
-
- case 5:
- printk("[%s:%d] irix_ulimit: Wants to get txt offset.\n",
- current->comm, current->pid);
- retval = -EINVAL;
- goto out;
-
- default:
- printk("[%s:%d] irix_ulimit: Unknown command [%d].\n",
- current->comm, current->pid, cmd);
- retval = -EINVAL;
- goto out;
- }
-out:
- return retval;
-}
-
-asmlinkage int irix_unimp(struct pt_regs *regs)
-{
- printk("irix_unimp [%s:%d] v0=%d v1=%d a0=%08lx a1=%08lx a2=%08lx "
- "a3=%08lx\n", current->comm, current->pid,
- (int) regs->regs[2], (int) regs->regs[3],
- regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
-
- return -ENOSYS;
-}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index b45a7093ca2..8d0170969e2 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -5,8 +5,8 @@
*
* Common time service routines for MIPS machines.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -21,11 +21,11 @@
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
#include <asm/div64.h>
-#include <asm/smtc_ipi.h>
#include <asm/time.h>
/*
@@ -38,7 +38,6 @@ int __weak rtc_mips_set_time(unsigned long sec)
{
return 0;
}
-EXPORT_SYMBOL(rtc_mips_set_time);
int __weak rtc_mips_set_mmss(unsigned long nowtime)
{
@@ -50,13 +49,11 @@ int update_persistent_clock(struct timespec now)
return rtc_mips_set_mmss(now.tv_sec);
}
-int null_perf_irq(void)
+static int null_perf_irq(void)
{
return 0;
}
-EXPORT_SYMBOL(null_perf_irq);
-
int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);
@@ -65,8 +62,8 @@ EXPORT_SYMBOL(perf_irq);
* time_init() - it does the following things.
*
* 1) plat_time_init() -
- * a) (optional) set up RTC routines,
- * b) (optional) calibrate and set the mips_hpt_frequency
+ * a) (optional) set up RTC routines,
+ * b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
* 2) calculate a couple of cached variables for later usage
@@ -74,44 +71,11 @@ EXPORT_SYMBOL(perf_irq);
unsigned int mips_hpt_frequency;
-void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
-{
- u64 temp;
- u32 shift;
-
- /* Find a shift value */
- for (shift = 32; shift > 0; shift--) {
- temp = (u64) NSEC_PER_SEC << shift;
- do_div(temp, clock);
- if ((temp >> 32) == 0)
- break;
- }
- cs->shift = shift;
- cs->mult = (u32) temp;
-}
-
-void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
- unsigned int clock)
-{
- u64 temp;
- u32 shift;
-
- /* Find a shift value */
- for (shift = 32; shift > 0; shift--) {
- temp = (u64) clock << shift;
- do_div(temp, NSEC_PER_SEC);
- if ((temp >> 32) == 0)
- break;
- }
- cd->shift = shift;
- cd->mult = (u32) temp;
-}
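For reference, a worked example of the mult/shift search being deleted here, assuming a 100 MHz clock; the generic timekeeping code computes equivalent factors these days, which is presumably why the helpers can go:

/* clock = 10^8 Hz: the loop stops at shift = 28, since
 *   (10^9 << 28) / 10^8 = 10 << 28 = 2684354560 still fits in 32 bits
 * while shift = 29 would not; so mult = 2684354560, and a delta of
 * 100 cycles (1 us) converts as (100 * 2684354560) >> 28 = 1000 ns. */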
-
/*
* This function exists in order to cause an error due to a duplicate
* definition if platform code should have its own implementation. The hook
* to use instead is plat_time_init. plat_time_init does not receive the
- * irqaction pointer argument anymore. This is because any function which
+ * irqaction pointer argument anymore. This is because any function which
* initializes an interrupt timer now takes care of its own request_irq
* or setup_irq calls and each clock_event_device should use its own
* struct irqaction.
@@ -129,7 +93,7 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4000MC:
/*
* V3.0 is documented as suffering from the mfc0 from count bug.
- * Afaik this is the last version of the R4000. Later versions
+ * Afaik this is the last version of the R4000. Later versions
* were marketed as R4400.
*/
return 1;
@@ -138,7 +102,7 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4400SC:
case CPU_R4400MC:
/*
- * The published errata for the R4400 upto 3.0 say the CPU
+ * The published errata for the R4400 up to 3.0 say the CPU
* has the mfc0 from count bug.
*/
if ((current_cpu_data.processor_id & 0xff) <= 0x30)
@@ -157,6 +121,14 @@ void __init time_init(void)
{
plat_time_init();
- if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug())
+ /*
+ * The use of the R4k timer as a clock event takes precedence;
+ * if reading the Count register might interfere with the timer
+ * interrupt, then we don't use the timer as a clock source.
+ * We may still use the timer as a clock source though if the
+ * timer interrupt isn't reliable; the interference doesn't
+ * matter then, because we don't use the interrupt.
+ */
+ if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
init_mips_clocksource();
}
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
index 660e44ed44d..cf3eb61fad1 100644
--- a/arch/mips/kernel/topology.c
+++ b/arch/mips/kernel/topology.c
@@ -17,7 +17,10 @@ static int __init topology_init(void)
#endif /* CONFIG_NUMA */
for_each_present_cpu(i) {
- ret = register_cpu(&per_cpu(cpu_devices, i), i);
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = 1;
+ ret = register_cpu(c, i);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", i, ret);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 984c0d0a7b4..51706d6dd5b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,45 +8,66 @@
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/cpu_pm.h>
+#include <linux/kexec.h>
#include <linux/init.h>
-#include <linux/mm.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/kdb.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
+#include <asm/cop2.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
-#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
+#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
+#include <asm/uasm.h>
+extern void check_wait(void);
+extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
@@ -59,7 +80,10 @@ extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
@@ -67,20 +91,17 @@ extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
- struct mips_fpu_struct *ctx, int has_fpu);
-
-void (*board_watchpoint_handler)(struct pt_regs *regs);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
-
+void (*board_ebase_setup)(void);
+void(*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
- unsigned long *sp = (unsigned long *)reg29;
+ unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("Call Trace:");
@@ -88,7 +109,12 @@ static void show_raw_backtrace(unsigned long reg29)
printk("\n");
#endif
while (!kstack_end(sp)) {
- addr = *sp++;
+ unsigned long __user *p =
+ (unsigned long __user *)(unsigned long)sp++;
+ if (__get_user(addr, p)) {
+ printk(" (Bad stack address)");
+ break;
+ }
if (__kernel_text_address(addr))
print_ip_sym(addr);
}
@@ -111,6 +137,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
+ if (!task)
+ task = current;
+
if (raw_show_trace || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
@@ -139,7 +168,7 @@ static void show_stacktrace(struct task_struct *task,
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ printk("\n ");
if (i > 39) {
printk(" ...");
break;
@@ -169,6 +198,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
+#ifdef CONFIG_KGDB_KDB
+ } else if (atomic_read(&kgdb_active) != -1 &&
+ kdb_current_regs) {
+ memcpy(&regs, kdb_current_regs, sizeof(regs));
+#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
@@ -176,32 +210,22 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_stacktrace(task, &regs);
}
-/*
- * The architecture-independent dump_stack generator
- */
-void dump_stack(void)
-{
- struct pt_regs regs;
-
- prepare_frametrace(&regs);
- show_backtrace(current, &regs);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
static void show_code(unsigned int __user *pc)
{
long i;
+ unsigned short __user *pc16 = NULL;
printk("\nCode:");
+ if ((unsigned long)pc & 1)
+ pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
- if (__get_user(insn, pc + i)) {
+ if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
break;
}
- printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+ printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
}
@@ -211,7 +235,7 @@ static void __show_regs(const struct pt_regs *regs)
unsigned int cause = regs->cp0_cause;
int i;
- printk("Cpu %d\n", smp_processor_id());
+ show_regs_print_info(KERN_DEFAULT);
/*
* Saved main processor registers
@@ -240,15 +264,15 @@ static void __show_regs(const struct pt_regs *regs)
/*
* Saved cp0 registers
*/
- printk("epc : %0*lx ", field, regs->cp0_epc);
- print_symbol("%s ", regs->cp0_epc);
+ printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
+ (void *) regs->cp0_epc);
printk(" %s\n", print_tainted());
- printk("ra : %0*lx ", field, regs->regs[31]);
- print_symbol("%s\n", regs->regs[31]);
+ printk("ra : %0*lx %pS\n", field, regs->regs[31],
+ (void *) regs->regs[31]);
- printk("Status: %08x ", (uint32_t) regs->cp0_status);
+ printk("Status: %08x ", (uint32_t) regs->cp0_status);
- if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
+ if (cpu_has_3kex) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
if (regs->cp0_status & ST0_IEO)
@@ -261,7 +285,7 @@ static void __show_regs(const struct pt_regs *regs)
printk("KUc ");
if (regs->cp0_status & ST0_IEC)
printk("IEc ");
- } else {
+ } else if (cpu_has_4kex) {
if (regs->cp0_status & ST0_KX)
printk("KX ");
if (regs->cp0_status & ST0_SX)
@@ -309,51 +333,79 @@ void show_regs(struct pt_regs *regs)
__show_regs((struct pt_regs *)regs);
}
-void show_registers(const struct pt_regs *regs)
+void show_registers(struct pt_regs *regs)
{
+ const int field = 2 * sizeof(unsigned long);
+ mm_segment_t old_fs = get_fs();
+
__show_regs(regs);
print_modules();
- printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
- current->comm, task_pid_nr(current), current_thread_info(), current);
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+ current->comm, current->pid, current_thread_info(), current,
+ field, current_thread_info()->tp_value);
+ if (cpu_has_userlocal) {
+ unsigned long tls;
+
+ tls = read_c0_userlocal();
+ if (tls != current_thread_info()->tp_value)
+ printk("*HwTLS: %0*lx\n", field, tls);
+ }
+
+ if (!user_mode(regs))
+ /* Necessary for getting the correct stack content */
+ set_fs(KERNEL_DS);
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
+ set_fs(old_fs);
+}
+
+static int regs_to_trapnr(struct pt_regs *regs)
+{
+ return (regs->cp0_cause >> 2) & 0x1f;
}
-static DEFINE_SPINLOCK(die_lock);
+static DEFINE_RAW_SPINLOCK(die_lock);
-void __noreturn die(const char * str, const struct pt_regs * regs)
+void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long dvpret = dvpe();
-#endif /* CONFIG_MIPS_MT_SMTC */
+ int sig = SIGSEGV;
+
+ oops_enter();
+
+ if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+ SIGSEGV) == NOTIFY_STOP)
+ sig = 0;
console_verbose();
- spin_lock_irq(&die_lock);
+ raw_spin_lock_irq(&die_lock);
bust_spinlocks(1);
-#ifdef CONFIG_MIPS_MT_SMTC
- mips_mt_regdump(dvpret);
-#endif /* CONFIG_MIPS_MT_SMTC */
+
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
- add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
- do_exit(SIGSEGV);
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+ do_exit(sig);
}
-extern const struct exception_table_entry __start___dbe_table[];
-extern const struct exception_table_entry __stop___dbe_table[];
+extern struct exception_table_entry __start___dbe_table[];
+extern struct exception_table_entry __stop___dbe_table[];
__asm__(
" .section __dbe_table, \"a\"\n"
@@ -376,8 +428,10 @@ asmlinkage void do_be(struct pt_regs *regs)
const struct exception_table_entry *fixup = NULL;
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
+ enum ctx_state prev_state;
- /* XXX For now. Fixme, this searches the wrong table ... */
+ prev_state = exception_enter();
+ /* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
@@ -389,11 +443,11 @@ asmlinkage void do_be(struct pt_regs *regs)
switch (action) {
case MIPS_BE_DISCARD:
- return;
+ goto out;
case MIPS_BE_FIXUP:
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
- return;
+ goto out;
}
break;
default:
@@ -406,8 +460,15 @@ asmlinkage void do_be(struct pt_regs *regs)
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+ SIGBUS) == NOTIFY_STOP)
+ goto out;
+
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
+
+out:
+ exception_exit(prev_state);
}
/*
@@ -427,13 +488,18 @@ asmlinkage void do_be(struct pt_regs *regs)
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
+/* microMIPS definitions */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR 0x00006b3c
+#define MM_RS 0x001f0000
+#define MM_RT 0x03e00000
+
/*
* The ll_bit is cleared by r*_switch.S
*/
-unsigned long ll_bit;
-
-static struct task_struct *ll_task = NULL;
+unsigned int ll_bit;
+struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
@@ -451,7 +517,7 @@ static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
offset >>= 16;
vaddr = (unsigned long __user *)
- ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
@@ -491,7 +557,7 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
offset >>= 16;
vaddr = (unsigned long __user *)
- ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
@@ -524,10 +590,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == LL)
+ if ((opcode & OPCODE) == LL) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
return simulate_ll(regs, opcode);
- if ((opcode & OPCODE) == SC)
+ }
+ if ((opcode & OPCODE) == SC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
return simulate_sc(regs, opcode);
+ }
return -1; /* Must be something else ... */
}
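For reference, the user-level idiom these helpers emulate on CPUs without a working LL/SC pair -- a standard MIPS atomic-increment loop (illustrative assembly):

/*
 * 1:	ll	$t0, 0($a0)	# load-linked, sets the link/ll_bit
 *	addiu	$t0, $t0, 1
 *	sc	$t0, 0($a0)	# store-conditional, $t0 = 1 on success
 *	beqz	$t0, 1b		# link broken by a context switch: retry
 */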
@@ -536,40 +608,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
* Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware.
*/
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
struct thread_info *ti = task_thread_info(current);
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ switch (rd) {
+ case 0: /* CPU number */
+ regs->regs[rt] = smp_processor_id();
+ return 0;
+ case 1: /* SYNCI length */
+ regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ return 0;
+ case 2: /* Read count register */
+ regs->regs[rt] = read_c0_count();
+ return 0;
+ case 3: /* Count register resolution */
+ switch (current_cpu_type()) {
+ case CPU_20KC:
+ case CPU_25KF:
+ regs->regs[rt] = 1;
+ break;
+ default:
+ regs->regs[rt] = 2;
+ }
+ return 0;
+ case 29:
+ regs->regs[rt] = ti->tp_value;
+ return 0;
+ default:
+ return -1;
+ }
+}
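The main consumer of this helper is the TLS access sequence userland emits: on cores without a hardware RDHWR, the instruction raises a Reserved Instruction exception and is emulated via the 'case 29' branch above (illustrative user-side assembly):

/*
 *	rdhwr	$3, $29		# read hw register 29 (UserLocal),
 *				# the thread pointer
 */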
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
- switch (rd) {
- case 0: /* CPU number */
- regs->regs[rt] = smp_processor_id();
- return 0;
- case 1: /* SYNCI length */
- regs->regs[rt] = min(current_cpu_data.dcache.linesz,
- current_cpu_data.icache.linesz);
- return 0;
- case 2: /* Read count register */
- regs->regs[rt] = read_c0_count();
- return 0;
- case 3: /* Count register resolution */
- switch (current_cpu_data.cputype) {
- case CPU_20KC:
- case CPU_25KF:
- regs->regs[rt] = 1;
- break;
- default:
- regs->regs[rt] = 2;
- }
- return 0;
- case 29:
- regs->regs[rt] = ti->tp_value;
- return 0;
- default:
- return -1;
- }
+
+ return simulate_rdhwr(regs, rd, rt);
+ }
+
+ /* Not ours. */
+ return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+ if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+ int rd = (opcode & MM_RS) >> 16;
+ int rt = (opcode & MM_RT) >> 21;
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
}
/* Not ours. */
@@ -578,16 +672,21 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
- if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
return 0;
+ }
return -1; /* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
+ enum ctx_state prev_state;
siginfo_t info;
+ prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
info.si_code = FPE_INTOVF;
@@ -595,6 +694,33 @@ asmlinkage void do_ov(struct pt_regs *regs)
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
+ exception_exit(prev_state);
+}
+
+int process_fpemu_return(int sig, void __user *fault_addr)
+{
+ if (sig == SIGSEGV || sig == SIGBUS) {
+ struct siginfo si = {0};
+ si.si_addr = fault_addr;
+ si.si_signo = sig;
+ if (sig == SIGSEGV) {
+ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, (unsigned long)fault_addr))
+ si.si_code = SEGV_ACCERR;
+ else
+ si.si_code = SEGV_MAPERR;
+ up_read(&current->mm->mmap_sem);
+ } else {
+ si.si_code = BUS_ADRERR;
+ }
+ force_sig_info(sig, &si, current);
+ return 1;
+ } else if (sig) {
+ force_sig(sig, current);
+ return 1;
+ } else {
+ return 0;
+ }
}
/*
@@ -602,12 +728,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
- siginfo_t info;
+ enum ctx_state prev_state;
+ siginfo_t info = {0};
+ prev_state = exception_enter();
+ if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+ SIGFPE) == NOTIFY_STOP)
+ goto out;
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
+ void __user *fault_addr = NULL;
/*
* Unimplemented operation exception. If we've got the full
@@ -623,7 +755,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
lose_fpu(1);
/* Run the emulator */
- sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
/*
* We can't allow the emulated instruction to leave any of
@@ -632,13 +765,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
- own_fpu(1); /* Using the FPU again. */
+ own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
- if (sig)
- force_sig(sig, current);
+ process_fpemu_return(sig, fault_addr);
- return;
+ goto out;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
@@ -655,37 +787,38 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
+
+out:
+ exception_exit(prev_state);
}
-asmlinkage void do_bp(struct pt_regs *regs)
+static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ const char *str)
{
- unsigned int opcode, bcode;
siginfo_t info;
+ char b[40];
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
- /*
- * There is the ancient bug in the MIPS assemblers that the break
- * code starts left to bit 16 instead to bit 6 in the opcode.
- * Gas is bug-compatible, but not always, grrr...
- * We handle both cases with a simple heuristics. --macro
- */
- bcode = ((opcode >> 6) & ((1 << 20) - 1));
- if (bcode < (1 << 10))
- bcode <<= 10;
+ if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+ SIGTRAP) == NOTIFY_STOP)
+ return;
/*
- * (A short test says that IRIX 5.3 sends SIGTRAP for all break
- * insns, even for break codes that indicate arithmetic failures.
- * Weird ...)
+ * A short test says that IRIX 5.3 sends SIGTRAP for all trap
+ * insns, even for trap and break codes that indicate arithmetic
+ * failures. Weird ...
* But should we continue the brokenness??? --macro
*/
- switch (bcode) {
- case BRK_OVERFLOW << 10:
- case BRK_DIVZERO << 10:
- die_if_kernel("Break instruction in kernel code", regs);
- if (bcode == (BRK_DIVZERO << 10))
+ switch (code) {
+ case BRK_OVERFLOW:
+ case BRK_DIVZERO:
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
+ if (code == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
info.si_code = FPE_INTOVF;
@@ -695,93 +828,206 @@ asmlinkage void do_bp(struct pt_regs *regs)
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
- die("Kernel bug detected", regs);
+ die_if_kernel("Kernel bug detected", regs);
+ force_sig(SIGTRAP, current);
+ break;
+ case BRK_MEMU:
+ /*
+ * Address errors may be deliberately induced by the FPU
+ * emulator to retake control of the CPU after executing the
+ * instruction in the delay slot of an emulated branch.
+ *
+ * Terminate if the exception was recognized as a delay-slot
+ * return; otherwise handle as normal.
+ */
+ if (do_dsemulret(regs))
+ return;
+
+ die_if_kernel("Math emu break/trap", regs);
+ force_sig(SIGTRAP, current);
break;
default:
- die_if_kernel("Break instruction in kernel code", regs);
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
force_sig(SIGTRAP, current);
}
- return;
-
-out_sigsegv:
- force_sig(SIGSEGV, current);
}
-asmlinkage void do_tr(struct pt_regs *regs)
+asmlinkage void do_bp(struct pt_regs *regs)
{
- unsigned int opcode, tcode = 0;
- siginfo_t info;
-
- if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
+ unsigned int opcode, bcode;
+ enum ctx_state prev_state;
+ unsigned long epc;
+ u16 instr[2];
+ mm_segment_t seg;
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
+ prev_state = exception_enter();
+ if (get_isa16_mode(regs->cp0_epc)) {
+ /* Calculate EPC. */
+ epc = exception_epc(regs);
+ if (cpu_has_mmips) {
+ if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+ (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ } else {
+ /* MIPS16e mode */
+ if (__get_user(instr[0],
+ (u16 __user *)msk_isa16_mode(epc)))
+ goto out_sigsegv;
+ bcode = (instr[0] >> 6) & 0x3f;
+ do_trap_or_bp(regs, bcode, "Break");
+ goto out;
+ }
+ } else {
+ if (__get_user(opcode,
+ (unsigned int __user *) exception_epc(regs)))
+ goto out_sigsegv;
+ }
- /* Immediate versions don't provide a code. */
- if (!(opcode & OPCODE))
- tcode = ((opcode >> 6) & ((1 << 10) - 1));
+ /*
+ * There is an ancient bug in MIPS assemblers: the break code is
+ * placed starting at bit 16 instead of bit 6 in the opcode.
+ * Gas is bug-compatible, but not always, grrr...
+ * We handle both cases with a simple heuristic.  --macro
+ */
+ bcode = ((opcode >> 6) & ((1 << 20) - 1));
+ if (bcode >= (1 << 10))
+ bcode >>= 10;
/*
- * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
- * insns, even for trap codes that indicate arithmetic failures.
- * Weird ...)
- * But should we continue the brokenness??? --macro
+ * Notify the kprobe handlers if the instruction is likely to
+ * pertain to them.
*/
- switch (tcode) {
- case BRK_OVERFLOW:
- case BRK_DIVZERO:
- die_if_kernel("Trap instruction in kernel code", regs);
- if (tcode == BRK_DIVZERO)
- info.si_code = FPE_INTDIV;
+ switch (bcode) {
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "debug", regs, bcode,
+ regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ goto out;
else
- info.si_code = FPE_INTOVF;
- info.si_signo = SIGFPE;
- info.si_errno = 0;
- info.si_addr = (void __user *) regs->cp0_epc;
- force_sig_info(SIGFPE, &info, current);
- break;
- case BRK_BUG:
- die("Kernel bug detected", regs);
- break;
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
+ regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
default:
- die_if_kernel("Trap instruction in kernel code", regs);
- force_sig(SIGTRAP, current);
+ break;
}
+
+ do_trap_or_bp(regs, bcode, "Break");
+
+out:
+ set_fs(seg);
+ exception_exit(prev_state);
return;
out_sigsegv:
force_sig(SIGSEGV, current);
+ goto out;
+}
+
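+/*
+ * Illustration only, not part of this patch: how the heuristic above
+ * normalizes the two historical "break" encodings. A hosted-C demo;
+ * the 0xd funct field marks the BREAK instruction.
+ */
+#include <stdio.h>
+
+int main(void)
+{
+        /* "break 7" with the code at bit 16 (buggy assemblers) ...    */
+        unsigned int ops[2] = { 0x0000000d | (7 << 16),
+        /* ... and with the code at bit 6 (the architectural position). */
+                                0x0000000d | (7 << 6) };
+
+        for (int i = 0; i < 2; i++) {
+                unsigned int bcode = (ops[i] >> 6) & ((1 << 20) - 1);
+                if (bcode >= (1 << 10))
+                        bcode >>= 10;
+                printf("opcode %08x -> bcode %u\n", ops[i], bcode);
+        }
+        return 0;       /* prints bcode 7 for both encodings */
+}
+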
+asmlinkage void do_tr(struct pt_regs *regs)
+{
+ u32 opcode, tcode = 0;
+ enum ctx_state prev_state;
+ u16 instr[2];
+ mm_segment_t seg;
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(get_ds());
+
+ prev_state = exception_enter();
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
+ __get_user(instr[1], (u16 __user *)(epc + 2)))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 12) & ((1 << 4) - 1);
+ } else {
+ if (__get_user(opcode, (u32 __user *)epc))
+ goto out_sigsegv;
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 6) & ((1 << 10) - 1);
+ }
+
+ do_trap_or_bp(regs, tcode, "Trap");
+
+out:
+ set_fs(seg);
+ exception_exit(prev_state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV, current);
+ goto out;
}
asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
+ unsigned long old31 = regs->regs[31];
+ enum ctx_state prev_state;
unsigned int opcode = 0;
int status = -1;
+ prev_state = exception_enter();
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+ SIGILL) == NOTIFY_STOP)
+ goto out;
+
die_if_kernel("Reserved instruction in kernel code", regs);
if (unlikely(compute_return_epc(regs) < 0))
- return;
+ goto out;
+
+ if (get_isa16_mode(regs->cp0_epc)) {
+ unsigned short mmop[2] = { 0 };
- if (unlikely(get_user(opcode, epc) < 0))
- status = SIGSEGV;
+ if (unlikely(get_user(mmop[0], epc) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], epc) < 0))
+ status = SIGSEGV;
+ opcode = (mmop[0] << 16) | mmop[1];
- if (!cpu_has_llsc && status < 0)
- status = simulate_llsc(regs, opcode);
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
+ } else {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
- if (status < 0)
- status = simulate_rdhwr(regs, opcode);
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
- if (status < 0)
- status = simulate_sync(regs, opcode);
+ if (status < 0)
+ status = simulate_rdhwr_normal(regs, opcode);
+
+ if (status < 0)
+ status = simulate_sync(regs, opcode);
+ }
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
force_sig(status, current);
}
+
+out:
+ exception_exit(prev_state);
}
/*
@@ -802,114 +1048,302 @@ static void mt_ase_fp_affinity(void)
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
- cpus_and(tmask, current->thread.user_cpus_allowed,
- mt_fpu_cpumask);
- set_cpus_allowed(current, tmask);
+ current->thread.user_cpus_allowed
+ = current->cpus_allowed;
+ cpus_and(tmask, current->cpus_allowed,
+ mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(cu2_chain);
+
+int __ref register_cu2_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&cu2_chain, nb);
+}
+
+int cu2_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&cu2_chain, val, v);
+}
+
+static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ struct pt_regs *regs = data;
+
+ die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
+ "instruction", regs);
+ force_sig(SIGILL, current);
+
+ return NOTIFY_OK;
+}
+
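+/*
+ * Illustration only, not part of this patch: how a coprocessor 2 owner
+ * would hook this chain, assuming the cu2_notifier() helper from
+ * <asm/cop2.h> that trap_init() itself uses below. Returning
+ * NOTIFY_STOP keeps default_cu2_call() from sending SIGILL.
+ */
+static int my_cu2_call(struct notifier_block *nfb, unsigned long action,
+                       void *data)
+{
+        if (action != CU2_EXCEPTION)
+                return NOTIFY_DONE;
+
+        /*
+         * Grant coprocessor 2 access and retry the faulting instruction.
+         * A real driver would also record CU2 in the thread's saved
+         * Status so the grant survives a context switch.
+         */
+        set_c0_status(ST0_CU2);
+        return NOTIFY_STOP;
+}
+
+static int __init my_cu2_init(void)
+{
+        return cu2_notifier(my_cu2_call, 0);
+}
+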
+static int enable_restore_fp_context(int msa)
+{
+ int err, was_fpu_owner;
+
+ if (!used_math()) {
+ /* First time FP context user. */
+ err = init_fpu();
+ if (msa && !err)
+ enable_msa();
+ if (!err)
+ set_used_math();
+ return err;
+ }
+
+ /*
+ * This task has formerly used the FP context.
+ *
+ * If this thread has no live MSA vector context then we can simply
+ * restore the scalar FP context. If it has live MSA vector context
+ * (that is, it has or may have used MSA since last performing a
+ * function call) then we'll need to restore the vector context. This
+ * applies even if we're currently only executing a scalar FP
+ * instruction. This is because if we were to later execute an MSA
+ * instruction then we'd either have to:
+ *
+ * - Restore the vector context & clobber any registers modified by
+ * scalar FP instructions between now & then.
+ *
+ * or
+ *
+ * - Not restore the vector context & lose the most significant bits
+ * of all vector registers.
+ *
+ * Neither of those options is acceptable. We cannot restore the least
+ * significant bits of the registers now & only restore the most
+ * significant bits later because the most significant bits of any
+ * vector registers whose aliased FP register is modified now will have
+ * been zeroed. We'd have no way to know that when restoring the vector
+ * context & thus may load an outdated value for the most significant
+ * bits of a vector register.
+ */
+ if (!msa && !thread_msa_context_live())
+ return own_fpu(1);
+
+ /*
+ * This task is using or has previously used MSA. Thus we require
+ * that Status.FR == 1.
+ */
+ was_fpu_owner = is_fpu_owner();
+ err = own_fpu(0);
+ if (err)
+ return err;
+
+ enable_msa();
+ write_msa_csr(current->thread.fpu.msacsr);
+ set_thread_flag(TIF_USEDMSA);
+
+ /*
+ * If this is the first time that the task is using MSA and it has
+ * previously used scalar FP in this time slice then we already have
+ * FP context which we shouldn't clobber.
+ */
+ if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
+ return 0;
+
+ /* We need to restore the vector context. */
+ restore_msa(current);
+ return 0;
+}
+
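+/*
+ * Illustration only, not part of this patch: a hosted-C model of the
+ * register aliasing described above. Each 64-bit scalar FP register is
+ * the low half of the corresponding 128-bit MSA register, and a scalar
+ * FP write leaves the upper half zeroed, so the upper bits cannot be
+ * recovered by a later partial restore.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+union msa_reg_model {
+        uint64_t fp;            /* scalar FP view: low 64 bits  */
+        uint64_t vec[2];        /* MSA view: all 128 bits       */
+};
+
+int main(void)
+{
+        union msa_reg_model w0 = { .vec = { 0x1111111111111111ULL,
+                                            0x2222222222222222ULL } };
+
+        w0.fp = 0x3333333333333333ULL;  /* scalar FP write ...          */
+        w0.vec[1] = 0;                  /* ... zeroes the upper half    */
+
+        printf("low %016llx high %016llx\n",
+               (unsigned long long)w0.vec[0],
+               (unsigned long long)w0.vec[1]);
+        return 0;
+}
+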
asmlinkage void do_cpu(struct pt_regs *regs)
{
+ enum ctx_state prev_state;
unsigned int __user *epc;
- unsigned long old_epc;
+ unsigned long old_epc, old31;
unsigned int opcode;
unsigned int cpid;
- int status;
-
- die_if_kernel("do_cpu invoked from kernel context!", regs);
+ int status, err;
+ unsigned long __maybe_unused flags;
+ prev_state = exception_enter();
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
+ if (cpid != 2)
+ die_if_kernel("do_cpu invoked from kernel context!", regs);
+
switch (cpid) {
case 0:
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
+ old31 = regs->regs[31];
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
- return;
+ goto out;
- if (unlikely(get_user(opcode, epc) < 0))
- status = SIGSEGV;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ unsigned short mmop[2] = { 0 };
- if (!cpu_has_llsc && status < 0)
- status = simulate_llsc(regs, opcode);
+ if (unlikely(get_user(mmop[0], epc) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], epc) < 0))
+ status = SIGSEGV;
+ opcode = (mmop[0] << 16) | mmop[1];
- if (status < 0)
- status = simulate_rdhwr(regs, opcode);
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
+ } else {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr_normal(regs, opcode);
+ }
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
force_sig(status, current);
}
- return;
+ goto out;
+
+ case 3:
+ /*
+ * Old (MIPS I and MIPS II) processors will set this code
+ * for COP1X opcode instructions that replaced the original
+ * COP3 space. We don't limit COP1 space instructions in
+ * the emulator according to the CPU ISA, so we want to
+ * treat COP1X instructions consistently regardless of which
+ * code the CPU chose. Therefore we redirect this trap to
+ * the FP emulator too.
+ *
+ * Then some newer FPU-less processors use this code
+ * erroneously too, so they are covered by this choice
+ * as well.
+ */
+ if (raw_cpu_has_fpu)
+ break;
+ /* Fall through. */
case 1:
- if (used_math()) /* Using the FPU again. */
- own_fpu(1);
- else { /* First time FPU user. */
- init_fpu();
- set_used_math();
- }
+ err = enable_restore_fp_context(0);
- if (!raw_cpu_has_fpu) {
+ if (!raw_cpu_has_fpu || err) {
int sig;
+ void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
- &current->thread.fpu, 0);
- if (sig)
- force_sig(sig, current);
- else
+ &current->thread.fpu,
+ 0, &fault_addr);
+ if (!process_fpemu_return(sig, fault_addr) && !err)
mt_ase_fp_affinity();
}
- return;
+ goto out;
case 2:
- case 3:
- break;
+ raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+ goto out;
}
force_sig(SIGILL, current);
+
+out:
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa_fpe(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+ force_sig(SIGFPE, current);
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ int err;
+
+ prev_state = exception_enter();
+
+ if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
+ force_sig(SIGILL, current);
+ goto out;
+ }
+
+ die_if_kernel("do_msa invoked from kernel context!", regs);
+
+ err = enable_restore_fp_context(1);
+ if (err)
+ force_sig(SIGILL, current);
+out:
+ exception_exit(prev_state);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
force_sig(SIGILL, current);
+ exception_exit(prev_state);
}
+/*
+ * Called with interrupts disabled.
+ */
asmlinkage void do_watch(struct pt_regs *regs)
{
- if (board_watchpoint_handler) {
- (*board_watchpoint_handler)(regs);
- return;
- }
+ enum ctx_state prev_state;
+ u32 cause;
+ prev_state = exception_enter();
/*
- * We use the watch exception where available to detect stack
- * overflows.
+ * Clear the WP bit (bit 22) of the cause register so we don't
+ * loop forever.
*/
- dump_tlb_all();
- show_regs(regs);
- panic("Caught WATCH exception - probably caused by stack overflow.");
+ cause = read_c0_cause();
+ cause &= ~(1 << 22);
+ write_c0_cause(cause);
+
+ /*
+ * If the current thread has the watch registers loaded, save
+ * their values and send SIGTRAP. Otherwise another thread
+ * left the registers set, clear them and continue.
+ */
+ if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
+ mips_read_watch_registers();
+ local_irq_enable();
+ force_sig(SIGTRAP, current);
+ } else {
+ mips_clear_watch_registers();
+ local_irq_enable();
+ }
+ exception_exit(prev_state);
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
+ enum ctx_state prev_state;
+ prev_state = exception_enter();
show_regs(regs);
if (multi_match) {
- printk("Index : %0x\n", read_c0_index());
+ printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
@@ -952,7 +1386,7 @@ asmlinkage void do_mt(struct pt_regs *regs)
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
break;
case 5:
- printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
+ printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
break;
default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
@@ -968,7 +1402,7 @@ asmlinkage void do_mt(struct pt_regs *regs)
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
- panic("Unexpected DSP exception\n");
+ panic("Unexpected DSP exception");
force_sig(SIGILL, current);
}
@@ -976,7 +1410,7 @@ asmlinkage void do_dsp(struct pt_regs *regs)
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
- * Game over - no way to handle this if it ever occurs. Most probably
+ * Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
@@ -985,6 +1419,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+ l1parity = 0;
+ return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+ l2parity = 0;
+ return 1;
+}
+__setup("nol2par", nol2parity);
+
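+/*
+ * Illustration only, not part of this patch: the two flags above are
+ * consumed from the kernel command line before
+ * parity_protection_init() runs, e.g.
+ *
+ *      console=ttyS0,115200 nol1par nol2par
+ *
+ * which forces both ErrCtl parity enables off even when the probe
+ * below finds them supported.
+ */
+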
/*
* Some MIPS CPUs can enable/disable cache parity detection, but they
* do it in different ways.
@@ -994,7 +1443,69 @@ static inline void parity_protection_init(void)
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ {
+#define ERRCTL_PE 0x80000000
+#define ERRCTL_L2P 0x00800000
+ unsigned long errctl;
+ unsigned int l1parity_present, l2parity_present;
+
+ errctl = read_c0_ecc();
+ errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+ /* probe L1 parity support */
+ write_c0_ecc(errctl | ERRCTL_PE);
+ back_to_back_c0_hazard();
+ l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+ /* probe L2 parity support */
+ write_c0_ecc(errctl|ERRCTL_L2P);
+ back_to_back_c0_hazard();
+ l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
+ if (l1parity_present && l2parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ if (l1parity ^ l2parity)
+ errctl |= ERRCTL_L2P;
+ } else if (l1parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ } else if (l2parity_present) {
+ if (l2parity)
+ errctl |= ERRCTL_L2P;
+ } else {
+ /* No parity available */
+ }
+
+ printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+ write_c0_ecc(errctl);
+ back_to_back_c0_hazard();
+ errctl = read_c0_ecc();
+ printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+ if (l1parity_present)
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (errctl & ERRCTL_PE) ? "en" : "dis");
+
+ if (l2parity_present) {
+ if (l1parity_present && l1parity)
+ errctl ^= ERRCTL_L2P;
+ printk(KERN_INFO "L2 cache parity protection %sabled\n",
+ (errctl & ERRCTL_L2P) ? "en" : "dis");
+ }
+ }
+ break;
+
case CPU_5KC:
+ case CPU_5KE:
+ case CPU_LOONGSON1:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1027,14 +1538,27 @@ asmlinkage void cache_parity_error(void)
printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
- printk("Error bits: %s%s%s%s%s%s%s\n",
- reg_val & (1<<29) ? "ED " : "",
- reg_val & (1<<28) ? "ET " : "",
- reg_val & (1<<26) ? "EE " : "",
- reg_val & (1<<25) ? "EB " : "",
- reg_val & (1<<24) ? "EI " : "",
- reg_val & (1<<23) ? "E1 " : "",
- reg_val & (1<<22) ? "E0 " : "");
+ if (cpu_has_mips_r2 &&
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+ pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<27) ? "ES " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI " : "",
+ reg_val & (1<<23) ? "E1 " : "",
+ reg_val & (1<<22) ? "E0 " : "");
+ } else {
+ pr_err("Error bits: %s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI " : "",
+ reg_val & (1<<23) ? "E1 " : "",
+ reg_val & (1<<22) ? "E0 " : "");
+ }
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
@@ -1048,6 +1572,34 @@ asmlinkage void cache_parity_error(void)
panic("Can't handle the cache error!");
}
+asmlinkage void do_ftlb(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int reg_val;
+
+ /* For the moment, report the problem and hang. */
+ if (cpu_has_mips_r2 &&
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+ pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
+ read_c0_ecc());
+ pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+ reg_val = read_c0_cacheerr();
+ pr_err("c0_cacheerr == %08x\n", reg_val);
+
+ if ((reg_val & 0xc0000000) == 0xc0000000) {
+ pr_err("Decoded c0_cacheerr: FTLB parity error\n");
+ } else {
+ pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ }
+ } else {
+ pr_err("FTLB error exception\n");
+ }
+ /* Just print the cacheerr bits for now */
+ cache_parity_error();
+}
+
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
@@ -1055,7 +1607,7 @@ asmlinkage void cache_parity_error(void)
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
- unsigned long depc, old_epc;
+ unsigned long depc, old_epc, old_ra;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1070,10 +1622,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
* calculation.
*/
old_epc = regs->cp0_epc;
+ old_ra = regs->regs[31];
regs->cp0_epc = depc;
- __compute_return_epc(regs);
+ compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
} else
depc += 4;
write_c0_depc(depc);
@@ -1086,12 +1640,25 @@ void ejtag_exception_handler(struct pt_regs *regs)
/*
* NMI exception handler.
+ * No lock; only written during early bootup by CPU 0.
*/
-NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
+static RAW_NOTIFIER_HEAD(nmi_chain);
+
+int register_nmi_notifier(struct notifier_block *nb)
{
+ return raw_notifier_chain_register(&nmi_chain, nb);
+}
+
+void __noreturn nmi_exception_handler(struct pt_regs *regs)
+{
+ char str[100];
+
+ raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
- printk("NMI taken!!!!\n");
- die("NMI", regs);
+ snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
+ smp_processor_id(), regs->cp0_epc);
+ regs->cp0_epc = read_c0_errorepc();
+ die(str, regs);
}
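/*
 * Illustration only, not part of this patch: a platform hooking the
 * new NMI chain, e.g. to capture device state before the die() above.
 * Returning NOTIFY_OK lets the remaining notifiers and the default
 * handling run.
 */
static int board_nmi_call(struct notifier_block *nb, unsigned long action,
                          void *data)
{
        struct pt_regs *regs = data;

        pr_emerg("board: NMI taken, EPC=%lx\n", regs->cp0_epc);
        return NOTIFY_OK;
}

static struct notifier_block board_nmi_nb = {
        .notifier_call = board_nmi_call,
};

static int __init board_nmi_init(void)
{
        return register_nmi_notifier(&board_nmi_nb);
}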
#define VECTORSPACING 0x100 /* for EI/VI mode */
@@ -1100,26 +1667,46 @@ unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
-/*
- * As a side effect of the way this is implemented we're limited
- * to interrupt handlers in the address range from
- * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
- */
-void *set_except_vector(int n, void *addr)
+void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
- unsigned long old_handler = exception_handlers[n];
+ unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Only the TLB handlers are cache aligned with an even
+ * address. All other handlers are on an odd address and
+ * require no modification. Otherwise, MIPS32 mode will
+ * be entered when handling any TLB exceptions. That
+ * would be bad... since we must stay in microMIPS mode.
+ */
+ if (!(handler & 0x1))
+ handler |= 1;
+#endif
+ old_handler = xchg(&exception_handlers[n], handler);
- exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
- *(u32 *)(ebase + 0x200) = 0x08000000 |
- (0x03ffffff & (handler >> 2));
- flush_icache_range(ebase + 0x200, ebase + 0x204);
+#ifdef CONFIG_CPU_MICROMIPS
+ unsigned long jump_mask = ~((1 << 27) - 1);
+#else
+ unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
+ u32 *buf = (u32 *)(ebase + 0x200);
+ unsigned int k0 = 26;
+ if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
+ uasm_i_j(&buf, handler & ~jump_mask);
+ uasm_i_nop(&buf);
+ } else {
+ UASM_i_LA(&buf, k0, handler);
+ uasm_i_jr(&buf, k0);
+ uasm_i_nop(&buf);
+ }
+ local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
@@ -1130,18 +1717,17 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
- u32 *w;
+ u16 *h;
unsigned char *b;
- if (!cpu_has_veic && !cpu_has_vint)
- BUG();
+ BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
- vi_handlers[n] = (unsigned long) addr;
+ vi_handlers[n] = handler;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
@@ -1160,23 +1746,21 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
- * that does normal register saving and a standard interrupt exit
+ * that does normal register saving and standard interrupt exit
*/
-
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
-#ifdef CONFIG_MIPS_MT_SMTC
- /*
- * We need to provide the SMTC vectored interrupt handler
- * not only with the address of the handler, but with the
- * Status.IM bit to be masked before going there.
- */
- extern char except_vec_vi_mori;
- const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
-#endif /* CONFIG_MIPS_MT_SMTC */
- const int handler_len = &except_vec_vi_end - &except_vec_vi;
- const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
- const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
+ extern char rollback_except_vec_vi;
+ char *vec_start = using_rollback_handler() ?
+ &rollback_except_vec_vi : &except_vec_vi;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+ const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+ const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
+ const int lui_offset = &except_vec_vi_lui - vec_start;
+ const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+ const int handler_len = &except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
/*
@@ -1186,30 +1770,40 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
panic("VECTORSPACING too small");
}
- memcpy(b, &except_vec_vi, handler_len);
-#ifdef CONFIG_MIPS_MT_SMTC
- BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
-
- w = (u32 *)(b + mori_offset);
- *w = (*w & 0xffff0000) | (0x100 << n);
-#endif /* CONFIG_MIPS_MT_SMTC */
- w = (u32 *)(b + lui_offset);
- *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
- w = (u32 *)(b + ori_offset);
- *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
- flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
+ set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+ (handler_len - 1));
+#else
+ handler_len);
+#endif
+ h = (u16 *)(b + lui_offset);
+ *h = (handler >> 16) & 0xffff;
+ h = (u16 *)(b + ori_offset);
+ *h = (handler & 0xffff);
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+handler_len));
}
else {
/*
- * In other cases jump directly to the interrupt handler
- *
- * It is the handlers responsibility to save registers if required
- * (eg hi/lo) and return from the exception using "eret"
+ * In other cases jump directly to the interrupt handler. It
+ * is the handler's responsibility to save registers if required
+ * (eg hi/lo) and return from the exception using "eret".
*/
- w = (u32 *)b;
- *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
- *w = 0;
- flush_icache_range((unsigned long)b, (unsigned long)(b+8));
+ u32 insn;
+
+ h = (u16 *)b;
+ /* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+ insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+ insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+ h[0] = (insn >> 16) & 0xffff;
+ h[1] = insn & 0xffff;
+ h[2] = 0;
+ h[3] = 0;
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+8));
}
return (void *)old_handler;
@@ -1220,85 +1814,14 @@ void *set_vi_handler(int n, vi_handler_t addr)
return set_vi_srs_handler(n, addr, 0);
}
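/*
 * Illustration only, not part of this patch: the MIPS32 J-format
 * encoding built above. The jump keeps the upper four bits of the
 * delay-slot PC, which is why handler and vector must sit in the same
 * 256 MB segment. Hosted C with a hypothetical KSEG0 handler address.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t handler = 0x80401000;          /* hypothetical address */
        uint32_t insn = 0x08000000 | ((handler & 0x0fffffff) >> 2);
        uint32_t delay_slot_pc = 0x80200004;    /* vector + 4           */
        uint32_t target = (delay_slot_pc & 0xf0000000) |
                          ((insn & 0x03ffffff) << 2);

        printf("j encodes as %08x, jumps to %08x\n", insn, target);
        return 0;       /* target == handler */
}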
-/*
- * This is used by native signal handling
- */
-asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
-asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
-
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
-
-#ifdef CONFIG_SMP
-static int smp_save_fp_context(struct sigcontext __user *sc)
-{
- return raw_cpu_has_fpu
- ? _save_fp_context(sc)
- : fpu_emulator_save_context(sc);
-}
-
-static int smp_restore_fp_context(struct sigcontext __user *sc)
-{
- return raw_cpu_has_fpu
- ? _restore_fp_context(sc)
- : fpu_emulator_restore_context(sc);
-}
-#endif
-
-static inline void signal_init(void)
-{
-#ifdef CONFIG_SMP
- /* For now just do the cpu_has_fpu check when the functions are invoked */
- save_fp_context = smp_save_fp_context;
- restore_fp_context = smp_restore_fp_context;
-#else
- if (cpu_has_fpu) {
- save_fp_context = _save_fp_context;
- restore_fp_context = _restore_fp_context;
- } else {
- save_fp_context = fpu_emulator_save_context;
- restore_fp_context = fpu_emulator_restore_context;
- }
-#endif
-}
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * This is used by 32-bit signal stuff on the 64-bit kernel
- */
-asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
-asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
-
-static inline void signal32_init(void)
-{
- if (cpu_has_fpu) {
- save_fp_context32 = _save_fp_context32;
- restore_fp_context32 = _restore_fp_context32;
- } else {
- save_fp_context32 = fpu_emulator_save_context32;
- restore_fp_context32 = fpu_emulator_restore_context32;
- }
-}
-#endif
-
-extern void cpu_cache_init(void);
extern void tlb_init(void);
-extern void flush_tlb_handlers(void);
/*
* Timer interrupt
*/
int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
+int cp0_compare_irq_shift;
/*
* Performance counter IRQ or -1 if shared with timer
@@ -1306,59 +1829,60 @@ int cp0_compare_irq;
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
-void __cpuinit per_cpu_trap_init(void)
-{
- unsigned int cpu = smp_processor_id();
- unsigned int status_set = ST0_CU0;
-#ifdef CONFIG_MIPS_MT_SMTC
- int secondaryTC = 0;
- int bootTC = (cpu == 0);
+static int noulri;
- /*
- * Only do per_cpu_trap_init() for first TC of Each VPE.
- * Note that this hack assumes that the SMTC init code
- * assigns TCs consecutively and in ascending order.
- */
+static int __init ulri_disable(char *s)
+{
+ pr_info("Disabling ulri\n");
+ noulri = 1;
- if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
- ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
- secondaryTC = 1;
-#endif /* CONFIG_MIPS_MT_SMTC */
+ return 1;
+}
+__setup("noulri", ulri_disable);
+/* configure STATUS register */
+static void configure_status(void)
+{
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
+ unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
- if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
+ if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+}
-#ifdef CONFIG_CPU_MIPSR2
- if (cpu_has_mips_r2) {
- unsigned int enable = 0x0000000f;
+/* configure HWRENA register */
+static void configure_hwrena(void)
+{
+ unsigned int hwrena = cpu_hwrena_impl_bits;
- if (cpu_has_userlocal)
- enable |= (1 << 29);
+ if (cpu_has_mips_r2)
+ hwrena |= 0x0000000f;
- write_c0_hwrena(enable);
- }
-#endif
+ if (!noulri && cpu_has_userlocal)
+ hwrena |= (1 << 29);
-#ifdef CONFIG_MIPS_MT_SMTC
- if (!secondaryTC) {
-#endif /* CONFIG_MIPS_MT_SMTC */
+ if (hwrena)
+ write_c0_hwrena(hwrena);
+}
+static void configure_exception_vector(void)
+{
if (cpu_has_veic || cpu_has_vint) {
+ unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
+ write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
@@ -1370,6 +1894,16 @@ void __cpuinit per_cpu_trap_init(void)
} else
set_c0_cause(CAUSEF_IV);
}
+}
+
+void per_cpu_trap_init(bool is_boot_cpu)
+{
+ unsigned int cpu = smp_processor_id();
+
+ configure_status();
+ configure_hwrena();
+
+ configure_exception_vector();
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
@@ -1378,64 +1912,55 @@ void __cpuinit per_cpu_trap_init(void)
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
- cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
- cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
+ cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
+ cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
+ cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+ cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
cp0_perfcount_irq = -1;
}
-#ifdef CONFIG_MIPS_MT_SMTC
- }
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
- TLBMISS_HANDLER_SETUP();
+ if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
-#ifdef CONFIG_MIPS_MT_SMTC
- if (bootTC) {
-#endif /* CONFIG_MIPS_MT_SMTC */
- cpu_cache_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
tlb_init();
-#ifdef CONFIG_MIPS_MT_SMTC
- } else if (!secondaryTC) {
- /*
- * First TC in non-boot VPE must do subset of tlb_init()
- * for MMU countrol registers.
- */
- write_c0_pagemask(PM_DEFAULT_MASK);
- write_c0_wired(0);
- }
-#endif /* CONFIG_MIPS_MT_SMTC */
+ TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
-void __init set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, void *addr, unsigned long size)
{
+#ifdef CONFIG_CPU_MICROMIPS
+ memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
memcpy((void *)(ebase + offset), addr, size);
- flush_icache_range(ebase + offset, ebase + offset + size);
+#endif
+ local_flush_icache_range(ebase + offset, ebase + offset + size);
}
-static char panic_null_cerr[] __cpuinitdata =
+static char panic_null_cerr[] =
"Trying to set NULL cache error exception handler";
-/* Install uncached CPU exception handler */
-void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception, which is the
+ * only exception handler that is run uncached.
+ */
+void set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
-#ifdef CONFIG_32BIT
- unsigned long uncached_ebase = KSEG1ADDR(ebase);
-#endif
-#ifdef CONFIG_64BIT
- unsigned long uncached_ebase = TO_UNCAC(ebase);
-#endif
+ unsigned long uncached_ebase = CKSEG1ADDR(ebase);
if (!addr)
panic(panic_null_cerr);
@@ -1454,16 +1979,45 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
- extern char except_vec3_generic, except_vec3_r4000;
+ extern char except_vec3_generic;
extern char except_vec4;
+ extern char except_vec3_r4000;
unsigned long i;
- if (cpu_has_veic || cpu_has_vint)
- ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
- else
- ebase = CAC_BASE;
+ check_wait();
+
+#if defined(CONFIG_KGDB)
+ if (kgdb_early_setup)
+ return; /* Already done */
+#endif
- per_cpu_trap_init();
+ if (cpu_has_veic || cpu_has_vint) {
+ unsigned long size = 0x200 + VECTORSPACING*64;
+ ebase = (unsigned long)
+ __alloc_bootmem(size, 1 << fls(size), 0);
+ } else {
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0 0x40000000
+ ebase = KVM_GUEST_KSEG0;
+#else
+ ebase = CKSEG0;
+#endif
+ if (cpu_has_mips_r2)
+ ebase += (read_c0_ebase() & 0x3ffff000);
+ }
+
+ if (cpu_has_mmips) {
+ unsigned int config3 = read_c0_config3();
+
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
+ write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
+ else
+ write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
+ }
+
+ if (board_ebase_setup)
+ board_ebase_setup();
+ per_cpu_trap_init(true);
/*
* Copy the generic exception handlers to their final destination.
@@ -1516,7 +2070,8 @@ void __init trap_init(void)
if (board_be_init)
board_be_init();
- set_except_vector(0, handle_int);
+ set_except_vector(0, using_rollback_handler() ? rollback_handle_int
+ : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
@@ -1535,6 +2090,7 @@ void __init trap_init(void)
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
+ set_except_vector(14, handle_msa_fpe);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
@@ -1542,7 +2098,7 @@ void __init trap_init(void)
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
- * written yet. Well, anyway there is no R6000 machine on the
+ * written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
@@ -1557,6 +2113,8 @@ void __init trap_init(void)
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
+ set_except_vector(16, handle_ftlb);
+ set_except_vector(21, handle_msa);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
@@ -1567,19 +2125,49 @@ void __init trap_init(void)
set_except_vector(26, handle_dsp);
+ if (board_cache_error_setup)
+ board_cache_error_setup();
+
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
+ set_handler(0x180, &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
+ set_handler(0x180, &except_vec3_generic, 0x80);
else
- memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
+ set_handler(0x080, &except_vec3_generic, 0x80);
- signal_init();
-#ifdef CONFIG_MIPS32_COMPAT
- signal32_init();
-#endif
+ local_flush_icache_range(ebase, ebase + 0x400);
+
+ sort_extable(__start___dbe_table, __stop___dbe_table);
- flush_icache_range(ebase, ebase + 0x400);
- flush_tlb_handlers();
+ cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
+}
+
+static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ configure_status();
+ configure_hwrena();
+ configure_exception_vector();
+
+ /* Restore register with CPU number for TLB handlers */
+ TLBMISS_HANDLER_RESTORE();
+
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block trap_pm_notifier_block = {
+ .notifier_call = trap_pm_notifier,
+};
+
+static int __init trap_pm_init(void)
+{
+ return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
+arch_initcall(trap_pm_init);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index c327b21bca8..2b3517214d6 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
@@ -21,11 +22,11 @@
*
* For now I enable fixing of address errors by default to make life easier.
* I intend, however, to disable this at some point in the future when the alignment
- * problems with user programs have been fixed. For programmers this is the
+ * problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per process option. The option is inherited
- * across fork(2) and execve(2) calls. If you really want to use the
+ * across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
@@ -43,49 +44,55 @@
* #include <sys/sysmips.h>
*
* struct foo {
- * unsigned char bar[8];
+ * unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
- * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
- * unsigned int *p = (unsigned int *) (x.bar + 3);
- * int i;
+ * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
+ * unsigned int *p = (unsigned int *) (x.bar + 3);
+ * int i;
*
- * if (argc > 1)
- * sysmips(MIPS_FIXADE, atoi(argv[1]));
+ * if (argc > 1)
+ * sysmips(MIPS_FIXADE, atoi(argv[1]));
*
- * printf("*p = %08lx\n", *p);
+ * printf("*p = %08lx\n", *p);
*
- * *p = 0xdeadface;
+ * *p = 0xdeadface;
*
- * for(i = 0; i <= 7; i++)
- * printf("%02x ", x.bar[i]);
- * printf("\n");
+ * for(i = 0; i <= 7; i++)
+ * printf("%02x ", x.bar[i]);
+ * printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
- * exception for the R6000.
- * A store crossing a page boundary might be executed only partially.
- * Undo the partial store in this case.
+ * exception for the R6000.
+ * A store crossing a page boundary might be executed only partially.
+ * Undo the partial store in this case.
*/
+#include <linux/context_tracking.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
+#include <asm/cop2.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
-#include <asm/system.h>
-#define STR(x) __STR(x)
+#define STR(x) __STR(x)
#define __STR(x) #x
enum {
@@ -101,14 +108,336 @@ static u32 unaligned_action;
#endif
extern void show_registers(struct pt_regs *regs);
+#ifdef __BIG_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\t"user_lb("%0", "0(%2)")"\n" \
+ "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\t"user_lwl("%0", "(%2)")"\n" \
+ "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"user_lbu("%0", "0(%2)")"\n" \
+ "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\t"user_lwl("%0", "(%2)")"\n" \
+ "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, (%2)\n" \
+ "2:\tldr\t%0, 7(%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"user_sb("%1", "1(%2)")"\n" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\t"user_sb("$1", "0(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\t"user_swl("%1", "(%2)")"\n" \
+ "2:\t"user_swr("%1", "3(%2)")"\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1,(%2)\n" \
+ "2:\tsdr\t%1, 7(%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\t"user_lb("%0", "1(%2)")"\n" \
+ "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\t"user_lwl("%0", "3(%2)")"\n" \
+ "2:\t"user_lwr("%0", "(%2)")"\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"user_lbu("%0", "1(%2)")"\n" \
+ "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\t"user_lwl("%0", "3(%2)")"\n" \
+ "2:\t"user_lwr("%0", "(%2)")"\n\t" \
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tldl\t%0, 7(%2)\n" \
+ "2:\tldr\t%0, (%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\t"user_sb("%1", "0(%2)")"\n" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\t"user_sb("$1", "1(%2)")"\n" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\t"user_swl("%1", "3(%2)")"\n" \
+ "2:\t"user_swr("%1", "(%2)")"\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tsdl\t%1, 7(%2)\n" \
+ "2:\tsdr\t%1, (%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
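+/*
+ * Illustration only, not part of this patch: the portable equivalent
+ * of what LoadW() achieves with lwl/lwr - assembling a 32-bit value
+ * from a possibly misaligned address byte by byte (little-endian
+ * order, matching the __LITTLE_ENDIAN variants above), minus the
+ * exception-table fixup that the real macros provide.
+ */
+static inline unsigned int load_w_unaligned_sketch(const unsigned char *addr)
+{
+        return (unsigned int)addr[0]
+             | (unsigned int)addr[1] << 8
+             | (unsigned int)addr[2] << 16
+             | (unsigned int)addr[3] << 24;
+}
+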
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
+ unsigned long origpc;
+ unsigned long orig31;
+ void __user *fault_addr = NULL;
+#ifdef CONFIG_EVA
+ mm_segment_t seg;
+#endif
+ origpc = (unsigned long)pc;
+ orig31 = regs->regs[31];
- regs->regs[0] = 0;
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/*
* This load never faults.
@@ -116,22 +445,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
- /*
- * These are instructions that a compiler doesn't generate. We
- * can assume therefore that the code is MIPS-aware and
- * really buggy. Emulating these instructions would break the
- * semantics anyway.
- */
+ /*
+ * These are instructions that a compiler doesn't generate. We
+ * can assume therefore that the code is MIPS-aware and
+ * really buggy. Emulating these instructions would break the
+ * semantics anyway.
+ */
case ll_op:
case lld_op:
case sc_op:
case scd_op:
- /*
- * For these instructions the only way to create an address
- * error is an attempted access to kernel/supervisor address
- * space.
- */
+ /*
+ * For these instructions the only way to create an address
+ * error is an attempted access to kernel/supervisor address
+ * space.
+ */
case ldl_op:
case ldr_op:
case lwl_op:
@@ -145,36 +474,97 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case sb_op:
goto sigbus;
- /*
- * The remaining opcodes are the ones that are really of interest.
- */
+ /*
+ * The remaining opcodes are the ones that are really of
+ * interest.
+ */
+#ifdef CONFIG_EVA
+ case spec3_op:
+ /*
+ * We can land here only from the kernel accessing user memory,
+ * so we need to "switch" the address limit to user space so
+ * that the address check works properly.
+ */
+ seg = get_fs();
+ set_fs(USER_DS);
+ switch (insn.spec3_format.func) {
+ case lhe_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lwe_op:
+ if (!access_ok(VERIFY_READ, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lhue_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ LoadHWU(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case she_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ case swe_op:
+ if (!access_ok(VERIFY_WRITE, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+ default:
+ set_fs(seg);
+ goto sigill;
+ }
+ set_fs(seg);
+ break;
+#endif
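The EVA branch above brackets every user access with an address-limit switch; stripped of the opcode dispatch, the pattern is (a sketch of the shape only):

	mm_segment_t seg = get_fs();

	set_fs(USER_DS);	/* make access_ok() accept user addresses */
	/* ... emulate the lhe/lwe/lhue/she/swe access ... */
	set_fs(seg);		/* restored on every exit path above */
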
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- __asm__ __volatile__ (".set\tnoat\n"
-#ifdef __BIG_ENDIAN
- "1:\tlb\t%0, 0(%2)\n"
- "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlb\t%0, 1(%2)\n"
- "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
- "sll\t%0, 0x8\n\t"
- "or\t%0, $1\n\t"
- "li\t%1, 0\n"
- "3:\t.set\tat\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadHW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -185,26 +575,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tlwl\t%0, (%2)\n"
- "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlwl\t%0, 3(%2)\n"
- "2:\tlwr\t%0, (%2)\n\t"
-#endif
- "li\t%1, 0\n"
- "3:\t.section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -215,30 +586,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- __asm__ __volatile__ (
- ".set\tnoat\n"
-#ifdef __BIG_ENDIAN
- "1:\tlbu\t%0, 0(%2)\n"
- "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlbu\t%0, 1(%2)\n"
- "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
- "sll\t%0, 0x8\n\t"
- "or\t%0, $1\n\t"
- "li\t%1, 0\n"
- "3:\t.set\tat\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadHWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -257,28 +605,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tlwl\t%0, (%2)\n"
- "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tlwl\t%0, 3(%2)\n"
- "2:\tlwr\t%0, (%2)\n\t"
-#endif
- "dsll\t%0, %0, 32\n\t"
- "dsrl\t%0, %0, 32\n\t"
- "li\t%1, 0\n"
- "3:\t.section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadWU(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -301,26 +628,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tldl\t%0, (%2)\n"
- "2:\tldr\t%0, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tldl\t%0, 7(%2)\n"
- "2:\tldr\t%0, (%2)\n\t"
-#endif
- "li\t%1, 0\n"
- "3:\t.section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%1, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=&r" (value), "=r" (res)
- : "r" (addr), "i" (-EFAULT));
+ LoadDW(addr, value, res);
if (res)
goto fault;
compute_return_epc(regs);
@@ -335,68 +643,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
+ compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- ".set\tnoat\n"
- "1:\tsb\t%1, 1(%2)\n\t"
- "srl\t$1, %1, 0x8\n"
- "2:\tsb\t$1, 0(%2)\n\t"
- ".set\tat\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- ".set\tnoat\n"
- "1:\tsb\t%1, 0(%2)\n\t"
- "srl\t$1,%1, 0x8\n"
- "2:\tsb\t$1, 1(%2)\n\t"
- ".set\tat\n\t"
-#endif
- "li\t%0, 0\n"
- "3:\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%0, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=r" (res)
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ StoreHW(addr, value, res);
if (res)
goto fault;
- compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
+ compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tswl\t%1,(%2)\n"
- "2:\tswr\t%1, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tswl\t%1, 3(%2)\n"
- "2:\tswr\t%1, (%2)\n\t"
-#endif
- "li\t%0, 0\n"
- "3:\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%0, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=r" (res)
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ StoreW(addr, value, res);
if (res)
goto fault;
- compute_return_epc(regs);
break;
case sd_op:
@@ -411,31 +673,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
+ compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
- "1:\tsdl\t%1,(%2)\n"
- "2:\tsdr\t%1, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
- "1:\tsdl\t%1, 7(%2)\n"
- "2:\tsdr\t%1, (%2)\n\t"
-#endif
- "li\t%0, 0\n"
- "3:\n\t"
- ".section\t.fixup,\"ax\"\n\t"
- "4:\tli\t%0, %3\n\t"
- "j\t3b\n\t"
- ".previous\n\t"
- ".section\t__ex_table,\"a\"\n\t"
- STR(PTR)"\t1b, 4b\n\t"
- STR(PTR)"\t2b, 4b\n\t"
- ".previous"
- : "=r" (res)
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ StoreDW(addr, value, res);
if (res)
goto fault;
- compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
@@ -446,22 +688,43 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case ldc1_op:
case swc1_op:
case sdc1_op:
- /*
- * I herewith declare: this does not happen. So send SIGBUS.
- */
- goto sigbus;
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ BUG_ON(!is_fpu_owner());
+
+ lose_fpu(1); /* Save FPU state for the emulator. */
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* Restore FPU state. */
+
+ /* Signal if something went wrong. */
+ process_fpemu_return(res, fault_addr);
+ if (res == 0)
+ break;
+ return;
+
+ /*
+	 * COP2 is available to the implementor for application-specific
+	 * use.  It's up to applications to register a notifier chain and
+	 * do whatever they have to, including possibly sending signals.
+ */
case lwc2_op:
+ cu2_notifier_call_chain(CU2_LWC2_OP, regs);
+ break;
+
case ldc2_op:
+ cu2_notifier_call_chain(CU2_LDC2_OP, regs);
+ break;
+
case swc2_op:
+ cu2_notifier_call_chain(CU2_SWC2_OP, regs);
+ break;
+
case sdc2_op:
- /*
- * These are the coprocessor 2 load/stores. The current
- * implementations don't use cp2 and cp2 should always be
- * disabled in c0_status. So send SIGILL.
- * (No longer true: The Sony Praystation uses cp2 for
- * 3D matrix operations. Dunno if that thingy has a MMU ...)
- */
+ cu2_notifier_call_chain(CU2_SDC2_OP, regs);
+ break;
+
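Each COP2 opcode is now forwarded down a notifier chain instead of unconditionally raising a signal, so platform code that owns CP2 can react. A hypothetical subscriber (handler name mine; a sketch against the asm/cop2.h notifier interface):

	static int my_cu2_handler(struct notifier_block *nb,
				  unsigned long action, void *data)
	{
		struct pt_regs *regs = data;

		switch (action) {
		case CU2_LWC2_OP:
		case CU2_SWC2_OP:
			/* emulate or fix up the CP2 access using regs */
			return NOTIFY_STOP;	/* claim the event */
		}
		return NOTIFY_OK;		/* not ours */
	}

	/* board init, assuming the cu2_notifier() registration helper:
	 *	cu2_notifier(my_cu2_handler, 0);
	 */
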
default:
/*
		 * Pheeee... We encountered a yet unknown instruction or
@@ -477,62 +740,969 @@ static void emulate_load_store_insn(struct pt_regs *regs,
return;
fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
- send_sig(SIGSEGV, current, 1);
+ force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
- send_sig(SIGBUS, current, 1);
+ force_sig(SIGBUS, current);
return;
sigill:
- die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
- send_sig(SIGILL, current, 1);
+	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+		      regs);
+ force_sig(SIGILL, current);
}
-asmlinkage void do_ade(struct pt_regs *regs)
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+static void emulate_load_store_microMIPS(struct pt_regs *regs,
+ void __user *addr)
{
- extern int do_dsemulret(struct pt_regs *);
- unsigned int __user *pc;
- mm_segment_t seg;
+ unsigned long value;
+ unsigned int res;
+ int i;
+ unsigned int reg = 0, rvar;
+ unsigned long orig31;
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long origpc, contpc;
+ union mips_instruction insn;
+ struct mm_decoded_insn mminsn;
+ void __user *fault_addr = NULL;
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+
+ mminsn.micro_mips_mode = 1;
+
+ /*
+ * This load never faults.
+ */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ insn = (union mips_instruction)(mminsn.insn);
+ if (mm_isBranchInstr(regs, mminsn, &contpc))
+ insn = (union mips_instruction)(mminsn.next_insn);
+
+ /* Parse instruction to find what to do */
+
+ switch (insn.mm_i_format.opcode) {
+
+ case mm_pool32a_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxs_op:
+ reg = insn.mm_x_format.rd;
+ goto loadW;
+ }
+
+ goto sigbus;
+
+ case mm_pool32b_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 4;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+
+ case mm_swp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ value = regs->regs[reg + 1];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+ case mm_ldp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_READ, addr, 16))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 8;
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (!access_ok(VERIFY_WRITE, addr, 16))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ value = regs->regs[reg + 1];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_lwm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+				if (!access_ok(VERIFY_READ, addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+
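The LWM32/SWM32 rlist field packs the register set: bits 3:0 give a run length starting at $16 (s0), the value 9 additionally selects $30 (fp), and bit 4 appends $31 (ra). A standalone decoder equivalent to the loop above and to the swm32/ldm/sdm variants that follow (helper name mine; a sketch):

	static int mm_expand_rlist(unsigned int rd, int *gprs)
	{
		unsigned int rvar = rd & 0xf;
		int i, n = 0;

		if (rvar > 9 || !rd)
			return -1;		/* reserved encoding */
		for (i = 16; i < 16 + (int)(rvar == 9 ? 8 : rvar); i++)
			gprs[n++] = i;		/* $16..$23: s0..s7 */
		if (rvar == 9)
			gprs[n++] = 30;		/* $30: fp */
		if (rd & 0x10)
			gprs[n++] = 31;		/* $31: ra */
		return n;			/* registers transferred */
	}
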
+ case mm_swm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+				if (!access_ok(VERIFY_WRITE, addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+
+ case mm_ldm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+				if (!access_ok(VERIFY_READ, addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_READ, addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+				addr += 8;	/* doubleword stride */
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+				if (!access_ok(VERIFY_WRITE, addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ /* LWC2, SWC2, LDC2, SDC2 are not serviced */
+ }
+
+ goto sigbus;
+
+ case mm_pool32c_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwu_func:
+ reg = insn.mm_m_format.rd;
+ goto loadWU;
+ }
+
+ /* LL,SC,LLD,SCD are not serviced */
+ goto sigbus;
+
+ case mm_pool32f_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxc1_func:
+ case mm_swxc1_func:
+ case mm_ldxc1_func:
+ case mm_sdxc1_func:
+ goto fpu_emul;
+ }
+
+ goto sigbus;
+
+ case mm_ldc132_op:
+ case mm_sdc132_op:
+ case mm_lwc132_op:
+ case mm_swc132_op:
+fpu_emul:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ BUG_ON(!is_fpu_owner());
+
+ lose_fpu(1); /* save the FPU state for the emulator */
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* restore FPU state */
+
+ /* If something went wrong, signal */
+ process_fpemu_return(res, fault_addr);
+
+ if (res == 0)
+ goto success;
+ return;
+
+ case mm_lh32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHW;
+
+ case mm_lhu32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHWU;
+
+ case mm_lw32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadW;
+
+ case mm_sh32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeHW;
+
+ case mm_sw32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeW;
+
+ case mm_ld32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadDW;
+
+ case mm_sd32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeDW;
+
+ case mm_pool16c_op:
+ switch (insn.mm16_m_format.func) {
+ case mm_lwm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+
+ goto success;
+
+ case mm_swm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+
+ goto success;
+
+ }
+
+ goto sigbus;
+
+ case mm_lhu16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadHWU;
+
+ case mm_lw16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadW;
+
+ case mm_sh16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeHW;
+
+ case mm_sw16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeW;
+
+ case mm_lwsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto loadW;
+
+ case mm_swsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto storeW;
+
+ case mm_lwgp16_op:
+ reg = reg16to32[insn.mm16_r3_format.rt];
+ goto loadW;
+
+ default:
+ goto sigill;
+ }
+
+loadHW:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadHWU:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadW:
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+loadWU:
+#ifdef CONFIG_64BIT
/*
- * Address errors may be deliberately induced by the FPU emulator to
- * retake control of the CPU after executing the instruction in the
- * delay slot of an emulated branch.
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
*/
- /* Terminate if exception was recognized as a delay slot return */
- if (do_dsemulret(regs))
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+storeHW:
+ if (!access_ok(VERIFY_WRITE, addr, 2))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeW:
+ if (!access_ok(VERIFY_WRITE, addr, 4))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+success:
+ regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV, current);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS, current);
+
+ return;
+
+sigill:
+	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+		      regs);
+ force_sig(SIGILL, current);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs,
+					void __user *addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int reg;
+ unsigned long orig31;
+ u16 __user *pc16;
+ unsigned long origpc;
+ union mips16e_instruction mips16inst, oldinst;
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+ pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+ /*
+ * This load never faults.
+ */
+ __get_user(mips16inst.full, pc16);
+ oldinst = mips16inst;
+
+ /* skip EXTEND instruction */
+ if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+ pc16++;
+ __get_user(mips16inst.full, pc16);
+ } else if (delay_slot(regs)) {
+ /* skip jump instructions */
+ /* JAL/JALX are 32 bits but have OPCODE in first short int */
+ if (mips16inst.ri.opcode == MIPS16e_jal_op)
+ pc16++;
+ pc16++;
+ if (get_user(mips16inst.full, pc16))
+ goto sigbus;
+ }
+
+ switch (mips16inst.ri.opcode) {
+ case MIPS16e_i64_op: /* I64 or RI64 instruction */
+ switch (mips16inst.i64.func) { /* I64/RI64 func field check */
+ case MIPS16e_ldpc_func:
+ case MIPS16e_ldsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto loadDW;
+
+ case MIPS16e_sdsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto writeDW;
+
+ case MIPS16e_sdrasp_func:
+ reg = 29; /* GPRSP */
+ goto writeDW;
+ }
+
+ goto sigbus;
+
+ case MIPS16e_swsp_op:
+ case MIPS16e_lwpc_op:
+ case MIPS16e_lwsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ break;
+
+ case MIPS16e_i8_op:
+ if (mips16inst.i8.func != MIPS16e_swrasp_func)
+ goto sigbus;
+ reg = 29; /* GPRSP */
+ break;
+
+ default:
+ reg = reg16to32[mips16inst.rri.ry];
+ break;
+ }
+
+ switch (mips16inst.ri.opcode) {
+
+ case MIPS16e_lb_op:
+ case MIPS16e_lbu_op:
+ case MIPS16e_sb_op:
+ goto sigbus;
+
+ case MIPS16e_lh_op:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lhu_op:
+ if (!access_ok(VERIFY_READ, addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lw_op:
+ case MIPS16e_lwpc_op:
+ case MIPS16e_lwsp_op:
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_sh_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sw_op:
+ case MIPS16e_swsp_op:
+ case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
+ if (!access_ok(VERIFY_WRITE, addr, 4))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ default:
+ /*
+	 * Pheeee... We encountered a yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
+ */
+ goto sigill;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
return;
- /* Otherwise handle as normal */
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV, current);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS, current);
+
+ return;
+
+sigill:
+	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+		      regs);
+ force_sig(SIGILL, current);
+}
+
+asmlinkage void do_ade(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ unsigned int __user *pc;
+ mm_segment_t seg;
+ prev_state = exception_enter();
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+ 1, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
- * Or are we running in MIPS16 mode?
*/
- if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
+ if (regs->cp0_badvaddr == regs->cp0_epc)
goto sigbus;
- pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
- else if (unaligned_action == UNALIGNED_ACTION_SHOW)
- show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so but ugly ...
*/
+
+ /*
+ * Are we running in microMIPS mode?
+ */
+ if (get_isa16_mode(regs->cp0_epc)) {
+ /*
+ * Did we catch a fault trying to load an instruction in
+ * 16-bit mode?
+ */
+ if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+ goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+
+ if (cpu_has_mmips) {
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_microMIPS(regs,
+ (void __user *)regs->cp0_badvaddr);
+ set_fs(seg);
+
+ return;
+ }
+
+ if (cpu_has_mips16) {
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+ emulate_load_store_MIPS16e(regs,
+ (void __user *)regs->cp0_badvaddr);
+ set_fs(seg);
+
+ return;
+ }
+
+ goto sigbus;
+ }
+
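get_isa16_mode() and msk_isa16_mode() test and strip the ISA-mode bit that microMIPS/MIPS16e execution keeps in bit 0 of the EPC; their effect is essentially (a paraphrase, not the kernel definitions verbatim):

	#define get_isa16_mode(epc)	((epc) & 0x1)	/* 16-bit ISA? */
	#define msk_isa16_mode(epc)	((epc) & ~0x1)	/* real PC */
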
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+ pc = (unsigned int __user *)exception_epc(regs);
+
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
@@ -548,6 +1718,7 @@ sigbus:
/*
* XXX On return from the signal handler we should advance the epc
*/
+ exception_exit(prev_state);
}
#ifdef CONFIG_DEBUG_FS
@@ -560,12 +1731,12 @@ static int __init debugfs_unaligned(void)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ if (!d)
+ return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ if (!d)
+ return -ENOMEM;
return 0;
}
__initcall(debugfs_unaligned);
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
new file mode 100644
index 00000000000..0f1af58b036
--- /dev/null
+++ b/arch/mips/kernel/vdso.c
@@ -0,0 +1,109 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009, 2010 Cavium Networks, Inc.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/unistd.h>
+
+#include <asm/vdso.h>
+#include <asm/uasm.h>
+
+/*
+ * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
+ */
+#define __NR_O32_sigreturn 4119
+#define __NR_O32_rt_sigreturn 4193
+#define __NR_N32_rt_sigreturn 6211
+
+static struct page *vdso_page;
+
+static void __init install_trampoline(u32 *tramp, unsigned int sigreturn)
+{
+ uasm_i_addiu(&tramp, 2, 0, sigreturn); /* li v0, sigreturn */
+ uasm_i_syscall(&tramp, 0);
+}
+
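The two uasm calls emit the canonical MIPS signal-return stub; the generated code is equivalent to (sketch):

	li	v0, <sigreturn nr>	# uasm_i_addiu(&tramp, 2, 0, nr)
	syscall				# uasm_i_syscall(&tramp, 0)
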
+static int __init init_vdso(void)
+{
+ struct mips_vdso *vdso;
+
+ vdso_page = alloc_page(GFP_KERNEL);
+ if (!vdso_page)
+ panic("Cannot allocate vdso");
+
+ vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
+ if (!vdso)
+ panic("Cannot map vdso");
+ clear_page(vdso);
+
+ install_trampoline(vdso->rt_signal_trampoline, __NR_rt_sigreturn);
+#ifdef CONFIG_32BIT
+ install_trampoline(vdso->signal_trampoline, __NR_sigreturn);
+#else
+ install_trampoline(vdso->n32_rt_signal_trampoline,
+ __NR_N32_rt_sigreturn);
+ install_trampoline(vdso->o32_signal_trampoline, __NR_O32_sigreturn);
+ install_trampoline(vdso->o32_rt_signal_trampoline,
+ __NR_O32_rt_sigreturn);
+#endif
+
+ vunmap(vdso);
+
+ return 0;
+}
+subsys_initcall(init_vdso);
+
+static unsigned long vdso_addr(unsigned long start)
+{
+ return STACK_TOP;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ int ret;
+ unsigned long addr;
+ struct mm_struct *mm = current->mm;
+
+ down_write(&mm->mmap_sem);
+
+ addr = vdso_addr(mm->start_stack);
+
+ addr = get_unmapped_area(NULL, addr, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ &vdso_page);
+
+ if (ret)
+ goto up_fail;
+
+ mm->context.vdso = (void *)addr;
+
+up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
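The page ends up at STACK_TOP and is remembered in mm->context.vdso; signal delivery (elsewhere, not part of this file) can then aim the return register at a trampoline inside it, along these lines (hypothetical sketch):

	void *base = current->mm->context.vdso;

	/* $ra: return from the handler into the sigreturn stub */
	regs->regs[31] = (unsigned long)base +
			 offsetof(struct mips_vdso, rt_signal_trampoline);
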
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ return "[vdso]";
+ return NULL;
+}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index b5470ceb418..3b46f7ce9ca 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,4 +1,14 @@
#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -9,7 +19,16 @@ PHDRS {
text PT_LOAD FLAGS(7); /* RWX */
note PT_NOTE FLAGS(4); /* R__ */
}
-jiffies = JIFFIES;
+
+#ifdef CONFIG_32BIT
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ jiffies = jiffies_64;
+ #else
+ jiffies = jiffies_64 + 4;
+ #endif
+#else
+ jiffies = jiffies_64;
+#endif
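The alias must name the low-order 32-bit half of jiffies_64, which lives at byte offset 4 on big-endian 32-bit targets and offset 0 on little-endian ones. A standalone host-side illustration of that offset rule (plain C, independent of the kernel):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const uint64_t jiffies_64 = 0x123456789abcdef0ULL;
		const uint16_t probe = 1;
		const int big = (*(const uint8_t *)&probe == 0);
		const int off = big ? 4 : 0;	/* the "+ 4" above */
		uint32_t lo;

		memcpy(&lo, (const uint8_t *)&jiffies_64 + off, sizeof(lo));
		printf("low 32 bits: 0x%08x\n", lo);	/* 0x9abcdef0 */
		return 0;
	}
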
SECTIONS
{
@@ -28,7 +47,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
- . = LOADADDR;
+ . = VMLINUX_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {
@@ -36,18 +55,14 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.text.*)
*(.fixup)
*(.gnu.warning)
} :text = 0
_etext = .; /* End of text section */
- /* Exception table */
- . = ALIGN(16);
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
/* Exception table for data bus errors */
__dbe_table : {
@@ -59,25 +74,17 @@ SECTIONS
NOTES :text :note
.dummy : { *(.dummy) } :text
+ _sdata = .; /* Start of data section */
RODATA
/* writeable */
.data : { /* Data */
. = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- /*
- * This ALIGN is needed as a workaround for a bug a
- * gcc bug upto 4.1 which limits the maximum alignment
- * to at most 32kB and results in the following
- * warning:
- *
- * CC arch/mips/kernel/init_task.o
- * arch/mips/kernel/init_task.c:30: warning: alignment
- * of ‘init_thread_union’ is greater than maximum
- * object file alignment. Using 32768
- */
- . = ALIGN(_PAGE_SIZE);
- *(.data.init_task)
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA
CONSTRUCTORS
}
@@ -94,51 +101,20 @@ SECTIONS
.sdata : {
*(.sdata)
}
-
- . = ALIGN(_PAGE_SIZE);
- .data_nosave : {
- __nosave_begin = .;
- *(.data.nosave)
- }
- . = ALIGN(_PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(32);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
_edata = .; /* End of data section */
/* will be freed after init */
- . = ALIGN(_PAGE_SIZE); /* Init code and data */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(16);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
-
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
+ . = ALIGN(4);
+ .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
+ __mips_machines_start = .;
+ *(.mips.machines.init)
+ __mips_machines_end = .;
}
- SECURITY_INIT
/* .exit.text is discarded at runtime, not link time, to deal with
* references from .rodata
@@ -149,43 +125,26 @@ SECTIONS
.exit.data : {
EXIT_DATA
}
-#if defined(CONFIG_BLK_DEV_INITRD)
- . = ALIGN(_PAGE_SIZE);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
- PERCPU(_PAGE_SIZE)
- . = ALIGN(_PAGE_SIZE);
+
+ PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ /*
+	 * Align to 64K in an attempt to eliminate holes before the
+ * .bss..swapper_pg_dir section at the start of .bss. This
+ * also satisfies PAGE_SIZE alignment as the largest page size
+ * allowed is 64K.
+ */
+ . = ALIGN(0x10000);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .sbss : {
- *(.sbss)
- *(.scommon)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
+ /*
+ * Force .bss to 64K alignment so that .bss..swapper_pg_dir
+ * gets that alignment. .sbss should be empty, so there will be
+	 * no holes after __init_end.
+	 */
+ BSS_SECTION(0, 0x10000, 0)
_end = . ;
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.exitcall.exit)
-
- /* ABI crap starts here */
- *(.MIPS.options)
- *(.options)
- *(.pdr)
- *(.reginfo)
- }
-
/* These mark the ABI of the kernel for debuggers. */
.mdebug.abi32 : {
KEEP(*(.mdebug.abi32))
@@ -211,4 +170,15 @@ SECTIONS
*(.gptab.bss)
*(.gptab.sbss)
}
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : {
+ /* ABI crap starts here */
+ *(.MIPS.options)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ *(.eh_frame)
+ }
}
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
new file mode 100644
index 00000000000..9268ebc0f61
--- /dev/null
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -0,0 +1,180 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/vpe.h>
+
+static int major;
+
+void cleanup_tc(struct tc *tc)
+{
+}
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ struct vpe_notifications *notifier;
+
+ list_for_each_entry(notifier, &vpe->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(vpe->load_addr);
+ vpe->state = VPE_STATE_UNUSED;
+
+ return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+ char *buf)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ unsigned long new;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret < 0)
+ return ret;
+
+ /* APRP can only reserve one TC in a VPE and no more. */
+ if (new != 1)
+ return -EINVAL;
+
+ vpe->ntcs = new;
+
+ return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+ kfree(cd);
+}
+
+static struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .dev_release = vpe_device_release,
+ .dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+ struct vpe *v = NULL;
+ struct tc *t;
+ int err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (num_possible_cpus() - aprp_cpu_index() < 1) {
+		pr_warn("No VPEs reserved for AP/SP: not initializing VPE loader\n"
+			"Pass maxcpus=<n> as a kernel argument\n");
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+ if (major < 0) {
+ pr_warn("VPE loader: unable to register character device\n");
+ return major;
+ }
+
+ err = class_register(&vpe_class);
+ if (err) {
+ pr_err("vpe_class registration failed\n");
+ goto out_chrdev;
+ }
+
+ device_initialize(&vpe_device);
+	vpe_device.class = &vpe_class;
+	vpe_device.parent = NULL;
+ dev_set_name(&vpe_device, "vpe_sp");
+ vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+ err = device_add(&vpe_device);
+ if (err) {
+ pr_err("Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ t = alloc_tc(aprp_cpu_index());
+ if (!t) {
+ pr_warn("VPE: unable to allocate TC\n");
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ /* VPE */
+ v = alloc_vpe(aprp_cpu_index());
+ if (v == NULL) {
+ pr_warn("VPE: unable to allocate VPE\n");
+ kfree(t);
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ v->ntcs = 1;
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+
+ /* TC */
+ t->pvpe = v; /* set the parent vpe */
+
+ return 0;
+
+out_dev:
+ device_del(&vpe_device);
+
+out_class:
+ class_unregister(&vpe_class);
+
+out_chrdev:
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ device_del(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ /* No locking needed here */
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list)
+ if (v->state != VPE_STATE_UNUSED)
+ release_vpe(v);
+}
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
new file mode 100644
index 00000000000..2e003b11a09
--- /dev/null
+++ b/arch/mips/kernel/vpe-mt.c
@@ -0,0 +1,521 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+
+static int major;
+
+/* The number of TCs and VPEs physically available on the core */
+static int hw_tcs, hw_vpes;
+
+/* We are prepared so configure and start the VPE... */
+int vpe_run(struct vpe *v)
+{
+ unsigned long flags, val, dmt_flag;
+ struct vpe_notifications *notifier;
+ unsigned int vpeflags;
+ struct tc *t;
+
+ /* check we are the Master VPE */
+ local_irq_save(flags);
+ val = read_c0_vpeconf0();
+ if (!(val & VPECONF0_MVP)) {
+		pr_warn("VPE loader: only Master VPEs are able to configure MT\n");
+ local_irq_restore(flags);
+
+ return -1;
+ }
+
+ dmt_flag = dmt();
+ vpeflags = dvpe();
+
+ if (list_empty(&v->tc)) {
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+		pr_warn("VPE loader: No TCs associated with VPE %d\n",
+ v->minor);
+
+ return -ENOEXEC;
+ }
+
+ t = list_first_entry(&v->tc, struct tc, tc);
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+
+ /* should check it is halted, and not activated */
+ if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
+ !(read_tc_c0_tchalt() & TCHALT_H)) {
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ pr_warn("VPE loader: TC %d is already active!\n",
+ t->index);
+
+ return -ENOEXEC;
+ }
+
+ /*
+ * Write the address we want it to start running from in the TCPC
+ * register.
+ */
+ write_tc_c0_tcrestart((unsigned long)v->__start);
+ write_tc_c0_tccontext((unsigned long)0);
+
+ /*
+ * Mark the TC as activated, not interrupt exempt and not dynamically
+ * allocatable
+ */
+ val = read_tc_c0_tcstatus();
+ val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
+ write_tc_c0_tcstatus(val);
+
+ write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
+ /*
+ * The sde-kit passes 'memsize' to __start in $a3, so set something
+ * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
+ * DFLT_HEAP_SIZE when you compile your program
+ */
+ mttgpr(6, v->ntcs);
+ mttgpr(7, physical_memsize);
+
+ /* set up VPE1 */
+ /*
+ * bind the TC to VPE 1 as late as possible so we only have the final
+ * VPE registers to set up, and so an EJTAG probe can trigger on it
+ */
+ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
+
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+ back_to_back_c0_hazard();
+
+ /* Set up the XTC bit in vpeconf0 to point at our tc */
+ write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
+ | (t->index << VPECONF0_XTC_SHIFT));
+
+ back_to_back_c0_hazard();
+
+ /* enable this VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /* clear out any left overs from a previous program */
+ write_vpe_c0_status(0);
+ write_vpe_c0_cause(0);
+
+ /* take system out of configuration state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /*
+ * SMVP kernels manage VPE enable independently, but uniprocessor
+ * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
+ */
+#ifdef CONFIG_SMP
+ evpe(vpeflags);
+#else
+ evpe(EVPE_ENABLE);
+#endif
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ list_for_each_entry(notifier, &v->notify, list)
+ notifier->start(VPE_MODULE_MINOR);
+
+ return 0;
+}
+
+void cleanup_tc(struct tc *tc)
+{
+ unsigned long flags;
+ unsigned int mtflags, vpflags;
+ int tmp;
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(tc->index);
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+}
+
+/* module wrapper entry points */
+/* give me a vpe */
+void *vpe_alloc(void)
+{
+ int i;
+ struct vpe *v;
+
+ /* find a vpe */
+ for (i = 1; i < MAX_VPES; i++) {
+ v = get_vpe(i);
+ if (v != NULL) {
+ v->state = VPE_STATE_INUSE;
+ return v;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(vpe_alloc);
+
+/* start running from here */
+int vpe_start(void *vpe, unsigned long start)
+{
+ struct vpe *v = vpe;
+
+ v->__start = start;
+ return vpe_run(v);
+}
+EXPORT_SYMBOL(vpe_start);
+
+/* halt it for now */
+int vpe_stop(void *vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ evpe_flags = dvpe();
+
+ t = list_entry(v->tc.next, struct tc, tc);
+ if (t != NULL) {
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+ }
+
+ evpe(evpe_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpe_stop);
+
+/* I've done with it thank you */
+int vpe_free(void *vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ t = list_entry(v->tc.next, struct tc, tc);
+ if (t == NULL)
+ return -ENOEXEC;
+
+ evpe_flags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ /* mark the TC unallocated */
+ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+
+ v->state = VPE_STATE_UNUSED;
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(evpe_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpe_free);
+
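vpe_alloc()/vpe_start()/vpe_stop()/vpe_free() are the exported AP/SP entry points. A hypothetical in-kernel client (function name and entry address mine) would drive them like this:

	static int __init my_sp_launch(void)
	{
		void *v = vpe_alloc();

		if (!v)
			return -ENODEV;		/* no free VPE */

		/* program already loaded; __start resolved to this address */
		if (vpe_start(v, 0x80100000UL)) {
			vpe_free(v);
			return -ENOEXEC;
		}
		return 0;
	}
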
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ struct vpe_notifications *notifier;
+
+ list_for_each_entry(notifier, &vpe->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(vpe->load_addr);
+ cleanup_tc(get_tc(aprp_cpu_index()));
+ vpe_stop(vpe);
+ vpe_free(vpe);
+
+ return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+ char *buf)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ unsigned long new;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret < 0)
+ return ret;
+
+ if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
+ return -EINVAL;
+
+ vpe->ntcs = new;
+
+ return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+ kfree(cd);
+}
+
+static struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .dev_release = vpe_device_release,
+ .dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+ unsigned int mtflags, vpflags;
+ unsigned long flags, val;
+ struct vpe *v = NULL;
+ struct tc *t;
+ int tc, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (vpelimit == 0) {
+		pr_warn("No VPEs reserved for AP/SP: not initializing VPE loader\n"
+			"Pass maxvpes=<n> as a kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ if (aprp_cpu_index() == 0) {
+		pr_warn("No TCs reserved for AP/SP: not initializing VPE loader\n"
+			"Pass maxtcs=<n> as a kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+ if (major < 0) {
+ pr_warn("VPE loader: unable to register character device\n");
+ return major;
+ }
+
+ err = class_register(&vpe_class);
+ if (err) {
+ pr_err("vpe_class registration failed\n");
+ goto out_chrdev;
+ }
+
+ device_initialize(&vpe_device);
+	vpe_device.class = &vpe_class;
+	vpe_device.parent = NULL;
+ dev_set_name(&vpe_device, "vpe1");
+ vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+ err = device_add(&vpe_device);
+ if (err) {
+ pr_err("Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ val = read_c0_mvpconf0();
+ hw_tcs = (val & MVPCONF0_PTC) + 1;
+ hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+ for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
+ /*
+ * Must re-enable multithreading temporarily or in case we
+ * reschedule send IPIs or similar we might hang.
+ */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+ t = alloc_tc(tc);
+ if (!t) {
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* VPE's */
+ if (tc < hw_tcs) {
+ settc(tc);
+
+ v = alloc_vpe(tc);
+ if (v == NULL) {
+ pr_warn("VPE: unable to allocate VPE\n");
+ goto out_reenable;
+ }
+
+ v->ntcs = hw_tcs - aprp_cpu_index();
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+
+ /* deactivate all but vpe0 */
+ if (tc >= aprp_cpu_index()) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+ }
+
+ /* disable multi-threading with TC's */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
+ ~VPECONTROL_TE);
+
+ if (tc >= vpelimit) {
+ /*
+ * Set config to be the same as vpe0,
+ * particularly kseg0 coherency alg
+ */
+ write_vpe_c0_config(read_c0_config());
+ }
+ }
+
+ /* TC's */
+ t->pvpe = v; /* set the parent vpe */
+
+ if (tc >= aprp_cpu_index()) {
+ unsigned long tmp;
+
+ settc(tc);
+
+ /*
+			 * A TC that is bound to any other VPE gets bound to
+			 * VPE0; ideally I'd like to make it homeless, but the
+			 * hardware doesn't appear to let a TC be bound to a
+			 * non-existent VPE, which is perfectly reasonable.
+			 *
+			 * The (un)bound state is visible to an EJTAG probe,
+			 * so it may notify GDB...
+ */
+ tmp = read_tc_c0_tcbind();
+ if (tmp & TCBIND_CURVPE) {
+ /* tc is bound >vpe0 */
+ write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
+
+ t->pvpe = get_vpe(0); /* set the parent vpe */
+ }
+
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not activated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+ }
+ }
+
+out_reenable:
+ /* release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+
+ return 0;
+
+out_dev:
+ device_del(&vpe_device);
+
+out_class:
+ class_unregister(&vpe_class);
+
+out_chrdev:
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ device_del(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ /* No locking needed here */
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+ if (v->state != VPE_STATE_UNUSED)
+ release_vpe(v);
+ }
+}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 39804c584ed..11da314565c 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1,38 +1,22 @@
/*
- * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-/*
- * VPE support module
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
- * Provides support for loading a MIPS SP program on VPE1.
- * The SP enviroment is rather simple, no tlb's. It needs to be relocatable
- * (or partially linked). You should initialise your stack in the startup
- * code. This loader looks for the symbol __start and sets up
- * execution to resume from there. The MIPS SDE kit contains suitable examples.
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
*
- * To load and run, simply cat a SP 'program file' to /dev/vpe1.
- * i.e cat spapp >/dev/vpe1.
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
+ * environment is rather simple since there are no TLBs. It needs
+ * to be relocatable (or partially linked). Initialize your stack in
+ * the startup code. The loader looks for the symbol __start and sets
+ * up execution to resume from there. To load and run, simply cat an
+ * SP 'binary' to the /dev/vpe1 device.
*/
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
@@ -46,15 +30,10 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
-#include <asm/atomic.h>
-#include <asm/cpu.h>
+#include <linux/atomic.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/vpe.h>
-#include <asm/kspd.h>
-
-typedef void *vpe_handle;
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
@@ -63,138 +42,50 @@ typedef void *vpe_handle;
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
-/*
- * The number of TCs and VPEs physically available on the core
- */
-static int hw_tcs, hw_vpes;
-static char module_name[] = "vpe";
-static int major;
-static const int minor = 1; /* fixed for now */
-
-#ifdef CONFIG_MIPS_APSP_KSPD
- static struct kspd_notifications kspd_events;
-static int kspd_events_reqd = 0;
-#endif
-
-/* grab the likely amount of memory we will need. */
-#ifdef CONFIG_MIPS_VPE_LOADER_TOM
-#define P_SIZE (2 * 1024 * 1024)
-#else
-/* add an overhead to the max kmalloc size for non-striped symbols/etc */
-#define P_SIZE (256 * 1024)
-#endif
-
-extern unsigned long physical_memsize;
-
-#define MAX_VPES 16
-#define VPE_PATH_MAX 256
-
-enum vpe_state {
- VPE_STATE_UNUSED = 0,
- VPE_STATE_INUSE,
- VPE_STATE_RUNNING
-};
-
-enum tc_state {
- TC_STATE_UNUSED = 0,
- TC_STATE_INUSE,
- TC_STATE_RUNNING,
- TC_STATE_DYNAMIC
-};
-
-struct vpe {
- enum vpe_state state;
-
- /* (device) minor associated with this vpe */
- int minor;
-
- /* elfloader stuff */
- void *load_addr;
- unsigned long len;
- char *pbuffer;
- unsigned long plen;
- unsigned int uid, gid;
- char cwd[VPE_PATH_MAX];
-
- unsigned long __start;
-
- /* tc's associated with this vpe */
- struct list_head tc;
-
- /* The list of vpe's */
- struct list_head list;
-
- /* shared symbol address */
- void *shared_ptr;
-
- /* the list of who wants to know when something major happens */
- struct list_head notify;
-
- unsigned int ntcs;
-};
-
-struct tc {
- enum tc_state state;
- int index;
-
- struct vpe *pvpe; /* parent VPE */
- struct list_head tc; /* The list of TC's with this VPE */
- struct list_head list; /* The global list of tc's */
+struct vpe_control vpecontrol = {
+ .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
+ .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
+ .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
+ .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
-struct {
- /* Virtual processing elements */
- struct list_head vpe_list;
-
- /* Thread contexts */
- struct list_head tc_list;
-} vpecontrol = {
- .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
- .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
-};
-
-static void release_progmem(void *ptr);
-extern void save_gp_address(unsigned int secbase, unsigned int rel);
-
/* get the vpe associated with this minor */
struct vpe *get_vpe(int minor)
{
- struct vpe *v;
+ struct vpe *res, *v;
if (!cpu_has_mipsmt)
return NULL;
+ res = NULL;
+ spin_lock(&vpecontrol.vpe_list_lock);
list_for_each_entry(v, &vpecontrol.vpe_list, list) {
- if (v->minor == minor)
- return v;
+ if (v->minor == VPE_MODULE_MINOR) {
+ res = v;
+ break;
+ }
}
+ spin_unlock(&vpecontrol.vpe_list_lock);
- return NULL;
+ return res;
}
/* get the tc associated with this index */
struct tc *get_tc(int index)
{
- struct tc *t;
-
- list_for_each_entry(t, &vpecontrol.tc_list, list) {
- if (t->index == index)
- return t;
- }
-
- return NULL;
-}
-
-struct tc *get_tc_unused(void)
-{
- struct tc *t;
+ struct tc *res, *t;
+ res = NULL;
+ spin_lock(&vpecontrol.tc_list_lock);
list_for_each_entry(t, &vpecontrol.tc_list, list) {
- if (t->state == TC_STATE_UNUSED)
- return t;
+ if (t->index == index) {
+ res = t;
+ break;
+ }
}
+ spin_unlock(&vpecontrol.tc_list_lock);
- return NULL;
+ return res;
}
/* allocate a vpe and associate it with this minor (or index) */
@@ -202,15 +93,19 @@ struct vpe *alloc_vpe(int minor)
{
struct vpe *v;
- if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
- return NULL;
- }
+ v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
+ if (v == NULL)
+ goto out;
INIT_LIST_HEAD(&v->tc);
+ spin_lock(&vpecontrol.vpe_list_lock);
list_add_tail(&v->list, &vpecontrol.vpe_list);
+ spin_unlock(&vpecontrol.vpe_list_lock);
INIT_LIST_HEAD(&v->notify);
- v->minor = minor;
+ v->minor = VPE_MODULE_MINOR;
+
+out:
return v;
}
@@ -219,12 +114,16 @@ struct tc *alloc_tc(int index)
{
struct tc *tc;
- if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+ tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
+ if (tc == NULL)
goto out;
INIT_LIST_HEAD(&tc->tc);
tc->index = index;
+
+ spin_lock(&vpecontrol.tc_list_lock);
list_add_tail(&tc->list, &vpecontrol.tc_list);
+ spin_unlock(&vpecontrol.tc_list_lock);
out:
return tc;
@@ -239,28 +138,8 @@ void release_vpe(struct vpe *v)
kfree(v);
}
-void dump_mtregs(void)
-{
- unsigned long val;
-
- val = read_c0_config3();
- printk("config3 0x%lx MT %ld\n", val,
- (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
-
- val = read_c0_mvpcontrol();
- printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
- (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
- (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
- (val & MVPCONTROL_EVP));
-
- val = read_c0_mvpconf0();
- printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
- (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
- val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
-}
-
-/* Find some VPE program space */
-static void *alloc_progmem(unsigned long len)
+/* Find some VPE program space */
+void *alloc_progmem(unsigned long len)
{
void *addr;
@@ -269,7 +148,7 @@ static void *alloc_progmem(unsigned long len)
* This means you must tell Linux to use less memory than you
* physically have, for example by passing a mem= boot argument.
*/
- addr = pfn_to_kaddr(max_pfn);
+ addr = pfn_to_kaddr(max_low_pfn);
memset(addr, 0, len);
#else
/* simple grab some mem for now */
@@ -279,7 +158,7 @@ static void *alloc_progmem(unsigned long len)
return addr;
}
-static void release_progmem(void *ptr)
+void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
kfree(ptr);
@@ -287,7 +166,7 @@ static void release_progmem(void *ptr)
}
/* Update size with this section: return offset. */
-static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
+static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
{
long ret;
@@ -297,11 +176,11 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
}
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
- might -- code, read-only data, read-write data, small data. Tally
+ might -- code, read-only data, read-write data, small data. Tally
sizes, and place the offsets into sh_entsize fields: high bit means it
belongs in init. */
-static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
- Elf_Shdr * sechdrs, const char *secstrings)
+static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs, const char *secstrings)
{
static unsigned long const masks[][2] = {
/* NOTE: all executable code must be the first section
@@ -321,12 +200,12 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
for (i = 0; i < hdr->e_shnum; ++i) {
Elf_Shdr *s = &sechdrs[i];
- // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL)
continue;
- s->sh_entsize = get_offset(&mod->core_size, s);
+ s->sh_entsize =
+ get_offset((unsigned long *)&mod->core_size, s);
}
if (m == 0)
@@ -335,7 +214,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
}
}
-
/* from module-elf32.c, but subverted a little */
struct mips_hi16 {
@@ -358,20 +236,18 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
{
int rel;
- if( !(*location & 0xffff) ) {
+ if (!(*location & 0xffff)) {
rel = (int)v - gp_addr;
- }
- else {
+ } else {
/* .sbss + gp(relative) + offset */
/* kludge! */
rel = (int)(short)((int)v + gp_offs +
(int)(short)(*location & 0xffff) - gp_addr);
}
- if( (rel > 32768) || (rel < -32768) ) {
- printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
- "relative address 0x%x out of range of gp register\n",
- rel);
+ if ((rel > 32768) || (rel < -32768)) {
+ pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
+ rel);
return -ENOEXEC;
}
@@ -385,12 +261,12 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
{
int rel;
rel = (((unsigned int)v - (unsigned int)location));
- rel >>= 2; // because the offset is in _instructions_ not bytes.
- rel -= 1; // and one instruction less due to the branch delay slot.
+ rel >>= 2; /* because the offset is in _instructions_ not bytes. */
+ rel -= 1; /* and one instruction less due to the branch delay slot. */
- if( (rel > 32768) || (rel < -32768) ) {
- printk(KERN_DEBUG "VPE loader: "
- "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+ if ((rel > 32768) || (rel < -32768)) {
+ pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
+ rel);
return -ENOEXEC;
}
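
Worked through with concrete numbers (hypothetical addresses, same arithmetic as apply_r_mips_pc16 above): a branch at 0x1000 targeting 0x1010 is four instructions away, but the encoded field is one less because the offset is taken relative to the delay-slot instruction.

/* PC16 displacement math with illustrative values. */
#include <stdio.h>

int main(void)
{
	unsigned int location = 0x1000;	/* address of the branch */
	unsigned int v = 0x1010;	/* relocation target */
	int rel = (int)(v - location);	/* byte displacement: 16 */

	rel >>= 2;			/* -> instruction words: 4 */
	rel -= 1;			/* delay slot adjustment: 3 */

	printf("encoded field: %d\n", rel);	/* prints 3 */
	return 0;
}
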
@@ -411,8 +287,7 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
Elf32_Addr v)
{
if (v % 4) {
- printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
- " unaligned relocation\n");
+ pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
return -ENOEXEC;
}
@@ -443,7 +318,7 @@ static int apply_r_mips_hi16(struct module *me, uint32_t *location,
* the carry we need to add. Save the information, and let LO16 do the
* actual relocation.
*/
- n = kmalloc(sizeof *n, GFP_KERNEL);
+ n = kmalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
@@ -460,26 +335,23 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
{
unsigned long insnlo = *location;
Elf32_Addr val, vallo;
+ struct mips_hi16 *l, *next;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
l = mips_hi16_list;
while (l != NULL) {
- struct mips_hi16 *next;
unsigned long insn;
/*
* The value for the HI16 had best be the same.
*/
- if (v != l->value) {
- printk(KERN_DEBUG "VPE loader: "
- "apply_r_mips_lo16/hi16: \t"
- "inconsistent value information\n");
- return -ENOEXEC;
+ if (v != l->value) {
+ pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
+ goto out_free;
}
/*
@@ -510,13 +382,23 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
*location = insnlo;
return 0;
+
+out_free:
+ while (l != NULL) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+ mips_hi16_list = NULL;
+
+ return -ENOEXEC;
}
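
The deferred-HI16 dance exists because the LO16 half is sign-extended by lui/addiu: whenever bit 15 of the final value is set, the HI16 half must absorb a carry of 1. A standalone sketch of that split (not the loader's code path, just the arithmetic it preserves):

/* HI16/LO16 split with sign-extension carry (sketch). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x12348765;	/* bit 15 set -> carry case */
	uint16_t lo = addr & 0xffff;
	uint16_t hi = (addr >> 16) + ((addr & 0x8000) != 0);

	/* Reconstruct the way lui + addiu do: hi << 16 plus the
	 * sign-extended lo yields the original address again. */
	uint32_t back = ((uint32_t)hi << 16) + (uint32_t)(int32_t)(int16_t)lo;

	printf("hi=0x%04x lo=0x%04x back=0x%08x\n", hi, lo, back);
	return 0;			/* back == 0x12348765 */
}
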
static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
@@ -540,7 +422,7 @@ static char *rstrs[] = {
[R_MIPS_PC16] = "MIPS_PC16"
};
-int apply_relocations(Elf32_Shdr *sechdrs,
+static int apply_relocations(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
@@ -564,20 +446,19 @@ int apply_relocations(Elf32_Shdr *sechdrs,
+ ELF32_R_SYM(r_info);
if (!sym->st_value) {
- printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
- me->name, strtab + sym->st_name);
+ pr_debug("%s: undefined weak symbol %s\n",
+ me->name, strtab + sym->st_name);
/* just print the warning, don't barf */
}
v = sym->st_value;
res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
- if( res ) {
+ if (res) {
char *r = rstrs[ELF32_R_TYPE(r_info)];
- printk(KERN_WARNING "VPE loader: .text+0x%x "
- "relocation type %s for symbol \"%s\" failed\n",
- rel[i].r_offset, r ? r : "UNKNOWN",
- strtab + sym->st_name);
+ pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
+ rel[i].r_offset, r ? r : "UNKNOWN",
+ strtab + sym->st_name);
return res;
}
}
@@ -585,17 +466,15 @@ int apply_relocations(Elf32_Shdr *sechdrs,
return 0;
}
-void save_gp_address(unsigned int secbase, unsigned int rel)
+static inline void save_gp_address(unsigned int secbase, unsigned int rel)
{
gp_addr = secbase + rel;
gp_offs = gp_addr - (secbase & 0xffff0000);
}
/* end module-elf32.c */
-
-
/* Change all symbols so that sh_value encodes the pointer directly. */
-static void simplify_symbols(Elf_Shdr * sechdrs,
+static void simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
const char *strtab,
const char *secstrings,
@@ -636,18 +515,16 @@ static void simplify_symbols(Elf_Shdr * sechdrs,
break;
case SHN_MIPS_SCOMMON:
- printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON "
- "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
- sym[i].st_shndx);
- // .sbss section
+ pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
+ strtab + sym[i].st_name, sym[i].st_shndx);
+ /* .sbss section */
break;
default:
secbase = sechdrs[sym[i].st_shndx].sh_addr;
- if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
+ if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
save_gp_address(secbase, sym[i].st_value);
- }
sym[i].st_value += secbase;
break;
@@ -656,146 +533,21 @@ static void simplify_symbols(Elf_Shdr * sechdrs,
}
#ifdef DEBUG_ELFLOADER
-static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
+static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
const char *strtab, struct module *mod)
{
Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
- printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
+ pr_debug("dump_elfsymbols: n %d\n", n);
for (i = 1; i < n; i++) {
- printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
- strtab + sym[i].st_name, sym[i].st_value);
+ pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
+ sym[i].st_value);
}
}
#endif
-/* We are prepared so configure and start the VPE... */
-static int vpe_run(struct vpe * v)
-{
- unsigned long flags, val, dmt_flag;
- struct vpe_notifications *n;
- unsigned int vpeflags;
- struct tc *t;
-
- /* check we are the Master VPE */
- local_irq_save(flags);
- val = read_c0_vpeconf0();
- if (!(val & VPECONF0_MVP)) {
- printk(KERN_WARNING
- "VPE loader: only Master VPE's are allowed to configure MT\n");
- local_irq_restore(flags);
-
- return -1;
- }
-
- dmt_flag = dmt();
- vpeflags = dvpe();
-
- if (!list_empty(&v->tc)) {
- if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
- evpe(vpeflags);
- emt(dmt_flag);
- local_irq_restore(flags);
-
- printk(KERN_WARNING
- "VPE loader: TC %d is already in use.\n",
- t->index);
- return -ENOEXEC;
- }
- } else {
- evpe(vpeflags);
- emt(dmt_flag);
- local_irq_restore(flags);
-
- printk(KERN_WARNING
- "VPE loader: No TC's associated with VPE %d\n",
- v->minor);
-
- return -ENOEXEC;
- }
-
- /* Put MVPE's into 'configuration state' */
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- settc(t->index);
-
- /* should check it is halted, and not activated */
- if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
- evpe(vpeflags);
- emt(dmt_flag);
- local_irq_restore(flags);
-
- printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
- t->index);
-
- return -ENOEXEC;
- }
-
- /* Write the address we want it to start running from in the TCPC register. */
- write_tc_c0_tcrestart((unsigned long)v->__start);
- write_tc_c0_tccontext((unsigned long)0);
-
- /*
- * Mark the TC as activated, not interrupt exempt and not dynamically
- * allocatable
- */
- val = read_tc_c0_tcstatus();
- val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
- write_tc_c0_tcstatus(val);
-
- write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
-
- /*
- * The sde-kit passes 'memsize' to __start in $a3, so set something
- * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
- * DFLT_HEAP_SIZE when you compile your program
- */
- mttgpr(6, v->ntcs);
- mttgpr(7, physical_memsize);
-
- /* set up VPE1 */
- /*
- * bind the TC to VPE 1 as late as possible so we only have the final
- * VPE registers to set up, and so an EJTAG probe can trigger on it
- */
- write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
-
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
-
- back_to_back_c0_hazard();
-
- /* Set up the XTC bit in vpeconf0 to point at our tc */
- write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
- | (t->index << VPECONF0_XTC_SHIFT));
-
- back_to_back_c0_hazard();
-
- /* enable this VPE */
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
-
- /* clear out any left overs from a previous program */
- write_vpe_c0_status(0);
- write_vpe_c0_cause(0);
-
- /* take system out of configuration state */
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-#ifdef CONFIG_SMP
- evpe(EVPE_ENABLE);
-#else
- evpe(vpeflags);
-#endif
- emt(dmt_flag);
- local_irq_restore(flags);
-
- list_for_each_entry(n, &v->notify, list)
- n->start(minor);
-
- return 0;
-}
-
-static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
+static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
unsigned int symindex, const char *strtab,
struct module *mod)
{
@@ -803,16 +555,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
for (i = 1; i < n; i++) {
- if (strcmp(strtab + sym[i].st_name, "__start") == 0) {
+ if (strcmp(strtab + sym[i].st_name, "__start") == 0)
v->__start = sym[i].st_value;
- }
- if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) {
+ if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
v->shared_ptr = (void *)sym[i].st_value;
- }
}
- if ( (v->__start == 0) || (v->shared_ptr == NULL))
+ if ((v->__start == 0) || (v->shared_ptr == NULL))
return -1;
return 0;
@@ -823,14 +573,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
* contents of the program (p)buffer performing relocatations/etc, free's it
* when finished.
*/
-static int vpe_elfload(struct vpe * v)
+static int vpe_elfload(struct vpe *v)
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
long err = 0;
char *secstrings, *strtab = NULL;
unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
- struct module mod; // so we can re-use the relocations code
+ struct module mod; /* so we can re-use the relocations code */
memset(&mod, 0, sizeof(struct module));
strcpy(mod.name, "VPE loader");
@@ -840,12 +590,11 @@ static int vpe_elfload(struct vpe * v)
/* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
- if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
|| (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) {
- printk(KERN_WARNING
- "VPE loader: program wrong arch or weird elf version\n");
+ pr_warn("VPE loader: program wrong arch or weird elf version\n");
return -ENOEXEC;
}
@@ -854,8 +603,7 @@ static int vpe_elfload(struct vpe * v)
relocate = 1;
if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
- printk(KERN_ERR "VPE loader: program length %u truncated\n",
- len);
+ pr_err("VPE loader: program length %u truncated\n", len);
return -ENOEXEC;
}
@@ -870,22 +618,24 @@ static int vpe_elfload(struct vpe * v)
if (relocate) {
for (i = 1; i < hdr->e_shnum; i++) {
- if (sechdrs[i].sh_type != SHT_NOBITS
- && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
- printk(KERN_ERR "VPE program length %u truncated\n",
+ if ((sechdrs[i].sh_type != SHT_NOBITS) &&
+ (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
+ pr_err("VPE program length %u truncated\n",
len);
return -ENOEXEC;
}
/* Mark all sections sh_addr with their address in the
temporary image. */
- sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+ sechdrs[i].sh_addr = (size_t) hdr +
+ sechdrs[i].sh_offset;
/* Internal symbols and strings. */
if (sechdrs[i].sh_type == SHT_SYMTAB) {
symindex = i;
strindex = sechdrs[i].sh_link;
- strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+ strtab = (char *)hdr +
+ sechdrs[strindex].sh_offset;
}
}
layout_sections(&mod, hdr, sechdrs, secstrings);
@@ -912,60 +662,68 @@ static int vpe_elfload(struct vpe * v)
/* Update sh_addr to point to copy in image. */
sechdrs[i].sh_addr = (unsigned long)dest;
- printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
- secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
+ pr_debug(" section sh_name %s sh_addr 0x%x\n",
+ secstrings + sechdrs[i].sh_name,
+ sechdrs[i].sh_addr);
}
- /* Fix up syms, so that st_value is a pointer to location. */
- simplify_symbols(sechdrs, symindex, strtab, secstrings,
- hdr->e_shnum, &mod);
+ /* Fix up syms, so that st_value is a pointer to location. */
+ simplify_symbols(sechdrs, symindex, strtab, secstrings,
+ hdr->e_shnum, &mod);
- /* Now do relocations. */
- for (i = 1; i < hdr->e_shnum; i++) {
- const char *strtab = (char *)sechdrs[strindex].sh_addr;
- unsigned int info = sechdrs[i].sh_info;
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
- /* Not a valid relocation section? */
- if (info >= hdr->e_shnum)
- continue;
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
- /* Don't bother with non-allocated sections */
- if (!(sechdrs[info].sh_flags & SHF_ALLOC))
- continue;
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
- if (sechdrs[i].sh_type == SHT_REL)
- err = apply_relocations(sechdrs, strtab, symindex, i,
- &mod);
- else if (sechdrs[i].sh_type == SHT_RELA)
- err = apply_relocate_add(sechdrs, strtab, symindex, i,
- &mod);
- if (err < 0)
- return err;
+ if (sechdrs[i].sh_type == SHT_REL)
+ err = apply_relocations(sechdrs, strtab,
+ symindex, i, &mod);
+ else if (sechdrs[i].sh_type == SHT_RELA)
+ err = apply_relocate_add(sechdrs, strtab,
+ symindex, i, &mod);
+ if (err < 0)
+ return err;
- }
- } else {
- struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
+ }
+ } else {
+ struct elf_phdr *phdr = (struct elf_phdr *)
+ ((char *)hdr + hdr->e_phoff);
for (i = 0; i < hdr->e_phnum; i++) {
- if (phdr->p_type != PT_LOAD)
- continue;
-
- memcpy((void *)phdr->p_paddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
- memset((void *)phdr->p_paddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
- phdr++;
+ if (phdr->p_type == PT_LOAD) {
+ memcpy((void *)phdr->p_paddr,
+ (char *)hdr + phdr->p_offset,
+ phdr->p_filesz);
+ memset((void *)phdr->p_paddr + phdr->p_filesz,
+ 0, phdr->p_memsz - phdr->p_filesz);
+ }
+ phdr++;
}
for (i = 0; i < hdr->e_shnum; i++) {
- /* Internal symbols and strings. */
- if (sechdrs[i].sh_type == SHT_SYMTAB) {
- symindex = i;
- strindex = sechdrs[i].sh_link;
- strtab = (char *)hdr + sechdrs[strindex].sh_offset;
-
- /* mark the symtab's address for when we try to find the
- magic symbols */
- sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
- }
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr +
+ sechdrs[strindex].sh_offset;
+
+ /*
+ * mark symtab's address for when we try
+ * to find the magic symbols
+ */
+ sechdrs[i].sh_addr = (size_t) hdr +
+ sechdrs[i].sh_offset;
+ }
}
}
@@ -975,53 +733,19 @@ static int vpe_elfload(struct vpe * v)
if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
if (v->__start == 0) {
- printk(KERN_WARNING "VPE loader: program does not contain "
- "a __start symbol\n");
+ pr_warn("VPE loader: program does not contain a __start symbol\n");
return -ENOEXEC;
}
if (v->shared_ptr == NULL)
- printk(KERN_WARNING "VPE loader: "
- "program does not contain vpe_shared symbol.\n"
- " Unable to use AMVP (AP/SP) facilities.\n");
+ pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
+ " Unable to use AMVP (AP/SP) facilities.\n");
}
- printk(" elf loaded\n");
+ pr_info(" elf loaded\n");
return 0;
}
-static void cleanup_tc(struct tc *tc)
-{
- unsigned long flags;
- unsigned int mtflags, vpflags;
- int tmp;
-
- local_irq_save(flags);
- mtflags = dmt();
- vpflags = dvpe();
- /* Put MVPE's into 'configuration state' */
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- settc(tc->index);
- tmp = read_tc_c0_tcstatus();
-
- /* mark not allocated and not dynamically allocatable */
- tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
- tmp |= TCSTATUS_IXMT; /* interrupt exempt */
- write_tc_c0_tcstatus(tmp);
-
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
-
- /* bind it to anything other than VPE1 */
-// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
-
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
- evpe(vpflags);
- emt(mtflags);
- local_irq_restore(flags);
-}
-
static int getcwd(char *buff, int size)
{
mm_segment_t old_fs;
@@ -1037,58 +761,53 @@ static int getcwd(char *buff, int size)
return ret;
}
-/* checks VPE is unused and gets ready to load program */
+/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
enum vpe_state state;
- struct vpe_notifications *not;
+ struct vpe_notifications *notifier;
struct vpe *v;
int ret;
- if (minor != iminor(inode)) {
+ if (VPE_MODULE_MINOR != iminor(inode)) {
/* assume only 1 device at the moment. */
- printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
+ pr_warn("VPE loader: only vpe1 is supported\n");
+
return -ENODEV;
}
- if ((v = get_vpe(tclimit)) == NULL) {
- printk(KERN_WARNING "VPE loader: unable to get vpe\n");
+ v = get_vpe(aprp_cpu_index());
+ if (v == NULL) {
+ pr_warn("VPE loader: unable to get vpe\n");
+
return -ENODEV;
}
state = xchg(&v->state, VPE_STATE_INUSE);
if (state != VPE_STATE_UNUSED) {
- printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
+ pr_debug("VPE loader: tc in use dumping regs\n");
- list_for_each_entry(not, &v->notify, list) {
- not->stop(tclimit);
- }
+ list_for_each_entry(notifier, &v->notify, list)
+ notifier->stop(aprp_cpu_index());
release_progmem(v->load_addr);
- cleanup_tc(get_tc(tclimit));
+ cleanup_tc(get_tc(aprp_cpu_index()));
}
/* this of-course trashes what was there before... */
v->pbuffer = vmalloc(P_SIZE);
+ if (!v->pbuffer) {
+ pr_warn("VPE loader: unable to allocate memory\n");
+ return -ENOMEM;
+ }
v->plen = P_SIZE;
v->load_addr = NULL;
v->len = 0;
- v->uid = filp->f_uid;
- v->gid = filp->f_gid;
-
-#ifdef CONFIG_MIPS_APSP_KSPD
- /* get kspd to tell us when a syscall_exit happens */
- if (!kspd_events_reqd) {
- kspd_notify(&kspd_events);
- kspd_events_reqd++;
- }
-#endif
-
v->cwd[0] = 0;
ret = getcwd(v->cwd, VPE_PATH_MAX);
if (ret < 0)
- printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
+ pr_warn("VPE loader: open, getcwd returned %d\n", ret);
v->shared_ptr = NULL;
v->__start = 0;
@@ -1102,59 +821,53 @@ static int vpe_release(struct inode *inode, struct file *filp)
Elf_Ehdr *hdr;
int ret = 0;
- v = get_vpe(tclimit);
+ v = get_vpe(aprp_cpu_index());
if (v == NULL)
return -ENODEV;
hdr = (Elf_Ehdr *) v->pbuffer;
- if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
- if (vpe_elfload(v) >= 0) {
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
+ if ((vpe_elfload(v) >= 0) && vpe_run) {
vpe_run(v);
} else {
- printk(KERN_WARNING "VPE loader: ELF load failed.\n");
+ pr_warn("VPE loader: ELF load failed.\n");
ret = -ENOEXEC;
}
} else {
- printk(KERN_WARNING "VPE loader: only elf files are supported\n");
+ pr_warn("VPE loader: only elf files are supported\n");
ret = -ENOEXEC;
}
/* It's good to be able to run the SP and if it chokes have a look at
the /dev/rt?. But if we reset the pointer to the shared struct we
- loose what has happened. So perhaps if garbage is sent to the vpe
+ lose what has happened. So perhaps if garbage is sent to the vpe
device, use it as a trigger for the reset. Hopefully a nice
executable will be along shortly. */
if (ret < 0)
v->shared_ptr = NULL;
- // cleanup any temp buffers
- if (v->pbuffer)
- vfree(v->pbuffer);
+ vfree(v->pbuffer);
v->plen = 0;
+
return ret;
}
-static ssize_t vpe_write(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
+static ssize_t vpe_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
size_t ret = count;
struct vpe *v;
- if (iminor(file->f_path.dentry->d_inode) != minor)
+ if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
return -ENODEV;
- v = get_vpe(tclimit);
+ v = get_vpe(aprp_cpu_index());
+
if (v == NULL)
return -ENODEV;
- if (v->pbuffer == NULL) {
- printk(KERN_ERR "VPE loader: no buffer for program\n");
- return -ENOMEM;
- }
-
if ((count + v->len) > v->plen) {
- printk(KERN_WARNING
- "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
+ pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
return -ENOMEM;
}
@@ -1166,434 +879,48 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
return ret;
}
-static const struct file_operations vpe_fops = {
+const struct file_operations vpe_fops = {
.owner = THIS_MODULE,
.open = vpe_open,
.release = vpe_release,
- .write = vpe_write
+ .write = vpe_write,
+ .llseek = noop_llseek,
};
-/* module wrapper entry points */
-/* give me a vpe */
-vpe_handle vpe_alloc(void)
-{
- int i;
- struct vpe *v;
-
- /* find a vpe */
- for (i = 1; i < MAX_VPES; i++) {
- if ((v = get_vpe(i)) != NULL) {
- v->state = VPE_STATE_INUSE;
- return v;
- }
- }
- return NULL;
-}
-
-EXPORT_SYMBOL(vpe_alloc);
-
-/* start running from here */
-int vpe_start(vpe_handle vpe, unsigned long start)
-{
- struct vpe *v = vpe;
-
- v->__start = start;
- return vpe_run(v);
-}
-
-EXPORT_SYMBOL(vpe_start);
-
-/* halt it for now */
-int vpe_stop(vpe_handle vpe)
-{
- struct vpe *v = vpe;
- struct tc *t;
- unsigned int evpe_flags;
-
- evpe_flags = dvpe();
-
- if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
-
- settc(t->index);
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
- }
-
- evpe(evpe_flags);
-
- return 0;
-}
-
-EXPORT_SYMBOL(vpe_stop);
-
-/* I've done with it thank you */
-int vpe_free(vpe_handle vpe)
-{
- struct vpe *v = vpe;
- struct tc *t;
- unsigned int evpe_flags;
-
- if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
- return -ENOEXEC;
- }
-
- evpe_flags = dvpe();
-
- /* Put MVPE's into 'configuration state' */
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- settc(t->index);
- write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
-
- /* halt the TC */
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
-
- /* mark the TC unallocated */
- write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
-
- v->state = VPE_STATE_UNUSED;
-
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
- evpe(evpe_flags);
-
- return 0;
-}
-
-EXPORT_SYMBOL(vpe_free);
-
void *vpe_get_shared(int index)
{
- struct vpe *v;
+ struct vpe *v = get_vpe(index);
- if ((v = get_vpe(index)) == NULL)
+ if (v == NULL)
return NULL;
return v->shared_ptr;
}
-
EXPORT_SYMBOL(vpe_get_shared);
-int vpe_getuid(int index)
-{
- struct vpe *v;
-
- if ((v = get_vpe(index)) == NULL)
- return -1;
-
- return v->uid;
-}
-
-EXPORT_SYMBOL(vpe_getuid);
-
-int vpe_getgid(int index)
-{
- struct vpe *v;
-
- if ((v = get_vpe(index)) == NULL)
- return -1;
-
- return v->gid;
-}
-
-EXPORT_SYMBOL(vpe_getgid);
-
int vpe_notify(int index, struct vpe_notifications *notify)
{
- struct vpe *v;
+ struct vpe *v = get_vpe(index);
- if ((v = get_vpe(index)) == NULL)
+ if (v == NULL)
return -1;
list_add(&notify->list, &v->notify);
return 0;
}
-
EXPORT_SYMBOL(vpe_notify);
char *vpe_getcwd(int index)
{
- struct vpe *v;
+ struct vpe *v = get_vpe(index);
- if ((v = get_vpe(index)) == NULL)
+ if (v == NULL)
return NULL;
return v->cwd;
}
-
EXPORT_SYMBOL(vpe_getcwd);
-#ifdef CONFIG_MIPS_APSP_KSPD
-static void kspd_sp_exit( int sp_id)
-{
- cleanup_tc(get_tc(sp_id));
-}
-#endif
-
-static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct vpe *vpe = get_vpe(tclimit);
- struct vpe_notifications *not;
-
- list_for_each_entry(not, &vpe->notify, list) {
- not->stop(tclimit);
- }
-
- release_progmem(vpe->load_addr);
- cleanup_tc(get_tc(tclimit));
- vpe_stop(vpe);
- vpe_free(vpe);
-
- return len;
-}
-
-static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
- char *buf)
-{
- struct vpe *vpe = get_vpe(tclimit);
-
- return sprintf(buf, "%d\n", vpe->ntcs);
-}
-
-static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct vpe *vpe = get_vpe(tclimit);
- unsigned long new;
- char *endp;
-
- new = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
- goto out_einval;
-
- if (new == 0 || new > (hw_tcs - tclimit))
- goto out_einval;
-
- vpe->ntcs = new;
-
- return len;
-
-out_einval:
- return -EINVAL;;
-}
-
-static struct device_attribute vpe_class_attributes[] = {
- __ATTR(kill, S_IWUSR, NULL, store_kill),
- __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
- {}
-};
-
-static void vpe_device_release(struct device *cd)
-{
- kfree(cd);
-}
-
-struct class vpe_class = {
- .name = "vpe",
- .owner = THIS_MODULE,
- .dev_release = vpe_device_release,
- .dev_attrs = vpe_class_attributes,
-};
-
-struct device vpe_device;
-
-static int __init vpe_module_init(void)
-{
- unsigned int mtflags, vpflags;
- unsigned long flags, val;
- struct vpe *v = NULL;
- struct tc *t;
- int tc, err;
-
- if (!cpu_has_mipsmt) {
- printk("VPE loader: not a MIPS MT capable processor\n");
- return -ENODEV;
- }
-
- if (vpelimit == 0) {
- printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
- "initializing VPE loader.\nPass maxvpes=<n> argument as "
- "kernel argument\n");
-
- return -ENODEV;
- }
-
- if (tclimit == 0) {
- printk(KERN_WARNING "No TCs reserved for AP/SP, not "
- "initializing VPE loader.\nPass maxtcs=<n> argument as "
- "kernel argument\n");
-
- return -ENODEV;
- }
-
- major = register_chrdev(0, module_name, &vpe_fops);
- if (major < 0) {
- printk("VPE loader: unable to register character device\n");
- return major;
- }
-
- err = class_register(&vpe_class);
- if (err) {
- printk(KERN_ERR "vpe_class registration failed\n");
- goto out_chrdev;
- }
-
- device_initialize(&vpe_device);
- vpe_device.class = &vpe_class,
- vpe_device.parent = NULL,
- strlcpy(vpe_device.bus_id, "vpe1", BUS_ID_SIZE);
- vpe_device.devt = MKDEV(major, minor);
- err = device_add(&vpe_device);
- if (err) {
- printk(KERN_ERR "Adding vpe_device failed\n");
- goto out_class;
- }
-
- local_irq_save(flags);
- mtflags = dmt();
- vpflags = dvpe();
-
- /* Put MVPE's into 'configuration state' */
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- /* dump_mtregs(); */
-
- val = read_c0_mvpconf0();
- hw_tcs = (val & MVPCONF0_PTC) + 1;
- hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
- for (tc = tclimit; tc < hw_tcs; tc++) {
- /*
- * Must re-enable multithreading temporarily or in case we
- * reschedule send IPIs or similar we might hang.
- */
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
- evpe(vpflags);
- emt(mtflags);
- local_irq_restore(flags);
- t = alloc_tc(tc);
- if (!t) {
- err = -ENOMEM;
- goto out;
- }
-
- local_irq_save(flags);
- mtflags = dmt();
- vpflags = dvpe();
- set_c0_mvpcontrol(MVPCONTROL_VPC);
-
- /* VPE's */
- if (tc < hw_tcs) {
- settc(tc);
-
- if ((v = alloc_vpe(tc)) == NULL) {
- printk(KERN_WARNING "VPE: unable to allocate VPE\n");
-
- goto out_reenable;
- }
-
- v->ntcs = hw_tcs - tclimit;
-
- /* add the tc to the list of this vpe's tc's. */
- list_add(&t->tc, &v->tc);
-
- /* deactivate all but vpe0 */
- if (tc >= tclimit) {
- unsigned long tmp = read_vpe_c0_vpeconf0();
-
- tmp &= ~VPECONF0_VPA;
-
- /* master VPE */
- tmp |= VPECONF0_MVP;
- write_vpe_c0_vpeconf0(tmp);
- }
-
- /* disable multi-threading with TC's */
- write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
-
- if (tc >= vpelimit) {
- /*
- * Set config to be the same as vpe0,
- * particularly kseg0 coherency alg
- */
- write_vpe_c0_config(read_c0_config());
- }
- }
-
- /* TC's */
- t->pvpe = v; /* set the parent vpe */
-
- if (tc >= tclimit) {
- unsigned long tmp;
-
- settc(tc);
-
- /* Any TC that is bound to VPE0 gets left as is - in case
- we are running SMTC on VPE0. A TC that is bound to any
- other VPE gets bound to VPE0, ideally I'd like to make
- it homeless but it doesn't appear to let me bind a TC
- to a non-existent VPE. Which is perfectly reasonable.
-
- The (un)bound state is visible to an EJTAG probe so may
- notify GDB...
- */
-
- if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
- /* tc is bound >vpe0 */
- write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
-
- t->pvpe = get_vpe(0); /* set the parent vpe */
- }
-
- /* halt the TC */
- write_tc_c0_tchalt(TCHALT_H);
- mips_ihb();
-
- tmp = read_tc_c0_tcstatus();
-
- /* mark not activated and not dynamically allocatable */
- tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
- tmp |= TCSTATUS_IXMT; /* interrupt exempt */
- write_tc_c0_tcstatus(tmp);
- }
- }
-
-out_reenable:
- /* release config state */
- clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
- evpe(vpflags);
- emt(mtflags);
- local_irq_restore(flags);
-
-#ifdef CONFIG_MIPS_APSP_KSPD
- kspd_events.kspd_sp_exit = kspd_sp_exit;
-#endif
- return 0;
-
-out_class:
- class_unregister(&vpe_class);
-out_chrdev:
- unregister_chrdev(major, module_name);
-
-out:
- return err;
-}
-
-static void __exit vpe_module_exit(void)
-{
- struct vpe *v, *n;
-
- list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
- if (v->state != VPE_STATE_UNUSED) {
- release_vpe(v);
- }
- }
-
- device_del(&vpe_device);
- unregister_chrdev(major, module_name);
-}
-
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
new file mode 100644
index 00000000000..2a03abb5bd2
--- /dev/null
+++ b/arch/mips/kernel/watch.c
@@ -0,0 +1,196 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 David Daney
+ */
+
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+#include <asm/watch.h>
+
+/*
+ * Install the watch registers for the current thread. A maximum of
+ * four registers are installed although the machine may have more.
+ */
+void mips_install_watch_registers(void)
+{
+ struct mips3264_watch_reg_state *watches =
+ &current->thread.watch.mips3264;
+ switch (current_cpu_data.watch_reg_use_cnt) {
+ default:
+ BUG();
+ case 4:
+ write_c0_watchlo3(watches->watchlo[3]);
+ /* Write 1 to the I, R, and W bits to clear them, and
+ 1 to G so all ASIDs are trapped. */
+ write_c0_watchhi3(0x40000007 | watches->watchhi[3]);
+ case 3:
+ write_c0_watchlo2(watches->watchlo[2]);
+ write_c0_watchhi2(0x40000007 | watches->watchhi[2]);
+ case 2:
+ write_c0_watchlo1(watches->watchlo[1]);
+ write_c0_watchhi1(0x40000007 | watches->watchhi[1]);
+ case 1:
+ write_c0_watchlo0(watches->watchlo[0]);
+ write_c0_watchhi0(0x40000007 | watches->watchhi[0]);
+ }
+}
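+
+The 0x40000007 constant ORed into each WatchHi above decodes as follows (MIPS32 PRA field names; the defines below are illustrative, not from this file): bit 30 is G, matching all ASIDs, and bits 2:0 are I, R and W, which are write-one-to-clear, so setting them on install wipes any stale condition bits.
+
+/* WatchHi fields used above, spelled out (illustrative defines). */
+#define WATCHHI_G	(1u << 30)	/* match any ASID */
+#define WATCHHI_I	(1u << 2)	/* instruction fetch match */
+#define WATCHHI_R	(1u << 1)	/* data load match */
+#define WATCHHI_W	(1u << 0)	/* data store match */
+/* 0x40000007 == WATCHHI_G | WATCHHI_I | WATCHHI_R | WATCHHI_W */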
+
+/*
+ * Read back the watchhi registers so the user space debugger has
+ * access to the I, R, and W bits. A maximum of four registers are
+ * read although the machine may have more.
+ */
+void mips_read_watch_registers(void)
+{
+ struct mips3264_watch_reg_state *watches =
+ &current->thread.watch.mips3264;
+ switch (current_cpu_data.watch_reg_use_cnt) {
+ default:
+ BUG();
+ case 4:
+ watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff);
+ case 3:
+ watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff);
+ case 2:
+ watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff);
+ case 1:
+ watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff);
+ }
+ if (current_cpu_data.watch_reg_use_cnt == 1 &&
+ (watches->watchhi[0] & 7) == 0) {
+ /* Pathological case of release 1 architecture that
+ * doesn't set the condition bits. We assume that
+ * since we got here, the watch condition was met and
+ * signal that the conditions requested in watchlo
+ * were met. */
+ watches->watchhi[0] |= (watches->watchlo[0] & 7);
+ }
+ }
+
+/*
+ * Disable all watch registers. Although only four registers are
+ * installed, all are cleared to eliminate the possibility of endless
+ * looping in the watch handler.
+ */
+void mips_clear_watch_registers(void)
+{
+ switch (current_cpu_data.watch_reg_count) {
+ default:
+ BUG();
+ case 8:
+ write_c0_watchlo7(0);
+ case 7:
+ write_c0_watchlo6(0);
+ case 6:
+ write_c0_watchlo5(0);
+ case 5:
+ write_c0_watchlo4(0);
+ case 4:
+ write_c0_watchlo3(0);
+ case 3:
+ write_c0_watchlo2(0);
+ case 2:
+ write_c0_watchlo1(0);
+ case 1:
+ write_c0_watchlo0(0);
+ }
+}
+
+void mips_probe_watch_registers(struct cpuinfo_mips *c)
+{
+ unsigned int t;
+
+ if ((c->options & MIPS_CPU_WATCH) == 0)
+ return;
+ /*
+ * Check which of the I,R and W bits are supported, then
+ * disable the register.
+ */
+ write_c0_watchlo0(7);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo0();
+ write_c0_watchlo0(0);
+ c->watch_reg_masks[0] = t & 7;
+
+ /* Write the mask bits and read them back to determine which
+ * can be used. */
+ c->watch_reg_count = 1;
+ c->watch_reg_use_cnt = 1;
+ t = read_c0_watchhi0();
+ write_c0_watchhi0(t | 0xff8);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi0();
+ c->watch_reg_masks[0] |= (t & 0xff8);
+ if ((t & 0x80000000) == 0)
+ return;
+
+ write_c0_watchlo1(7);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo1();
+ write_c0_watchlo1(0);
+ c->watch_reg_masks[1] = t & 7;
+
+ c->watch_reg_count = 2;
+ c->watch_reg_use_cnt = 2;
+ t = read_c0_watchhi1();
+ write_c0_watchhi1(t | 0xff8);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi1();
+ c->watch_reg_masks[1] |= (t & 0xff8);
+ if ((t & 0x80000000) == 0)
+ return;
+
+ write_c0_watchlo2(7);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo2();
+ write_c0_watchlo2(0);
+ c->watch_reg_masks[2] = t & 7;
+
+ c->watch_reg_count = 3;
+ c->watch_reg_use_cnt = 3;
+ t = read_c0_watchhi2();
+ write_c0_watchhi2(t | 0xff8);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi2();
+ c->watch_reg_masks[2] |= (t & 0xff8);
+ if ((t & 0x80000000) == 0)
+ return;
+
+ write_c0_watchlo3(7);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo3();
+ write_c0_watchlo3(0);
+ c->watch_reg_masks[3] = t & 7;
+
+ c->watch_reg_count = 4;
+ c->watch_reg_use_cnt = 4;
+ t = read_c0_watchhi3();
+ write_c0_watchhi3(t | 0xff8);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi3();
+ c->watch_reg_masks[3] |= (t & 0xff8);
+ if ((t & 0x80000000) == 0)
+ return;
+
+ /* We use at most 4, but probe and report up to 8. */
+ c->watch_reg_count = 5;
+ t = read_c0_watchhi4();
+ if ((t & 0x80000000) == 0)
+ return;
+
+ c->watch_reg_count = 6;
+ t = read_c0_watchhi5();
+ if ((t & 0x80000000) == 0)
+ return;
+
+ c->watch_reg_count = 7;
+ t = read_c0_watchhi6();
+ if ((t & 0x80000000) == 0)
+ return;
+
+ c->watch_reg_count = 8;
+}
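
The probe above is a write-then-read-back discovery repeated once per register: set every candidate mask bit, let the coprocessor-0 write retire, and keep whatever reads back as set. The unrolling is forced by the architecture, since the c0 register number is encoded in the instruction; as a sketch with hypothetical function-pointer accessors, the pattern is:

/* Generalized write/read-back probe (sketch only; 'write' and
 * 'read' stand in for the fixed write_c0_watch*()/read_c0_watch*()
 * accessors, which cannot be indexed at run time). */
static unsigned int probe_bits(void (*write)(unsigned int),
			       unsigned int (*read)(void),
			       unsigned int candidate)
{
	write(candidate);		/* request all candidate bits */
	back_to_back_c0_hazard();	/* settle the c0 write */
	return read() & candidate;	/* implemented bits read back */
}
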