Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/.gitignore | 1
-rw-r--r--  arch/sh/kernel/Makefile | 54
-rw-r--r--  arch/sh/kernel/Makefile_32 | 27
-rw-r--r--  arch/sh/kernel/Makefile_64 | 23
-rw-r--r--  arch/sh/kernel/asm-offsets.c | 39
-rw-r--r--  arch/sh/kernel/cf-enabler.c | 165
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 5
-rw-r--r--  arch/sh/kernel/cpu/adc.c | 12
-rw-r--r--  arch/sh/kernel/cpu/clock-cpg.c | 78
-rw-r--r--  arch/sh/kernel/cpu/clock.c | 346
-rw-r--r--  arch/sh/kernel/cpu/fpu.c | 86
-rw-r--r--  arch/sh/kernel/cpu/init.c | 184
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile | 2
-rw-r--r--  arch/sh/kernel/cpu/irq/imask.c | 71
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c | 167
-rw-r--r--  arch/sh/kernel/cpu/irq/intc.c | 608
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c | 50
-rw-r--r--  arch/sh/kernel/cpu/pfc.c | 33
-rw-r--r--  arch/sh/kernel/cpu/proc.c | 150
-rw-r--r--  arch/sh/kernel/cpu/sh2/clock-sh7619.c | 52
-rw-r--r--  arch/sh/kernel/cpu/sh2/entry.S | 138
-rw-r--r--  arch/sh/kernel/cpu/sh2/ex.S | 15
-rw-r--r--  arch/sh/kernel/cpu/sh2/probe.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh2/setup-sh7619.c | 188
-rw-r--r--  arch/sh/kernel/cpu/sh2a/Makefile | 19
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 85
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7203.c | 52
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7206.c | 48
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7264.c | 153
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7269.c | 184
-rw-r--r--  arch/sh/kernel/cpu/sh2a/entry.S | 250
-rw-r--r--  arch/sh/kernel/cpu/sh2a/ex.S | 73
-rw-r--r--  arch/sh/kernel/cpu/sh2a/fpu.c | 115
-rw-r--r--  arch/sh/kernel/cpu/sh2a/opcode_helper.c | 1
-rw-r--r--  arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c | 31
-rw-r--r--  arch/sh/kernel/cpu/sh2a/probe.c | 22
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-mxg.c | 178
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 428
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 408
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 323
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7264.c | 570
-rw-r--r--  arch/sh/kernel/cpu/sh2a/setup-sh7269.c | 586
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile | 26
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh3.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7705.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7706.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7709.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7710.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7712.c | 24
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 485
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 44
-rw-r--r--  arch/sh/kernel/cpu/sh3/pinmux-sh7720.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh770x.c | 33
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh7710.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh3/serial-sh7720.c | 37
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh3.c | 71
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c | 172
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh770x.c | 231
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c | 180
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7720.c | 240
-rw-r--r--  arch/sh/kernel/cpu/sh3/swsusp.S | 147
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile | 9
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c | 79
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c | 180
-rw-r--r--  arch/sh/kernel/cpu/sh4/perf_event.c | 268
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 176
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 124
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 238
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 201
-rw-r--r--  arch/sh/kernel/cpu/sh4/softfloat.c | 104
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c | 54
-rw-r--r--  arch/sh/kernel/cpu/sh4a/Makefile | 35
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 314
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7366.c | 282
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 774
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 313
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 377
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7734.c | 260
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 155
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7763.c | 79
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7770.c | 32
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7780.c | 77
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7785.c | 251
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-sh7786.c | 192
-rw-r--r--  arch/sh/kernel/cpu/sh4a/clock-shx3.c | 196
-rw-r--r--  arch/sh/kernel/cpu/sh4a/intc-shx3.c | 34
-rw-r--r--  arch/sh/kernel/cpu/sh4a/perf_event.c | 302
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c | 20
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c | 35
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c | 35
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c | 35
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c | 30
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c | 35
-rw-r--r--  arch/sh/kernel/cpu/sh4a/pinmux-shx3.c | 29
-rw-r--r--  arch/sh/kernel/cpu/sh4a/serial-sh7722.c | 23
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 431
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 263
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 533
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 661
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 1305
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7734.c | 629
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 1247
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 324
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 576
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 387
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 490
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 848
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-shx3.c | 236
-rw-r--r--  arch/sh/kernel/cpu/sh4a/smp-shx3.c | 156
-rw-r--r--  arch/sh/kernel/cpu/sh4a/ubc.c | 133
-rw-r--r--  arch/sh/kernel/cpu/sh5/Makefile | 8
-rw-r--r--  arch/sh/kernel/cpu/sh5/clock-sh5.c | 79
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 254
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c | 68
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c | 8
-rw-r--r--  arch/sh/kernel/cpu/sh5/setup-sh5.c | 123
-rw-r--r--  arch/sh/kernel/cpu/sh5/unwind.c | 63
-rw-r--r--  arch/sh/kernel/cpu/shmobile/Makefile | 7
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c | 101
-rw-r--r--  arch/sh/kernel/cpu/shmobile/pm.c | 156
-rw-r--r--  arch/sh/kernel/cpu/shmobile/sleep.S | 405
-rw-r--r--  arch/sh/kernel/cpu/ubc.S | 59
-rw-r--r--  arch/sh/kernel/cpufreq.c | 158
-rw-r--r--  arch/sh/kernel/crash_dump.c | 1
-rw-r--r--  arch/sh/kernel/debugtraps.S | 12
-rw-r--r--  arch/sh/kernel/disassemble.c | 573
-rw-r--r--  arch/sh/kernel/dma-nommu.c | 82
-rw-r--r--  arch/sh/kernel/dump_task.c | 32
-rw-r--r--  arch/sh/kernel/dumpstack.c | 160
-rw-r--r--  arch/sh/kernel/dwarf.c | 1210
-rw-r--r--  arch/sh/kernel/early_printk.c | 232
-rw-r--r--  arch/sh/kernel/entry-common.S | 203
-rw-r--r--  arch/sh/kernel/ftrace.c | 398
-rw-r--r--  arch/sh/kernel/head_32.S | 245
-rw-r--r--  arch/sh/kernel/head_64.S | 11
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c | 421
-rw-r--r--  arch/sh/kernel/idle.c | 61
-rw-r--r--  arch/sh/kernel/init_task.c | 36
-rw-r--r--  arch/sh/kernel/io.c | 116
-rw-r--r--  arch/sh/kernel/io_generic.c | 224
-rw-r--r--  arch/sh/kernel/io_trapped.c | 58
-rw-r--r--  arch/sh/kernel/iomap.c | 165
-rw-r--r--  arch/sh/kernel/ioport.c | 43
-rw-r--r--  arch/sh/kernel/irq.c | 193
-rw-r--r--  arch/sh/kernel/irq_32.c | 57
-rw-r--r--  arch/sh/kernel/irq_64.c | 51
-rw-r--r--  arch/sh/kernel/kdebugfs.c | 16
-rw-r--r--  arch/sh/kernel/kgdb.c | 390
-rw-r--r--  arch/sh/kernel/kgdb_jmp.S | 33
-rw-r--r--  arch/sh/kernel/kgdb_stub.c | 1061
-rw-r--r--  arch/sh/kernel/kprobes.c | 585
-rw-r--r--  arch/sh/kernel/localtimer.c | 66
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 143
-rw-r--r--  arch/sh/kernel/machvec.c | 21
-rw-r--r--  arch/sh/kernel/module.c | 88
-rw-r--r--  arch/sh/kernel/nmi_debug.c | 77
-rw-r--r--  arch/sh/kernel/perf_callchain.c | 41
-rw-r--r--  arch/sh/kernel/perf_event.c | 400
-rw-r--r--  arch/sh/kernel/pm.c | 88
-rw-r--r--  arch/sh/kernel/process.c | 76
-rw-r--r--  arch/sh/kernel/process_32.c | 404
-rw-r--r--  arch/sh/kernel/process_64.c | 314
-rw-r--r--  arch/sh/kernel/ptrace.c | 33
-rw-r--r--  arch/sh/kernel/ptrace_32.c | 578
-rw-r--r--  arch/sh/kernel/ptrace_64.c | 476
-rw-r--r--  arch/sh/kernel/reboot.c | 102
-rw-r--r--  arch/sh/kernel/relocate_kernel.S | 203
-rw-r--r--  arch/sh/kernel/return_address.c | 59
-rw-r--r--  arch/sh/kernel/semaphore.c | 139
-rw-r--r--  arch/sh/kernel/setup.c | 419
-rw-r--r--  arch/sh/kernel/sh_bios.c | 178
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c | 138
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 34
-rw-r--r--  arch/sh/kernel/signal_32.c | 307
-rw-r--r--  arch/sh/kernel/signal_64.c | 418
-rw-r--r--  arch/sh/kernel/smp.c | 265
-rw-r--r--  arch/sh/kernel/stacktrace.c | 80
-rw-r--r--  arch/sh/kernel/swsusp.c | 38
-rw-r--r--  arch/sh/kernel/sys_sh.c | 270
-rw-r--r--  arch/sh/kernel/sys_sh32.c | 37
-rw-r--r--  arch/sh/kernel/sys_sh64.c | 66
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 59
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 39
-rw-r--r--  arch/sh/kernel/time.c | 115
-rw-r--r--  arch/sh/kernel/time_32.c | 280
-rw-r--r--  arch/sh/kernel/time_64.c | 522
-rw-r--r--  arch/sh/kernel/timers/Makefile | 10
-rw-r--r--  arch/sh/kernel/timers/timer-cmt.c | 189
-rw-r--r--  arch/sh/kernel/timers/timer-mtu2.c | 197
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c | 222
-rw-r--r--  arch/sh/kernel/timers/timer.c | 55
-rw-r--r--  arch/sh/kernel/topology.c | 35
-rw-r--r--  arch/sh/kernel/traps.c | 137
-rw-r--r--  arch/sh/kernel/traps_32.c | 490
-rw-r--r--  arch/sh/kernel/traps_64.c | 659
-rw-r--r--  arch/sh/kernel/unwinder.c | 164
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 88
-rw-r--r--  arch/sh/kernel/vmlinux_32.lds.S | 152
-rw-r--r--  arch/sh/kernel/vmlinux_64.lds.S | 164
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile | 2
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-sigreturn.S | 35
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S | 21
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c | 13
208 files changed, 26751 insertions, 12269 deletions
diff --git a/arch/sh/kernel/.gitignore b/arch/sh/kernel/.gitignore
new file mode 100644
index 00000000000..c5f676c3c22
--- /dev/null
+++ b/arch/sh/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 349d833deab..2ccf36c824c 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -1,5 +1,51 @@
-ifeq ($(CONFIG_SUPERH32),y)
-include ${srctree}/arch/sh/kernel/Makefile_32
-else
-include ${srctree}/arch/sh/kernel/Makefile_64
+#
+# Makefile for the Linux/SuperH kernel.
+#
+
+extra-y := head_$(BITS).o vmlinux.lds
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o = -pg
endif
+
+CFLAGS_REMOVE_return_address.o = -pg
+
+obj-y := debugtraps.o dma-nommu.o dumpstack.o \
+ idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
+ machvec.o nmi_debug.o process.o \
+ process_$(BITS).o ptrace.o ptrace_$(BITS).o \
+ reboot.o return_address.o \
+ setup.o signal_$(BITS).o sys_sh.o \
+ syscalls_$(BITS).o time.o topology.o traps.o \
+ traps_$(BITS).o unwinder.o
+
+ifndef CONFIG_GENERIC_IOMAP
+obj-y += iomap.o
+obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
+endif
+
+obj-$(CONFIG_SUPERH32) += sys_sh32.o
+obj-y += cpu/
+obj-$(CONFIG_VSYSCALL) += vsyscall/
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_DUMP_CODE) += disassemble.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o
+
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o
+
+ccflags-y := -Werror
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
deleted file mode 100644
index 62bf373266f..00000000000
--- a/arch/sh/kernel/Makefile_32
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Makefile for the Linux/SuperH kernel.
-#
-
-extra-y := head_32.o init_task.o vmlinux.lds
-
-obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
- ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
- syscalls_32.o time_32.o topology.o traps.o traps_32.o
-
-obj-y += cpu/ timers/
-obj-$(CONFIG_VSYSCALL) += vsyscall/
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
-obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o
-obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_BINFMT_ELF) += dump_task.o
-obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
deleted file mode 100644
index e01283d49cb..00000000000
--- a/arch/sh/kernel/Makefile_64
+++ /dev/null
@@ -1,23 +0,0 @@
-extra-y := head_64.o init_task.o vmlinux.lds
-
-obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
- ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
- syscalls_64.o time_64.o topology.o traps.o traps_64.o
-
-obj-y += cpu/ timers/
-obj-$(CONFIG_VSYSCALL) += vsyscall/
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
-obj-$(CONFIG_SH_KGDB) += kgdb_stub.o kgdb_jmp.o
-obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
-obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_BINFMT_ELF) += dump_task.o
-obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c
index dc6725c51a8..08a2be775b6 100644
--- a/arch/sh/kernel/asm-offsets.c
+++ b/arch/sh/kernel/asm-offsets.c
@@ -11,12 +11,11 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/mm.h>
-#include <asm/thread_info.h>
-
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
-#define BLANK() asm volatile("\n->" : : )
+#include <asm/thread_info.h>
+#include <asm/suspend.h>
int main(void)
{
@@ -27,6 +26,36 @@ int main(void)
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
+ DEFINE(TI_SIZE, sizeof(struct thread_info));
+
+#ifdef CONFIG_HIBERNATION
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
+#endif
+ DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
+ DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
+ DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
+ DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
+ DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
+ DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
+ DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
+ DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
+ DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
+ DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
+ DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
+ DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
+ DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
+ DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
+ DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
+ DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
+ DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
+ DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
+ DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
+ DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
+ DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
+ DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
return 0;
}
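
The DEFINE() entries above never execute; asm-offsets.c is only ever compiled to assembly, and kbuild scrapes the "->SYMBOL value" markers out of that .s file to generate the asm-offsets.h header consumed by entry and sleep code. A minimal sketch of the mechanism, reusing the marker pattern shown in the removed local DEFINE() (and now supplied by <linux/kbuild.h>); the struct and symbol names here are purely illustrative:

#include <stddef.h>

struct example_info {			/* stand-in for thread_info, illustration only */
	unsigned long flags;
	int cpu;
};

#define EXAMPLE_DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void example_offsets(void)
{
	/*
	 * Emits "->EX_FLAGS <offset> ..." markers into the generated .s
	 * file; the file is never assembled, so the markers only need to
	 * survive compilation with -S.
	 */
	EXAMPLE_DEFINE(EX_FLAGS, offsetof(struct example_info, flags));
	EXAMPLE_DEFINE(EX_CPU, offsetof(struct example_info, cpu));
}
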
diff --git a/arch/sh/kernel/cf-enabler.c b/arch/sh/kernel/cf-enabler.c
deleted file mode 100644
index 1c3b99642e1..00000000000
--- a/arch/sh/kernel/cf-enabler.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/* $Id: cf-enabler.c,v 1.4 2004/02/22 22:44:36 kkojima Exp $
- *
- * linux/drivers/block/cf-enabler.c
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2000 Toshiharu Nozawa
- * Copyright (C) 2001 A&D Co., Ltd.
- *
- * Enable the CF configuration.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-/*
- * You can connect Compact Flash directly to the bus of SuperH.
- * This is the enabler for that.
- *
- * SIM: How generic is this really? It looks pretty board, or at
- * least SH sub-type, specific to me.
- * I know it doesn't work on the Overdrive!
- */
-
-/*
- * 0xB8000000 : Attribute
- * 0xB8001000 : Common Memory
- * 0xBA000000 : I/O
- */
-#if defined(CONFIG_CPU_SH4)
-/* SH4 can't access PCMCIA interface through P2 area.
- * we must remap it with appropriate attribute bit of the page set.
- * this part is based on Greg Banks' hd64465_ss.c implementation - Masahiro Abe */
-
-#if defined(CONFIG_CF_AREA6)
-#define slot_no 0
-#else
-#define slot_no 1
-#endif
-
-/* use this pointer to access to directly connected compact flash io area*/
-void *cf_io_base;
-
-static int __init allocate_cf_area(void)
-{
- pgprot_t prot;
- unsigned long paddrbase, psize;
-
- /* open I/O area window */
- paddrbase = virt_to_phys((void*)CONFIG_CF_BASE_ADDR);
- psize = PAGE_SIZE;
- prot = PAGE_KERNEL_PCC(slot_no, _PAGE_PCC_IO16);
- cf_io_base = p3_ioremap(paddrbase, psize, prot.pgprot);
- if (!cf_io_base) {
- printk("allocate_cf_area : can't open CF I/O window!\n");
- return -ENOMEM;
- }
-/* printk("p3_ioremap(paddr=0x%08lx, psize=0x%08lx, prot=0x%08lx)=0x%08lx\n",
- paddrbase, psize, prot.pgprot, cf_io_base);*/
-
- /* XXX : do we need attribute and common-memory area also? */
-
- return 0;
-}
-#endif
-
-static int __init cf_init_default(void)
-{
-/* You must have enabled the card, and set the level interrupt
- * before reaching this point. Possibly in boot ROM or boot loader.
- */
-#if defined(CONFIG_CPU_SH4)
- allocate_cf_area();
-#endif
-
- return 0;
-}
-
-#if defined(CONFIG_SH_SOLUTION_ENGINE)
-#include <asm/se.h>
-#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE)
-#include <asm/se7722.h>
-#endif
-
-/*
- * SolutionEngine Seriese
- *
- * about MS770xSE
- * 0xB8400000 : Common Memory
- * 0xB8500000 : Attribute
- * 0xB8600000 : I/O
- *
- * about MS7722SE
- * 0xB0400000 : Common Memory
- * 0xB0500000 : Attribute
- * 0xB0600000 : I/O
- */
-
-#if defined(CONFIG_SH_SOLUTION_ENGINE) || defined(CONFIG_SH_7722_SOLUTION_ENGINE)
-static int __init cf_init_se(void)
-{
- if ((ctrl_inw(MRSHPC_CSR) & 0x000c) != 0)
- return 0; /* Not detected */
-
- if ((ctrl_inw(MRSHPC_CSR) & 0x0080) == 0) {
- ctrl_outw(0x0674, MRSHPC_CPWCR); /* Card Vcc is 3.3v? */
- } else {
- ctrl_outw(0x0678, MRSHPC_CPWCR); /* Card Vcc is 5V */
- }
-
- /*
- * PC-Card window open
- * flag == COMMON/ATTRIBUTE/IO
- */
- /* common window open */
- ctrl_outw(0x8a84, MRSHPC_MW0CR1);
- if((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0)
- /* common mode & bus width 16bit SWAP = 1*/
- ctrl_outw(0x0b00, MRSHPC_MW0CR2);
- else
- /* common mode & bus width 16bit SWAP = 0*/
- ctrl_outw(0x0300, MRSHPC_MW0CR2);
-
- /* attribute window open */
- ctrl_outw(0x8a85, MRSHPC_MW1CR1);
- if ((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0)
- /* attribute mode & bus width 16bit SWAP = 1*/
- ctrl_outw(0x0a00, MRSHPC_MW1CR2);
- else
- /* attribute mode & bus width 16bit SWAP = 0*/
- ctrl_outw(0x0200, MRSHPC_MW1CR2);
-
- /* I/O window open */
- ctrl_outw(0x8a86, MRSHPC_IOWCR1);
- ctrl_outw(0x0008, MRSHPC_CDCR); /* I/O card mode */
- if ((ctrl_inw(MRSHPC_CSR) & 0x4000) != 0)
- ctrl_outw(0x0a00, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 1*/
- else
- ctrl_outw(0x0200, MRSHPC_IOWCR2); /* bus width 16bit SWAP = 0*/
-
- ctrl_outw(0x2000, MRSHPC_ICR);
- ctrl_outb(0x00, PA_MRSHPC_MW2 + 0x206);
- ctrl_outb(0x42, PA_MRSHPC_MW2 + 0x200);
- return 0;
-}
-#else
-static int __init cf_init_se(void)
-{
- return -1;
-}
-#endif
-
-int __init cf_init(void)
-{
- if( mach_is_se() || mach_is_7722se() ){
- return cf_init_se();
- }
-
- return cf_init_default();
-}
-
-__initcall (cf_init);
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index f471d242774..accc7ca722e 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -11,10 +11,11 @@ obj-$(CONFIG_CPU_SH5) = sh5/
# Special cases for family ancestry.
obj-$(CONFIG_CPU_SH4A) += sh4a/
+obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/
# Common interfaces.
-obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
+obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o
-obj-y += irq/ init.o clock.o
+obj-y += irq/ init.o clock.o fpu.o pfc.o proc.o
diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c
index da3d6877f93..d307571d54b 100644
--- a/arch/sh/kernel/cpu/adc.c
+++ b/arch/sh/kernel/cpu/adc.c
@@ -18,19 +18,19 @@ int adc_single(unsigned int channel)
off = (channel & 0x03) << 2;
- csr = ctrl_inb(ADCSR);
+ csr = __raw_readb(ADCSR);
csr = channel | ADCSR_ADST | ADCSR_CKS;
- ctrl_outb(csr, ADCSR);
+ __raw_writeb(csr, ADCSR);
do {
- csr = ctrl_inb(ADCSR);
+ csr = __raw_readb(ADCSR);
} while ((csr & ADCSR_ADF) == 0);
csr &= ~(ADCSR_ADF | ADCSR_ADST);
- ctrl_outb(csr, ADCSR);
+ __raw_writeb(csr, ADCSR);
- return (((ctrl_inb(ADDRAH + off) << 8) |
- ctrl_inb(ADDRAL + off)) >> 6);
+ return (((__raw_readb(ADDRAH + off) << 8) |
+ __raw_readb(ADDRAL + off)) >> 6);
}
EXPORT_SYMBOL(adc_single);
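
A hedged usage sketch for the exported accessor above: adc_single() samples the requested A/D channel and returns the 10-bit conversion result (the (ADDRAH << 8 | ADDRAL) >> 6 computation in the function is what narrows it). The header path and the channel number are assumptions for illustration:

#include <asm/adc.h>	/* assumed location of the adc_single() prototype */

static unsigned int example_read_adc_channel0(void)
{
	/* channel 0 wired to, e.g., a board thermistor -- purely illustrative */
	return adc_single(0);
}
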
diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c
new file mode 100644
index 00000000000..8525a671266
--- /dev/null
+++ b/arch/sh/kernel/cpu/clock-cpg.c
@@ -0,0 +1,78 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+static struct clk master_clk = {
+ .flags = CLK_ENABLE_ON_INIT,
+ .rate = CONFIG_SH_PCLK_FREQ,
+};
+
+static struct clk peripheral_clk = {
+ .parent = &master_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk bus_clk = {
+ .parent = &master_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk cpu_clk = {
+ .parent = &master_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+/*
+ * The ordering of these clocks matters, do not change it.
+ */
+static struct clk *onchip_clocks[] = {
+ &master_clk,
+ &peripheral_clk,
+ &bus_clk,
+ &cpu_clk,
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("master_clk", &master_clk),
+ CLKDEV_CON_ID("peripheral_clk", &peripheral_clk),
+ CLKDEV_CON_ID("bus_clk", &bus_clk),
+ CLKDEV_CON_ID("cpu_clk", &cpu_clk),
+};
+
+int __init __deprecated cpg_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
+ struct clk *clk = onchip_clocks[i];
+ arch_init_clk_ops(&clk->ops, i);
+ if (clk->ops)
+ ret |= clk_register(clk);
+ }
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ clk_add_alias("fck", "sh-tmu-sh3.0", "peripheral_clk", NULL);
+ clk_add_alias("fck", "sh-tmu.0", "peripheral_clk", NULL);
+ clk_add_alias("fck", "sh-tmu.1", "peripheral_clk", NULL);
+ clk_add_alias("fck", "sh-tmu.2", "peripheral_clk", NULL);
+ clk_add_alias("fck", "sh-mtu2", "peripheral_clk", NULL);
+ clk_add_alias("fck", "sh-cmt-16.0", "peripheral_clk", NULL);
+ clk_add_alias("fck", "sh-cmt-32.0", "peripheral_clk", NULL);
+ clk_add_alias("sci_ick", NULL, "peripheral_clk", NULL);
+
+ return ret;
+}
+
+/*
+ * Placeholder for compatibility, until the lazy CPUs do this
+ * on their own.
+ */
+int __init __weak arch_clk_init(void)
+{
+ return cpg_clk_init();
+}
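
Consumer-side sketch: once cpg_clk_init() has added the lookups and "fck" aliases above, a driver bound to one of those device names (for example "sh-tmu.0") obtains its functional clock through the ordinary clk API. This is the legacy pre-clk_prepare() interface used by this code base; error handling is trimmed for brevity:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static unsigned long example_module_clock_rate(struct device *dev)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(dev, "fck");	/* resolved via the clk_add_alias() entries */
	if (IS_ERR(clk))
		return 0;

	clk_enable(clk);
	rate = clk_get_rate(clk);
	clk_disable(clk);
	clk_put(clk);

	return rate;
}
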
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index b5f1e23ed57..4187cf4fe18 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -1,11 +1,11 @@
/*
* arch/sh/kernel/cpu/clock.c - SuperH clock framework
*
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
+ * Copyright (C) 2005 - 2009 Paul Mundt
*
* This clock framework is derived from the OMAP version by:
*
- * Copyright (C) 2004 - 2005 Nokia Corporation
+ * Copyright (C) 2004 - 2008 Nokia Corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
* Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
@@ -16,344 +16,36 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/seq_file.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/proc_fs.h>
+#include <linux/clk.h>
#include <asm/clock.h>
-#include <asm/timer.h>
+#include <asm/machvec.h>
-static LIST_HEAD(clock_list);
-static DEFINE_SPINLOCK(clock_lock);
-static DEFINE_MUTEX(clock_list_sem);
-
-/*
- * Each subtype is expected to define the init routines for these clocks,
- * as each subtype (or processor family) will have these clocks at the
- * very least. These are all provided through the CPG, which even some of
- * the more quirky parts (such as ST40, SH4-202, etc.) still have.
- *
- * The processor-specific code is expected to register any additional
- * clock sources that are of interest.
- */
-static struct clk master_clk = {
- .name = "master_clk",
- .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
- .rate = CONFIG_SH_PCLK_FREQ,
-};
-
-static struct clk module_clk = {
- .name = "module_clk",
- .parent = &master_clk,
- .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
-};
-
-static struct clk bus_clk = {
- .name = "bus_clk",
- .parent = &master_clk,
- .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
-};
-
-static struct clk cpu_clk = {
- .name = "cpu_clk",
- .parent = &master_clk,
- .flags = CLK_ALWAYS_ENABLED,
-};
-
-/*
- * The ordering of these clocks matters, do not change it.
- */
-static struct clk *onchip_clocks[] = {
- &master_clk,
- &module_clk,
- &bus_clk,
- &cpu_clk,
-};
-
-static void propagate_rate(struct clk *clk)
-{
- struct clk *clkp;
-
- list_for_each_entry(clkp, &clock_list, node) {
- if (likely(clkp->parent != clk))
- continue;
- if (likely(clkp->ops && clkp->ops->recalc))
- clkp->ops->recalc(clkp);
- if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
- propagate_rate(clkp);
- }
-}
-
-int __clk_enable(struct clk *clk)
-{
- /*
- * See if this is the first time we're enabling the clock, some
- * clocks that are always enabled still require "special"
- * initialization. This is especially true if the clock mode
- * changes and the clock needs to hunt for the proper set of
- * divisors to use before it can effectively recalc.
- */
- if (unlikely(atomic_read(&clk->kref.refcount) == 1))
- if (clk->ops && clk->ops->init)
- clk->ops->init(clk);
-
- kref_get(&clk->kref);
-
- if (clk->flags & CLK_ALWAYS_ENABLED)
- return 0;
-
- if (likely(clk->ops && clk->ops->enable))
- clk->ops->enable(clk);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__clk_enable);
-
-int clk_enable(struct clk *clk)
+int __init clk_init(void)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&clock_lock, flags);
- ret = __clk_enable(clk);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_enable);
-
-static void clk_kref_release(struct kref *kref)
-{
- /* Nothing to do */
-}
-
-void __clk_disable(struct clk *clk)
-{
- int count = kref_put(&clk->kref, clk_kref_release);
-
- if (clk->flags & CLK_ALWAYS_ENABLED)
- return;
-
- if (!count) { /* count reaches zero, disable the clock */
- if (likely(clk->ops && clk->ops->disable))
- clk->ops->disable(clk);
- }
-}
-EXPORT_SYMBOL_GPL(__clk_disable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clock_lock, flags);
- __clk_disable(clk);
- spin_unlock_irqrestore(&clock_lock, flags);
-}
-EXPORT_SYMBOL_GPL(clk_disable);
-
-int clk_register(struct clk *clk)
-{
- mutex_lock(&clock_list_sem);
-
- list_add(&clk->node, &clock_list);
- kref_init(&clk->kref);
-
- mutex_unlock(&clock_list_sem);
-
- if (clk->flags & CLK_ALWAYS_ENABLED) {
- pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name);
- if (clk->ops && clk->ops->init)
- clk->ops->init(clk);
- if (clk->ops && clk->ops->enable)
- clk->ops->enable(clk);
- pr_debug( "Enabled.");
+ ret = arch_clk_init();
+ if (unlikely(ret)) {
+ pr_err("%s: CPU clock registration failed.\n", __func__);
+ return ret;
}
- return 0;
-}
-EXPORT_SYMBOL_GPL(clk_register);
-
-void clk_unregister(struct clk *clk)
-{
- mutex_lock(&clock_list_sem);
- list_del(&clk->node);
- mutex_unlock(&clock_list_sem);
-}
-EXPORT_SYMBOL_GPL(clk_unregister);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL_GPL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return clk_set_rate_ex(clk, rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
-{
- int ret = -EOPNOTSUPP;
-
- if (likely(clk->ops && clk->ops->set_rate)) {
- unsigned long flags;
-
- spin_lock_irqsave(&clock_lock, flags);
- ret = clk->ops->set_rate(clk, rate, algo_id);
- spin_unlock_irqrestore(&clock_lock, flags);
- }
-
- if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
- propagate_rate(clk);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate_ex);
-
-void clk_recalc_rate(struct clk *clk)
-{
- if (likely(clk->ops && clk->ops->recalc)) {
- unsigned long flags;
-
- spin_lock_irqsave(&clock_lock, flags);
- clk->ops->recalc(clk);
- spin_unlock_irqrestore(&clock_lock, flags);
- }
-
- if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
- propagate_rate(clk);
-}
-EXPORT_SYMBOL_GPL(clk_recalc_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (likely(clk->ops && clk->ops->round_rate)) {
- unsigned long flags, rounded;
-
- spin_lock_irqsave(&clock_lock, flags);
- rounded = clk->ops->round_rate(clk, rate);
- spin_unlock_irqrestore(&clock_lock, flags);
-
- return rounded;
- }
-
- return clk_get_rate(clk);
-}
-EXPORT_SYMBOL_GPL(clk_round_rate);
-
-/*
- * Returns a clock. Note that we first try to use device id on the bus
- * and clock name. If this fails, we try to use clock name only.
- */
-struct clk *clk_get(struct device *dev, const char *id)
-{
- struct clk *p, *clk = ERR_PTR(-ENOENT);
- int idno;
-
- if (dev == NULL || dev->bus != &platform_bus_type)
- idno = -1;
- else
- idno = to_platform_device(dev)->id;
-
- mutex_lock(&clock_list_sem);
- list_for_each_entry(p, &clock_list, node) {
- if (p->id == idno &&
- strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
- clk = p;
- goto found;
+ if (sh_mv.mv_clk_init) {
+ ret = sh_mv.mv_clk_init();
+ if (unlikely(ret)) {
+ pr_err("%s: machvec clock initialization failed.\n",
+ __func__);
+ return ret;
}
}
- list_for_each_entry(p, &clock_list, node) {
- if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
- clk = p;
- break;
- }
- }
-
-found:
- mutex_unlock(&clock_list_sem);
-
- return clk;
-}
-EXPORT_SYMBOL_GPL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- if (clk && !IS_ERR(clk))
- module_put(clk->owner);
-}
-EXPORT_SYMBOL_GPL(clk_put);
-
-void __init __attribute__ ((weak))
-arch_init_clk_ops(struct clk_ops **ops, int type)
-{
-}
-
-void __init __attribute__ ((weak))
-arch_clk_init(void)
-{
-}
-
-static int show_clocks(char *buf, char **start, off_t off,
- int len, int *eof, void *data)
-{
- struct clk *clk;
- char *p = buf;
-
- list_for_each_entry_reverse(clk, &clock_list, node) {
- unsigned long rate = clk_get_rate(clk);
-
- /*
- * Don't bother listing dummy clocks with no ancestry
- * that only support enable and disable ops.
- */
- if (unlikely(!rate && !clk->parent))
- continue;
-
- p += sprintf(p, "%-12s\t: %ld.%02ldMHz\n", clk->name,
- rate / 1000000, (rate % 1000000) / 10000);
- }
-
- return p - buf;
-}
-
-int __init clk_init(void)
-{
- int i, ret = 0;
-
- BUG_ON(!master_clk.rate);
-
- for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
- struct clk *clk = onchip_clocks[i];
-
- arch_init_clk_ops(&clk->ops, i);
- ret |= clk_register(clk);
- }
-
- arch_clk_init();
-
/* Kick the child clocks.. */
- propagate_rate(&master_clk);
- propagate_rate(&bus_clk);
+ recalculate_root_clocks();
+
+ /* Enable the necessary init clocks */
+ clk_enable_init_clocks();
return ret;
}
-static int __init clk_proc_init(void)
-{
- struct proc_dir_entry *p;
- p = create_proc_read_entry("clocks", S_IRUSR, NULL,
- show_clocks, NULL);
- if (unlikely(!p))
- return -EINVAL;
- return 0;
-}
-subsys_initcall(clk_proc_init);
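
Board-side sketch: the rewritten clk_init() above gives machine vectors a hook, sh_mv.mv_clk_init(), that runs after the CPU clocks are registered, so a board can register fixed clocks of its own there. Only the mv_clk_init hook itself comes from the code above; the clock, its rate and the function name below are invented for illustration:

#include <linux/clk.h>
#include <asm/clock.h>

static struct clk example_board_clk = {
	.rate	= 33333333,	/* assumed fixed 33.33 MHz board oscillator */
};

/* Hooked up from the board's sh_machine_vector as .mv_clk_init = example_board_clk_init */
int __init example_board_clk_init(void)
{
	return clk_register(&example_board_clk);
}
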
diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c
new file mode 100644
index 00000000000..4e332244ea7
--- /dev/null
+++ b/arch/sh/kernel/cpu/fpu.c
@@ -0,0 +1,86 @@
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+int init_fpu(struct task_struct *tsk)
+{
+ if (tsk_used_math(tsk)) {
+ if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
+ unlazy_fpu(tsk, task_pt_regs(tsk));
+ return 0;
+ }
+
+ /*
+ * Memory allocation at the first usage of the FPU and other state.
+ */
+ if (!tsk->thread.xstate) {
+ tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+ GFP_KERNEL);
+ if (!tsk->thread.xstate)
+ return -ENOMEM;
+ }
+
+ if (boot_cpu_data.flags & CPU_HAS_FPU) {
+ struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
+ memset(fp, 0, xstate_size);
+ fp->fpscr = FPSCR_INIT;
+ } else {
+ struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
+ memset(fp, 0, xstate_size);
+ fp->fpscr = FPSCR_INIT;
+ }
+
+ set_stopped_child_used_math(tsk);
+ return 0;
+}
+
+#ifdef CONFIG_SH_FPU
+void __fpu_state_restore(void)
+{
+ struct task_struct *tsk = current;
+
+ restore_fpu(tsk);
+
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ tsk->thread.fpu_counter++;
+}
+
+void fpu_state_restore(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+
+ if (unlikely(!user_mode(regs))) {
+ printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ BUG();
+ return;
+ }
+
+ if (!tsk_used_math(tsk)) {
+ local_irq_enable();
+ /*
+ * does a slab alloc which can sleep
+ */
+ if (init_fpu(tsk)) {
+ /*
+ * ran out of memory!
+ */
+ do_group_exit(SIGKILL);
+ return;
+ }
+ local_irq_disable();
+ }
+
+ grab_fpu(regs);
+
+ __fpu_state_restore();
+}
+
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ TRAP_HANDLER_DECL;
+
+ fpu_state_restore(regs);
+}
+#endif /* CONFIG_SH_FPU */
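
The fpu_counter bump in __fpu_state_restore() feeds the usual lazy-versus-eager heuristic on the context switch path: a task that keeps taking FPU traps on consecutive timeslices gets its FPU context restored up front rather than waiting for the next trap. A hedged sketch of that consumer (the corresponding check is typically made in __switch_to(); the threshold and exact wiring here are illustrative, not lifted from this diff):

#include <linux/sched.h>
#include <asm/fpu.h>

/* Called late in the switch, once 'next' has become current. */
static inline void example_maybe_eager_fpu_restore(struct task_struct *next)
{
	/* heavy FPU users skip the fault-driven restore on their next slice */
	if (next->thread.fpu_counter > 5)
		__fpu_state_restore();
}
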
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 75fb03d3567..0d7360d549c 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
*
* CPU init code
*
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -18,28 +18,38 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
-#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
-#ifdef CONFIG_SUPERH32
-#include <asm/ubc.h>
+#include <asm/sh_bios.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu 1
+#else
+#define cpu_has_fpu 0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp 1
+#else
+#define cpu_has_dsp 0
#endif
/*
* Generic wrapper for command line arguments to disable on-chip
* peripherals (nofpu, nodsp, and so forth).
*/
-#define onchip_setup(x) \
-static int x##_disabled __initdata = 0; \
- \
-static int __init x##_setup(char *opts) \
-{ \
- x##_disabled = 1; \
- return 1; \
-} \
+#define onchip_setup(x) \
+static int x##_disabled = !cpu_has_##x; \
+ \
+static int x##_setup(char *opts) \
+{ \
+ x##_disabled = 1; \
+ return 1; \
+} \
__setup("no" __stringify(x), x##_setup);
onchip_setup(fpu);
@@ -49,29 +59,60 @@ onchip_setup(dsp);
#define CPUOPM 0xff2f0000
#define CPUOPM_RABD (1 << 5)
-static void __init speculative_execution_init(void)
+static void speculative_execution_init(void)
{
/* Clear RABD */
- ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
+ __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
/* Flush the update */
- (void)ctrl_inl(CPUOPM);
+ (void)__raw_readl(CPUOPM);
ctrl_barrier();
}
#else
#define speculative_execution_init() do { } while (0)
#endif
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK 0xff2f0004
+#define EXPMASK_RTEDS (1 << 0)
+#define EXPMASK_BRDSSLP (1 << 1)
+#define EXPMASK_MMCAW (1 << 4)
+
+static void expmask_init(void)
+{
+ unsigned long expmask = __raw_readl(EXPMASK);
+
+ /*
+ * Future proofing.
+ *
+ * Disable support for slottable sleep instruction, non-nop
+ * instructions in the rte delay slot, and associative writes to
+ * the memory-mapped cache array.
+ */
+ expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
+
+ __raw_writel(expmask, EXPMASK);
+ ctrl_barrier();
+}
+#else
+#define expmask_init() do { } while (0)
+#endif
+
+/* 2nd-level cache init */
+void __attribute__ ((weak)) l2_cache_init(void)
+{
+}
+
/*
* Generic first-level cache init
*/
#ifdef CONFIG_SUPERH32
-static void __uses_jump_to_uncached cache_init(void)
+static void cache_init(void)
{
unsigned long ccr, flags;
jump_to_uncached();
- ccr = ctrl_inl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* At this point we don't know whether the cache is enabled or not - a
@@ -115,7 +156,7 @@ static void __uses_jump_to_uncached cache_init(void)
for (addr = addrstart;
addr < addrstart + waysize;
addr += current_cpu_data.dcache.linesz)
- ctrl_outl(0, addr);
+ __raw_writel(0, addr);
addrstart += current_cpu_data.dcache.way_incr;
} while (--ways);
@@ -146,7 +187,9 @@ static void __uses_jump_to_uncached cache_init(void)
flags &= ~CCR_CACHE_ENABLE;
#endif
- ctrl_outl(flags, CCR);
+ l2_cache_init();
+
+ __raw_writel(flags, SH_CCR);
back_to_cached();
}
#else
@@ -174,8 +217,20 @@ static void detect_cache_shape(void)
l2_cache_shape = -1; /* No S-cache */
}
+static void fpu_init(void)
+{
+ /* Disable the FPU */
+ if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+ printk("FPU Disabled\n");
+ current_cpu_data.flags &= ~CPU_HAS_FPU;
+ }
+
+ disable_fpu();
+ clear_used_math();
+}
+
#ifdef CONFIG_SH_DSP
-static void __init release_dsp(void)
+static void release_dsp(void)
{
unsigned long sr;
@@ -189,7 +244,7 @@ static void __init release_dsp(void)
);
}
-static void __init dsp_init(void)
+static void dsp_init(void)
{
unsigned long sr;
@@ -211,34 +266,41 @@ static void __init dsp_init(void)
if (sr & SR_DSP)
current_cpu_data.flags |= CPU_HAS_DSP;
+ /* Disable the DSP */
+ if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+ printk("DSP Disabled\n");
+ current_cpu_data.flags &= ~CPU_HAS_DSP;
+ }
+
/* Now that we've determined the DSP status, clear the DSP bit. */
release_dsp();
}
+#else
+static inline void dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
- * sh_cpu_init
+ * cpu_init
*
- * This is our initial entry point for each CPU, and is invoked on the boot
- * CPU prior to calling start_kernel(). For SMP, a combination of this and
- * start_secondary() will bring up each processor to a ready state prior
- * to hand forking the idle loop.
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
*
- * We do all of the basic processor init here, including setting up the
- * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
- * hit (and subsequently platform_setup()) things like determining the
- * CPU subtype and initial configuration will all be done.
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
*
* Each processor family is still responsible for doing its own probing
- * and cache configuration in detect_cpu_and_cache_system().
+ * and cache configuration in cpu_probe().
*/
-
-asmlinkage void __init sh_cpu_init(void)
+asmlinkage void cpu_init(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
/* First, probe the CPU */
- detect_cpu_and_cache_system();
+ cpu_probe();
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
@@ -269,18 +331,8 @@ asmlinkage void __init sh_cpu_init(void)
detect_cache_shape();
}
- /* Disable the FPU */
- if (fpu_disabled) {
- printk("FPU Disabled\n");
- current_cpu_data.flags &= ~CPU_HAS_FPU;
- disable_fpu();
- }
-
- /* FPU initialization */
- if ((current_cpu_data.flags & CPU_HAS_FPU)) {
- clear_thread_flag(TIF_USEDFPU);
- clear_used_math();
- }
+ fpu_init();
+ dsp_init();
/*
* Initialize the per-CPU ASID cache very early, since the
@@ -288,28 +340,26 @@ asmlinkage void __init sh_cpu_init(void)
*/
current_cpu_data.asid_cache = NO_CONTEXT;
-#ifdef CONFIG_SH_DSP
- /* Probe for DSP */
- dsp_init();
+ current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
- /* Disable the DSP */
- if (dsp_disabled) {
- printk("DSP Disabled\n");
- current_cpu_data.flags &= ~CPU_HAS_DSP;
- release_dsp();
- }
-#endif
+ speculative_execution_init();
+ expmask_init();
- /*
- * Some brain-damaged loaders decided it would be a good idea to put
- * the UBC to sleep. This causes some issues when it comes to things
- * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
- * we wake it up and hope that all is well.
- */
-#ifdef CONFIG_SUPERH32
- if (raw_smp_processor_id() == 0)
- ubc_wakeup();
-#endif
+ /* Do the rest of the boot processor setup */
+ if (raw_smp_processor_id() == 0) {
+ /* Save off the BIOS VBR, if there is one */
+ sh_bios_vbr_init();
- speculative_execution_init();
+ /*
+ * Setup VBR for boot CPU. Secondary CPUs do this through
+ * start_secondary().
+ */
+ per_cpu_trap_init();
+
+ /*
+ * Boot processor to setup the FP and extended state
+ * context info.
+ */
+ init_thread_xstate();
+ }
}
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 462a8f6dfee..f0c7025a67d 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,8 +1,6 @@
#
# Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
#
-obj-y += intc.o
-
obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index 301b505c427..e7f1745bd12 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -18,38 +18,16 @@
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/irq.h>
-#include <asm/system.h>
+#include <linux/bitmap.h>
#include <asm/irq.h>
/* Bitmap of IRQ masked */
-static unsigned long imask_mask = 0x7fff;
-static int interrupt_priority = 0;
-
-static void enable_imask_irq(unsigned int irq);
-static void disable_imask_irq(unsigned int irq);
-static void shutdown_imask_irq(unsigned int irq);
-static void mask_and_ack_imask(unsigned int);
-static void end_imask_irq(unsigned int irq);
-
#define IMASK_PRIORITY 15
-static unsigned int startup_imask_irq(unsigned int irq)
-{
- /* Nothing to do */
- return 0; /* never anything pending */
-}
+static DECLARE_BITMAP(imask_mask, IMASK_PRIORITY);
+static int interrupt_priority;
-static struct hw_interrupt_type imask_irq_type = {
- .typename = "SR.IMASK",
- .startup = startup_imask_irq,
- .shutdown = shutdown_imask_irq,
- .enable = enable_imask_irq,
- .disable = disable_imask_irq,
- .ack = mask_and_ack_imask,
- .end = end_imask_irq
-};
-
-void static inline set_interrupt_registers(int ip)
+static inline void set_interrupt_registers(int ip)
{
unsigned long __dummy;
@@ -72,42 +50,35 @@ void static inline set_interrupt_registers(int ip)
: "t");
}
-static void disable_imask_irq(unsigned int irq)
+static void mask_imask_irq(struct irq_data *data)
{
- clear_bit(irq, &imask_mask);
+ unsigned int irq = data->irq;
+
+ clear_bit(irq, imask_mask);
if (interrupt_priority < IMASK_PRIORITY - irq)
interrupt_priority = IMASK_PRIORITY - irq;
-
set_interrupt_registers(interrupt_priority);
}
-static void enable_imask_irq(unsigned int irq)
+static void unmask_imask_irq(struct irq_data *data)
{
- set_bit(irq, &imask_mask);
- interrupt_priority = IMASK_PRIORITY - ffz(imask_mask);
+ unsigned int irq = data->irq;
+ set_bit(irq, imask_mask);
+ interrupt_priority = IMASK_PRIORITY -
+ find_first_zero_bit(imask_mask, IMASK_PRIORITY);
set_interrupt_registers(interrupt_priority);
}
-static void mask_and_ack_imask(unsigned int irq)
-{
- disable_imask_irq(irq);
-}
-
-static void end_imask_irq(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
- enable_imask_irq(irq);
-}
-
-static void shutdown_imask_irq(unsigned int irq)
-{
- /* Nothing to do */
-}
+static struct irq_chip imask_irq_chip = {
+ .name = "SR.IMASK",
+ .irq_mask = mask_imask_irq,
+ .irq_unmask = unmask_imask_irq,
+ .irq_mask_ack = mask_imask_irq,
+};
void make_imask_irq(unsigned int irq)
{
- disable_irq_nosync(irq);
- irq_desc[irq].chip = &imask_irq_type;
- enable_irq(irq);
+ irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq,
+ "level");
}
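
Usage sketch for the converted chip: callers route a vector through SR.IMASK with make_imask_irq(), and the generic handle_level_irq flow then drives mask_imask_irq()/unmask_imask_irq() above through the irq_chip callbacks. The header path and IRQ numbers below are made up for illustration:

#include <linux/init.h>
#include <asm/irq.h>	/* assumed home of the make_imask_irq() prototype */

void __init example_board_init_irq(void)
{
	int irq;

	/* pretend vectors 2..4 are handled purely via the SR.IMASK level */
	for (irq = 2; irq <= 4; irq++)
		make_imask_irq(irq);
}
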
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index d6e0e2bdaad..9e056a3a0c7 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -20,7 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
-#include <asm/cpu/irq.h>
+#include <cpu/irq.h>
#include <asm/page.h>
/*
@@ -76,39 +76,11 @@ int intc_evt_to_irq[(0xE20/0x20)+1] = {
};
static unsigned long intc_virt;
-
-static unsigned int startup_intc_irq(unsigned int irq);
-static void shutdown_intc_irq(unsigned int irq);
-static void enable_intc_irq(unsigned int irq);
-static void disable_intc_irq(unsigned int irq);
-static void mask_and_ack_intc(unsigned int);
-static void end_intc_irq(unsigned int irq);
-
-static struct hw_interrupt_type intc_irq_type = {
- .typename = "INTC",
- .startup = startup_intc_irq,
- .shutdown = shutdown_intc_irq,
- .enable = enable_intc_irq,
- .disable = disable_intc_irq,
- .ack = mask_and_ack_intc,
- .end = end_intc_irq
-};
-
static int irlm; /* IRL mode */
-static unsigned int startup_intc_irq(unsigned int irq)
-{
- enable_intc_irq(irq);
- return 0; /* never anything pending */
-}
-
-static void shutdown_intc_irq(unsigned int irq)
-{
- disable_intc_irq(irq);
-}
-
-static void enable_intc_irq(unsigned int irq)
+static void enable_intc_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
@@ -123,11 +95,12 @@ static void enable_intc_irq(unsigned int irq)
bitmask = 1 << (irq - 32);
}
- ctrl_outl(bitmask, reg);
+ __raw_writel(bitmask, reg);
}
-static void disable_intc_irq(unsigned int irq)
+static void disable_intc_irq(struct irq_data *data)
{
+ unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
@@ -139,107 +112,77 @@ static void disable_intc_irq(unsigned int irq)
bitmask = 1 << (irq - 32);
}
- ctrl_outl(bitmask, reg);
-}
-
-static void mask_and_ack_intc(unsigned int irq)
-{
- disable_intc_irq(irq);
-}
-
-static void end_intc_irq(unsigned int irq)
-{
- enable_intc_irq(irq);
-}
-
-/* For future use, if we ever support IRLM=0) */
-void make_intc_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- irq_desc[irq].chip = &intc_irq_type;
- disable_intc_irq(irq);
+ __raw_writel(bitmask, reg);
}
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
-static int IRQ_to_vectorN[NR_INTC_IRQS] = {
- 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */
- -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */
- 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */
- 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */
- 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */
- -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */
-
+static struct irq_chip intc_irq_type = {
+ .name = "INTC",
+ .irq_enable = enable_intc_irq,
+ .irq_disable = disable_intc_irq,
};
-int intc_irq_describe(char* p, int irq)
-{
- if (irq < NR_INTC_IRQS)
- return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20);
- else
- return 0;
-}
-#endif
-
void __init plat_irq_setup(void)
{
- unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
+ unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
unsigned long reg;
- unsigned long data;
int i;
- intc_virt = onchip_remap(INTC_BASE, 1024, "INTC");
+ intc_virt = (unsigned long)ioremap_nocache(INTC_BASE, 1024);
if (!intc_virt) {
panic("Unable to remap INTC\n");
}
/* Set default: per-line enable/disable, priority driven ack/eoi */
- for (i = 0; i < NR_INTC_IRQS; i++) {
- if (platform_int_priority[i] != NO_PRIORITY) {
- irq_desc[i].chip = &intc_irq_type;
- }
- }
+ for (i = 0; i < NR_INTC_IRQS; i++)
+ irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
- ctrl_outl(-1, INTC_INTDSB_0);
- ctrl_outl(-1, INTC_INTDSB_1);
+ __raw_writel(-1, INTC_INTDSB_0);
+ __raw_writel(-1, INTC_INTDSB_1);
for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
- ctrl_outl( NO_PRIORITY, reg);
-
-
- /* Set IRLM */
- /* If all the priorities are set to 'no priority', then
- * assume we are using encoded mode.
- */
- irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \
- platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3];
-
- if (irlm == NO_PRIORITY) {
- /* IRLM = 0 */
- reg = INTC_ICR_CLEAR;
- i = IRQ_INTA;
- printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
- } else {
- /* IRLM = 1 */
- reg = INTC_ICR_SET;
- i = IRQ_IRL0;
- }
- ctrl_outl(INTC_ICR_IRLM, reg);
-
- /* Set interrupt priorities according to platform description */
- for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
- data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4);
- if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
- /* Upon the 7th, set Priority Register */
- ctrl_outl(data, reg);
- data = 0;
- reg += 8;
+ __raw_writel( NO_PRIORITY, reg);
+
+
+#ifdef CONFIG_SH_CAYMAN
+ {
+ unsigned long data;
+
+ /* Set IRLM */
+ /* If all the priorities are set to 'no priority', then
+ * assume we are using encoded mode.
+ */
+ irlm = platform_int_priority[IRQ_IRL0] +
+ platform_int_priority[IRQ_IRL1] +
+ platform_int_priority[IRQ_IRL2] +
+ platform_int_priority[IRQ_IRL3];
+ if (irlm == NO_PRIORITY) {
+ /* IRLM = 0 */
+ reg = INTC_ICR_CLEAR;
+ i = IRQ_INTA;
+ printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
+ } else {
+ /* IRLM = 1 */
+ reg = INTC_ICR_SET;
+ i = IRQ_IRL0;
+ }
+ __raw_writel(INTC_ICR_IRLM, reg);
+
+ /* Set interrupt priorities according to platform description */
+ for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
+ data |= platform_int_priority[i] <<
+ ((i % INTC_INTPRI_PPREG) * 4);
+ if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
+ /* Upon the 7th, set Priority Register */
+ __raw_writel(data, reg);
+ data = 0;
+ reg += 8;
+ }
}
}
+#endif
/*
* And now let interrupts come in.
diff --git a/arch/sh/kernel/cpu/irq/intc.c b/arch/sh/kernel/cpu/irq/intc.c
deleted file mode 100644
index 84806b2027f..00000000000
--- a/arch/sh/kernel/cpu/irq/intc.c
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * Shared interrupt handling code for IPR and INTC2 types of IRQs.
- *
- * Copyright (C) 2007 Magnus Damm
- *
- * Based on intc2.c and ipr.c
- *
- * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
- * Copyright (C) 2000 Kazumoto Kojima
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
- * Copyright (C) 2005, 2006 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/bootmem.h>
-
-#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
- ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
- ((addr_e) << 16) | ((addr_d << 24)))
-
-#define _INTC_SHIFT(h) (h & 0x1f)
-#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
-#define _INTC_FN(h) ((h >> 9) & 0xf)
-#define _INTC_MODE(h) ((h >> 13) & 0x7)
-#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
-#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
-
-struct intc_handle_int {
- unsigned int irq;
- unsigned long handle;
-};
-
-struct intc_desc_int {
- unsigned long *reg;
-#ifdef CONFIG_SMP
- unsigned long *smp;
-#endif
- unsigned int nr_reg;
- struct intc_handle_int *prio;
- unsigned int nr_prio;
- struct intc_handle_int *sense;
- unsigned int nr_sense;
- struct irq_chip chip;
-};
-
-#ifdef CONFIG_SMP
-#define IS_SMP(x) x.smp
-#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
-#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
-#else
-#define IS_SMP(x) 0
-#define INTC_REG(d, x, c) (d->reg[(x)])
-#define SMP_NR(d, x) 1
-#endif
-
-static unsigned int intc_prio_level[NR_IRQS]; /* for now */
-
-static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
-{
- struct irq_chip *chip = get_irq_chip(irq);
- return (void *)((char *)chip - offsetof(struct intc_desc_int, chip));
-}
-
-static inline unsigned int set_field(unsigned int value,
- unsigned int field_value,
- unsigned int handle)
-{
- unsigned int width = _INTC_WIDTH(handle);
- unsigned int shift = _INTC_SHIFT(handle);
-
- value &= ~(((1 << width) - 1) << shift);
- value |= field_value << shift;
- return value;
-}
-
-static void write_8(unsigned long addr, unsigned long h, unsigned long data)
-{
- ctrl_outb(set_field(0, data, h), addr);
-}
-
-static void write_16(unsigned long addr, unsigned long h, unsigned long data)
-{
- ctrl_outw(set_field(0, data, h), addr);
-}
-
-static void write_32(unsigned long addr, unsigned long h, unsigned long data)
-{
- ctrl_outl(set_field(0, data, h), addr);
-}
-
-static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
-{
- ctrl_outb(set_field(ctrl_inb(addr), data, h), addr);
-}
-
-static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
-{
- ctrl_outw(set_field(ctrl_inw(addr), data, h), addr);
-}
-
-static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
-{
- ctrl_outl(set_field(ctrl_inl(addr), data, h), addr);
-}
-
-enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };
-
-static void (*intc_reg_fns[])(unsigned long addr,
- unsigned long h,
- unsigned long data) = {
- [REG_FN_WRITE_BASE + 0] = write_8,
- [REG_FN_WRITE_BASE + 1] = write_16,
- [REG_FN_WRITE_BASE + 3] = write_32,
- [REG_FN_MODIFY_BASE + 0] = modify_8,
- [REG_FN_MODIFY_BASE + 1] = modify_16,
- [REG_FN_MODIFY_BASE + 3] = modify_32,
-};
-
-enum { MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
- MODE_MASK_REG, /* Bit(s) set -> interrupt disabled */
- MODE_DUAL_REG, /* Two registers, set bit to enable / disable */
- MODE_PRIO_REG, /* Priority value written to enable interrupt */
- MODE_PCLR_REG, /* Above plus all bits set to disable interrupt */
-};
-
-static void intc_mode_field(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq)
-{
- fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
-}
-
-static void intc_mode_zero(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq)
-{
- fn(addr, handle, 0);
-}
-
-static void intc_mode_prio(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq)
-{
- fn(addr, handle, intc_prio_level[irq]);
-}
-
-static void (*intc_enable_fns[])(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq) = {
- [MODE_ENABLE_REG] = intc_mode_field,
- [MODE_MASK_REG] = intc_mode_zero,
- [MODE_DUAL_REG] = intc_mode_field,
- [MODE_PRIO_REG] = intc_mode_prio,
- [MODE_PCLR_REG] = intc_mode_prio,
-};
-
-static void (*intc_disable_fns[])(unsigned long addr,
- unsigned long handle,
- void (*fn)(unsigned long,
- unsigned long,
- unsigned long),
- unsigned int irq) = {
- [MODE_ENABLE_REG] = intc_mode_zero,
- [MODE_MASK_REG] = intc_mode_field,
- [MODE_DUAL_REG] = intc_mode_field,
- [MODE_PRIO_REG] = intc_mode_zero,
- [MODE_PCLR_REG] = intc_mode_field,
-};
-
-static inline void _intc_enable(unsigned int irq, unsigned long handle)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long addr;
- unsigned int cpu;
-
- for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
- addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
- intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
- [_INTC_FN(handle)], irq);
- }
-}
-
-static void intc_enable(unsigned int irq)
-{
- _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
-}
-
-static void intc_disable(unsigned int irq)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned long handle = (unsigned long) get_irq_chip_data(irq);
- unsigned long addr;
- unsigned int cpu;
-
- for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
- addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
- intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
- [_INTC_FN(handle)], irq);
- }
-}
-
-static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
- unsigned int nr_hp,
- unsigned int irq)
-{
- int i;
-
- /* this doesn't scale well, but...
- *
- * this function should only be used for cerain uncommon
- * operations such as intc_set_priority() and intc_set_sense()
- * and in those rare cases performance doesn't matter that much.
- * keeping the memory footprint low is more important.
- *
- * one rather simple way to speed this up and still keep the
- * memory footprint down is to make sure the array is sorted
- * and then perform a bisect to lookup the irq.
- */
-
- for (i = 0; i < nr_hp; i++) {
- if ((hp + i)->irq != irq)
- continue;
-
- return hp + i;
- }
-
- return NULL;
-}
-
-int intc_set_priority(unsigned int irq, unsigned int prio)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- struct intc_handle_int *ihp;
-
- if (!intc_prio_level[irq] || prio <= 1)
- return -EINVAL;
-
- ihp = intc_find_irq(d->prio, d->nr_prio, irq);
- if (ihp) {
- if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
- return -EINVAL;
-
- intc_prio_level[irq] = prio;
-
- /*
- * only set secondary masking method directly
- * primary masking method is using intc_prio_level[irq]
- * priority level will be set during next enable()
- */
-
- if (_INTC_FN(ihp->handle) != REG_FN_ERR)
- _intc_enable(irq, ihp->handle);
- }
- return 0;
-}
-
-#define VALID(x) (x | 0x80)
-
-static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
- [IRQ_TYPE_EDGE_FALLING] = VALID(0),
- [IRQ_TYPE_EDGE_RISING] = VALID(1),
- [IRQ_TYPE_LEVEL_LOW] = VALID(2),
- [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
-};
-
-static int intc_set_sense(unsigned int irq, unsigned int type)
-{
- struct intc_desc_int *d = get_intc_desc(irq);
- unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
- struct intc_handle_int *ihp;
- unsigned long addr;
-
- if (!value)
- return -EINVAL;
-
- ihp = intc_find_irq(d->sense, d->nr_sense, irq);
- if (ihp) {
- addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
- intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
- }
- return 0;
-}
-
-static unsigned int __init intc_get_reg(struct intc_desc_int *d,
- unsigned long address)
-{
- unsigned int k;
-
- for (k = 0; k < d->nr_reg; k++) {
- if (d->reg[k] == address)
- return k;
- }
-
- BUG();
- return 0;
-}
-
-static intc_enum __init intc_grp_id(struct intc_desc *desc,
- intc_enum enum_id)
-{
- struct intc_group *g = desc->groups;
- unsigned int i, j;
-
- for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
- g = desc->groups + i;
-
- for (j = 0; g->enum_ids[j]; j++) {
- if (g->enum_ids[j] != enum_id)
- continue;
-
- return g->enum_id;
- }
- }
-
- return 0;
-}
-
-static unsigned int __init intc_mask_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id, int do_grps)
-{
- struct intc_mask_reg *mr = desc->mask_regs;
- unsigned int i, j, fn, mode;
- unsigned long reg_e, reg_d;
-
- for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
- mr = desc->mask_regs + i;
-
- for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
- if (mr->enum_ids[j] != enum_id)
- continue;
-
- if (mr->set_reg && mr->clr_reg) {
- fn = REG_FN_WRITE_BASE;
- mode = MODE_DUAL_REG;
- reg_e = mr->clr_reg;
- reg_d = mr->set_reg;
- } else {
- fn = REG_FN_MODIFY_BASE;
- if (mr->set_reg) {
- mode = MODE_ENABLE_REG;
- reg_e = mr->set_reg;
- reg_d = mr->set_reg;
- } else {
- mode = MODE_MASK_REG;
- reg_e = mr->clr_reg;
- reg_d = mr->clr_reg;
- }
- }
-
- fn += (mr->reg_width >> 3) - 1;
- return _INTC_MK(fn, mode,
- intc_get_reg(d, reg_e),
- intc_get_reg(d, reg_d),
- 1,
- (mr->reg_width - 1) - j);
- }
- }
-
- if (do_grps)
- return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
-
- return 0;
-}
-
-static unsigned int __init intc_prio_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id, int do_grps)
-{
- struct intc_prio_reg *pr = desc->prio_regs;
- unsigned int i, j, fn, mode, bit;
- unsigned long reg_e, reg_d;
-
- for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
- pr = desc->prio_regs + i;
-
- for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
- if (pr->enum_ids[j] != enum_id)
- continue;
-
- if (pr->set_reg && pr->clr_reg) {
- fn = REG_FN_WRITE_BASE;
- mode = MODE_PCLR_REG;
- reg_e = pr->set_reg;
- reg_d = pr->clr_reg;
- } else {
- fn = REG_FN_MODIFY_BASE;
- mode = MODE_PRIO_REG;
- if (!pr->set_reg)
- BUG();
- reg_e = pr->set_reg;
- reg_d = pr->set_reg;
- }
-
- fn += (pr->reg_width >> 3) - 1;
- bit = pr->reg_width - ((j + 1) * pr->field_width);
-
- BUG_ON(bit < 0);
-
- return _INTC_MK(fn, mode,
- intc_get_reg(d, reg_e),
- intc_get_reg(d, reg_d),
- pr->field_width, bit);
- }
- }
-
- if (do_grps)
- return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
-
- return 0;
-}
-
-static unsigned int __init intc_sense_data(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id)
-{
- struct intc_sense_reg *sr = desc->sense_regs;
- unsigned int i, j, fn, bit;
-
- for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
- sr = desc->sense_regs + i;
-
- for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
- if (sr->enum_ids[j] != enum_id)
- continue;
-
- fn = REG_FN_MODIFY_BASE;
- fn += (sr->reg_width >> 3) - 1;
- bit = sr->reg_width - ((j + 1) * sr->field_width);
-
- BUG_ON(bit < 0);
-
- return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
- 0, sr->field_width, bit);
- }
- }
-
- return 0;
-}
-
-static void __init intc_register_irq(struct intc_desc *desc,
- struct intc_desc_int *d,
- intc_enum enum_id,
- unsigned int irq)
-{
- struct intc_handle_int *hp;
- unsigned int data[2], primary;
-
- /* Prefer single interrupt source bitmap over other combinations:
- * 1. bitmap, single interrupt source
- * 2. priority, single interrupt source
- * 3. bitmap, multiple interrupt sources (groups)
- * 4. priority, multiple interrupt sources (groups)
- */
-
- data[0] = intc_mask_data(desc, d, enum_id, 0);
- data[1] = intc_prio_data(desc, d, enum_id, 0);
-
- primary = 0;
- if (!data[0] && data[1])
- primary = 1;
-
- data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
- data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);
-
- if (!data[primary])
- primary ^= 1;
-
- BUG_ON(!data[primary]); /* must have primary masking method */
-
- disable_irq_nosync(irq);
- set_irq_chip_and_handler_name(irq, &d->chip,
- handle_level_irq, "level");
- set_irq_chip_data(irq, (void *)data[primary]);
-
- /* set priority level
- * - this needs to be at least 2 for 5-bit priorities on 7780
- */
- intc_prio_level[irq] = 2;
-
- /* enable secondary masking method if present */
- if (data[!primary])
- _intc_enable(irq, data[!primary]);
-
- /* add irq to d->prio list if priority is available */
- if (data[1]) {
- hp = d->prio + d->nr_prio;
- hp->irq = irq;
- hp->handle = data[1];
-
- if (primary) {
- /*
- * only secondary priority should access registers, so
- * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
- */
-
- hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
- hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
- }
- d->nr_prio++;
- }
-
- /* add irq to d->sense list if sense is available */
- data[0] = intc_sense_data(desc, d, enum_id);
- if (data[0]) {
- (d->sense + d->nr_sense)->irq = irq;
- (d->sense + d->nr_sense)->handle = data[0];
- d->nr_sense++;
- }
-
- /* irq should be disabled by default */
- d->chip.mask(irq);
-}
-
-static unsigned int __init save_reg(struct intc_desc_int *d,
- unsigned int cnt,
- unsigned long value,
- unsigned int smp)
-{
- if (value) {
- d->reg[cnt] = value;
-#ifdef CONFIG_SMP
- d->smp[cnt] = smp;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-
-void __init register_intc_controller(struct intc_desc *desc)
-{
- unsigned int i, k, smp;
- struct intc_desc_int *d;
-
- d = alloc_bootmem(sizeof(*d));
-
- d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
- d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
- d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
-
- d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg));
-#ifdef CONFIG_SMP
- d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp));
-#endif
- k = 0;
-
- if (desc->mask_regs) {
- for (i = 0; i < desc->nr_mask_regs; i++) {
- smp = IS_SMP(desc->mask_regs[i]);
- k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
- k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
- }
- }
-
- if (desc->prio_regs) {
- d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio));
-
- for (i = 0; i < desc->nr_prio_regs; i++) {
- smp = IS_SMP(desc->prio_regs[i]);
- k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
- k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
- }
- }
-
- if (desc->sense_regs) {
- d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense));
-
- for (i = 0; i < desc->nr_sense_regs; i++) {
- k += save_reg(d, k, desc->sense_regs[i].reg, 0);
- }
- }
-
- BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
-
- d->chip.name = desc->name;
- d->chip.mask = intc_disable;
- d->chip.unmask = intc_enable;
- d->chip.mask_ack = intc_disable;
- d->chip.set_type = intc_set_sense;
-
- for (i = 0; i < desc->nr_vectors; i++) {
- struct intc_vect *vect = desc->vectors + i;
-
- intc_register_irq(desc, d, vect->enum_id, evt2irq(vect->vect));
- }
-}
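
The deleted intc.c packs every register access into a handle built with _INTC_MK() and inserts field values with set_field(). A minimal standalone sketch of that field-packing step, mirroring set_field()/write_32() above but with the width and shift passed directly rather than unpacked from a handle:

    /* Illustrative sketch only: mirrors set_field() from the deleted intc.c. */
    #include <stdio.h>

    static unsigned int set_field(unsigned int value, unsigned int field_value,
                                  unsigned int width, unsigned int shift)
    {
            value &= ~(((1 << width) - 1) << shift);   /* clear the field */
            value |= field_value << shift;             /* insert the new value */
            return value;
    }

    int main(void)
    {
            /* e.g. writing priority level 2 into a 4-bit field at bit 8 */
            printf("0x%08x\n", set_field(0, 2, 4, 8)); /* prints 0x00000200 */
            return 0;
    }
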
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 56ea7b269b5..5de6dff5c21 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -17,31 +17,34 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
+#include <linux/topology.h>
-static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
+static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
{
- struct irq_chip *chip = get_irq_chip(irq);
- return (void *)((char *)chip - offsetof(struct ipr_desc, chip));
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ return container_of(chip, struct ipr_desc, chip);
}
-static void disable_ipr_irq(unsigned int irq)
+static void disable_ipr_irq(struct irq_data *data)
{
- struct ipr_data *p = get_irq_chip_data(irq);
- unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
- ctrl_outw(ctrl_inw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+ __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
+ (void)__raw_readw(addr); /* Read back to flush write posting */
}
-static void enable_ipr_irq(unsigned int irq)
+static void enable_ipr_irq(struct irq_data *data)
{
- struct ipr_data *p = get_irq_chip_data(irq);
- unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx];
+ struct ipr_data *p = irq_data_get_irq_chip_data(data);
+ unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set priority in IPR back to original value */
- ctrl_outw(ctrl_inw(addr) | (p->priority << p->shift), addr);
+ __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
}
/*
@@ -53,21 +56,28 @@ void register_ipr_controller(struct ipr_desc *desc)
{
int i;
- desc->chip.mask = disable_ipr_irq;
- desc->chip.unmask = enable_ipr_irq;
- desc->chip.mask_ack = disable_ipr_irq;
+ desc->chip.irq_mask = disable_ipr_irq;
+ desc->chip.irq_unmask = enable_ipr_irq;
for (i = 0; i < desc->nr_irqs; i++) {
struct ipr_data *p = desc->ipr_data + i;
+ int res;
BUG_ON(p->ipr_idx >= desc->nr_offsets);
BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
+ res = irq_alloc_desc_at(p->irq, numa_node_id());
+ if (unlikely(res != p->irq && res != -EEXIST)) {
+ printk(KERN_INFO "can not get irq_desc for %d\n",
+ p->irq);
+ continue;
+ }
+
disable_irq_nosync(p->irq);
- set_irq_chip_and_handler_name(p->irq, &desc->chip,
- handle_level_irq, "level");
- set_irq_chip_data(p->irq, p);
- disable_ipr_irq(p->irq);
+ irq_set_chip_and_handler_name(p->irq, &desc->chip,
+ handle_level_irq, "level");
+ irq_set_chip_data(p->irq, p);
+ disable_ipr_irq(irq_get_irq_data(p->irq));
}
}
EXPORT_SYMBOL(register_ipr_controller);
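
register_ipr_controller() now installs irq_mask/irq_unmask on the chip and allocates the irq_desc for each entry before binding it. A rough sketch of how a CPU setup file might feed it an ipr_desc follows; the register offset, IRQ numbers, shifts and priorities are invented for illustration and the usual headers are omitted:

    /* Hypothetical IPR block: two IRQs sharing one (invented) IPR register. */
    static struct ipr_data example_ipr_data[] = {
            { .irq = 16, .ipr_idx = 0, .shift = 12, .priority = 2 },
            { .irq = 17, .ipr_idx = 0, .shift =  8, .priority = 2 },
    };

    static unsigned long example_ipr_offsets[] = {
            0xfffe0818,                     /* placeholder IPR address */
    };

    static struct ipr_desc example_ipr_desc = {
            .ipr_offsets    = example_ipr_offsets,
            .nr_offsets     = ARRAY_SIZE(example_ipr_offsets),
            .ipr_data       = example_ipr_data,
            .nr_irqs        = ARRAY_SIZE(example_ipr_data),
            .chip           = { .name = "IPR-example" },
    };

    /* would normally be called from the SoC's plat_irq_setup() */
    static void __init example_irq_setup(void)
    {
            register_ipr_controller(&example_ipr_desc);
    }
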
diff --git a/arch/sh/kernel/cpu/pfc.c b/arch/sh/kernel/cpu/pfc.c
new file mode 100644
index 00000000000..d766564ef7c
--- /dev/null
+++ b/arch/sh/kernel/cpu/pfc.c
@@ -0,0 +1,33 @@
+/*
+ * SH Pin Function Control Initialization
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <cpu/pfc.h>
+
+static struct platform_device sh_pfc_device = {
+ .id = -1,
+};
+
+int __init sh_pfc_register(const char *name,
+ struct resource *resource, u32 num_resources)
+{
+ sh_pfc_device.name = name;
+ sh_pfc_device.num_resources = num_resources;
+ sh_pfc_device.resource = resource;
+
+ return platform_device_register(&sh_pfc_device);
+}
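
sh_pfc_register() simply names the shared platform device and attaches the caller's resources, so a CPU setup file only has to supply the PFC register window. A minimal sketch; the driver name and register window below are placeholders, not taken from this patch:

    static struct resource example_pfc_resources[] = {
            DEFINE_RES_MEM(0xfffe3800, 0x230),      /* placeholder PFC window */
    };

    static int __init example_pinmux_setup(void)
    {
            return sh_pfc_register("pfc-sh7xxx", example_pfc_resources,
                                   ARRAY_SIZE(example_pfc_resources));
    }
    arch_initcall(example_pinmux_setup);
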
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
new file mode 100644
index 00000000000..9e6624c9108
--- /dev/null
+++ b/arch/sh/kernel/cpu/proc.c
@@ -0,0 +1,150 @@
+#include <linux/seq_file.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/machvec.h>
+#include <asm/processor.h>
+
+static const char *cpu_name[] = {
+ [CPU_SH7201] = "SH7201",
+ [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
+ [CPU_SH7264] = "SH7264", [CPU_SH7269] = "SH7269",
+ [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
+ [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
+ [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
+ [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
+ [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
+ [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
+ [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
+ [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
+ [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
+ [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
+ [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
+ [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
+ [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
+ [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
+ [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
+ [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
+ [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
+ [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
+ [CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
+ [CPU_SH_NONE] = "Unknown"
+};
+
+const char *get_cpu_subtype(struct sh_cpuinfo *c)
+{
+ return cpu_name[c->type];
+}
+EXPORT_SYMBOL(get_cpu_subtype);
+
+#ifdef CONFIG_PROC_FS
+/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
+static const char *cpu_flags[] = {
+ "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
+ "ptea", "llsc", "l2", "op32", "pteaex", NULL
+};
+
+static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
+{
+ unsigned long i;
+
+ seq_printf(m, "cpu flags\t:");
+
+ if (!c->flags) {
+ seq_printf(m, " %s\n", cpu_flags[0]);
+ return;
+ }
+
+ for (i = 0; cpu_flags[i]; i++)
+ if ((c->flags & (1 << i)))
+ seq_printf(m, " %s", cpu_flags[i+1]);
+
+ seq_printf(m, "\n");
+}
+
+static void show_cacheinfo(struct seq_file *m, const char *type,
+ struct cache_info info)
+{
+ unsigned int cache_size;
+
+ cache_size = info.ways * info.sets * info.linesz;
+
+ seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
+ type, cache_size >> 10, info.ways);
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct sh_cpuinfo *c = v;
+ unsigned int cpu = c - cpu_data;
+
+ if (!cpu_online(cpu))
+ return 0;
+
+ if (cpu == 0)
+ seq_printf(m, "machine\t\t: %s\n", get_system_type());
+ else
+ seq_printf(m, "\n");
+
+ seq_printf(m, "processor\t: %d\n", cpu);
+ seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
+ seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
+ if (c->cut_major == -1)
+ seq_printf(m, "cut\t\t: unknown\n");
+ else if (c->cut_minor == -1)
+ seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
+ else
+ seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
+
+ show_cpuflags(m, c);
+
+ seq_printf(m, "cache type\t: ");
+
+ /*
+	 * Check what type of cache we have: we support both the
+	 * unified cache on the SH-2 and SH-3 and the Harvard-style
+	 * cache on the SH-4.
+ */
+ if (c->icache.flags & SH_CACHE_COMBINED) {
+ seq_printf(m, "unified\n");
+ show_cacheinfo(m, "cache", c->icache);
+ } else {
+ seq_printf(m, "split (harvard)\n");
+ show_cacheinfo(m, "icache", c->icache);
+ show_cacheinfo(m, "dcache", c->dcache);
+ }
+
+ /* Optional secondary cache */
+ if (c->flags & CPU_HAS_L2_CACHE)
+ show_cacheinfo(m, "scache", c->scache);
+
+ seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
+
+ seq_printf(m, "bogomips\t: %lu.%02lu\n",
+ c->loops_per_jiffy/(500000/HZ),
+ (c->loops_per_jiffy/(5000/HZ)) % 100);
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+#endif /* CONFIG_PROC_FS */
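
cpuinfo_op is the seq_file iterator consumed by the generic /proc/cpuinfo code; for context, the consumer side of this era looks roughly like the sketch below (it lives in fs/proc/cpuinfo.c, not in this patch):

    static int cpuinfo_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &cpuinfo_op);
    }

    static const struct file_operations proc_cpuinfo_operations = {
            .open           = cpuinfo_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = seq_release,
    };
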
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
index d2c15791799..e80252ae5bc 100644
--- a/arch/sh/kernel/cpu/sh2/clock-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
@@ -14,68 +14,64 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
+#include <asm/processor.h>
static const int pll1rate[] = {1,2};
static const int pfc_divisors[] = {1,2,0,4};
-
-#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
-#define PLL2 (4)
-#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
-#define PLL2 (2)
-#else
-#error "Illigal Clock Mode!"
-#endif
+static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
- clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+ clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
}
-static struct clk_ops sh7619_master_clk_ops = {
+static struct sh_clk_ops sh7619_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7619_module_clk_ops = {
+static struct sh_clk_ops sh7619_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+ return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
}
-static struct clk_ops sh7619_bus_clk_ops = {
+static struct sh_clk_ops sh7619_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
-{
- clk->rate = clk->parent->rate;
-}
-
-static struct clk_ops sh7619_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct sh_clk_ops sh7619_cpu_clk_ops = {
+ .recalc = followparent_recalc,
};
-static struct clk_ops *sh7619_clk_ops[] = {
+static struct sh_clk_ops *sh7619_clk_ops[] = {
&sh7619_master_clk_ops,
&sh7619_module_clk_ops,
&sh7619_bus_clk_ops,
&sh7619_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
+ if (test_mode_pin(MODE_PIN2 | MODE_PIN0) ||
+ test_mode_pin(MODE_PIN2 | MODE_PIN1))
+ pll2_mult = 2;
+ else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1))
+ pll2_mult = 4;
+
+ BUG_ON(!pll2_mult);
+
if (idx < ARRAY_SIZE(sh7619_clk_ops))
*ops = sh7619_clk_ops[idx];
}
-
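
With the mode-pin probing above, the master clock works out to input clock x pll2_mult x pll1rate[FREQCR[10:8]]. A worked example under assumed values; the input clock and FREQCR setting are invented, and pll2_mult = 4 corresponds to mode pin 0 or 1 being set:

    #include <stdio.h>

    int main(void)
    {
            const int pll1rate[] = { 1, 2 };
            unsigned long rate = 10000000;          /* assumed input clock */
            unsigned int pll2_mult = 4;             /* assumed mode-pin setting */

            rate *= pll2_mult * pll1rate[1];        /* assumes FREQCR[10:8] = 1 */
            printf("master clock: %lu Hz\n", rate); /* 80000000 */
            return 0;
    }
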
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
index 0fc89069d8c..c8a4331d9b8 100644
--- a/arch/sh/kernel/cpu/sh2/entry.S
+++ b/arch/sh/kernel/cpu/sh2/entry.S
@@ -3,7 +3,7 @@
*
* The SH-2 exception entry
*
- * Copyright (C) 2005,2006 Yoshinori Sato
+ * Copyright (C) 2005-2008 Yoshinori Sato
* Copyright (C) 2005 AXE,Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -14,7 +14,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
+#include <cpu/mmu_context.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/page.h>
@@ -36,43 +36,41 @@ OFF_TRA = (16*4+6*4)
#include <asm/entry-macros.S>
ENTRY(exception_handler)
- ! already saved r0/r1
+ ! stack
+ ! r0 <- point sp
+ ! r1
+ ! pc
+ ! sr
+ ! r0 = temporary
+ ! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
mov.l r2,@-sp
mov.l r3,@-sp
- mov r0,r1
cli
mov.l $cpu_mode,r2
mov.l @r2,r0
mov.l @(5*4,r15),r3 ! previous SR
- shll2 r3 ! set "S" flag
- rotl r0 ! T <- "S" flag
- rotl r0 ! "S" flag is LSB
- rotcr r3 ! T -> r3:b30
- shlr r3
- shlr r0
- bt/s 1f
- mov.l r3,@(5*4,r15) ! copy cpu mode to SR
+ or r0,r3 ! set MD
+ tst r0,r0
+ bf/s 1f ! previous mode check
+ mov.l r3,@(5*4,r15) ! update SR
! switch to kernel mode
- mov #1,r0
- rotr r0
- rotr r0
+ mov.l __md_bit,r0
mov.l r0,@r2 ! enter kernel mode
mov.l $current_thread_info,r2
mov.l @r2,r2
- mov #0x20,r0
+ mov #(THREAD_SIZE >> 8),r0
shll8 r0
add r2,r0
mov r15,r2 ! r2 = user stack top
mov r0,r15 ! switch kernel stack
- add #-4,r15 ! dummy
mov.l r1,@-r15 ! TRA
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
- mov.l @(4*4,r2),r0
- mov.l @(5*4,r2),r1
- mov.l r1,@-r15 ! original SR
+ mov.l @(5*4,r2),r0
+ mov.l r0,@-r15 ! original SR
sts.l pr,@-r15
+ mov.l @(4*4,r2),r0
mov.l r0,@-r15 ! original PC
mov r2,r3
add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame
@@ -88,14 +86,15 @@ ENTRY(exception_handler)
mov.l r6,@-r15
mov.l r5,@-r15
mov.l r4,@-r15
+ mov r1,r9 ! save TRA
mov r2,r8 ! copy user -> kernel stack
- mov.l @r8+,r3
+ mov.l @(0,r8),r3
mov.l r3,@-r15
- mov.l @r8+,r2
+ mov.l @(4,r8),r2
mov.l r2,@-r15
- mov.l @r8+,r1
+ mov.l @(12,r8),r1
mov.l r1,@-r15
- mov.l @r8+,r0
+ mov.l @(8,r8),r0
bra 2f
mov.l r0,@-r15
1:
@@ -107,10 +106,11 @@ ENTRY(exception_handler)
mov.l r0,@-r15
mov.l @r2+,r0 ! old R2
mov.l r0,@-r15
- mov.l @r2+,r0 ! old R1
- mov.l r0,@-r15
- mov.l @r2+,r0 ! old R0
+ mov.l @(4,r2),r0 ! old R1
mov.l r0,@-r15
+ mov.l @r2,r0 ! old R0
+ mov.l r0,@-r15
+ add #8,r2
mov.l @r2+,r3 ! old PC
mov.l @r2+,r0 ! old SR
add #-4,r2 ! exception frame stub (sr)
@@ -135,14 +135,12 @@ ENTRY(exception_handler)
mov.l r6,@-r2
mov.l r5,@-r2
mov.l r4,@-r2
+ mov r1,r9
mov.l @(OFF_R0,r15),r0
mov.l @(OFF_R1,r15),r1
mov.l @(OFF_R2,r15),r2
mov.l @(OFF_R3,r15),r3
2:
- mov #OFF_TRA,r8
- add r15,r8
- mov.l @r8,r9
mov #64,r8
cmp/hs r8,r9
bt interrupt_entry ! vec >= 64 is interrupt
@@ -150,26 +148,14 @@ ENTRY(exception_handler)
cmp/hs r8,r9
bt trap_entry ! 64 > vec >= 32 is trap
-#if defined(CONFIG_SH_FPU)
- mov #13,r8
- cmp/eq r8,r9
- bt 10f ! fpu
- nop
-#endif
-
mov.l 4f,r8
mov r9,r4
shll2 r9
add r9,r8
- mov.l @r8,r8
- mov #0,r9
- cmp/eq r9,r8
+ mov.l @r8,r8 ! exception handler address
+ tst r8,r8
bf 3f
mov.l 8f,r8 ! unhandled exception
-#if defined(CONFIG_SH_FPU)
-10:
- mov.l 9f, r8 ! unhandled exception
-#endif
3:
mov.l 5f,r10
jmp @r8
@@ -188,10 +174,7 @@ interrupt_entry:
5: .long ret_from_exception
6: .long ret_from_irq
7: .long do_IRQ
-8: .long do_exception_error
-#ifdef CONFIG_SH_FPU
-9: .long fpu_error_trap_handler
-#endif
+8: .long exception_error
trap_entry:
mov #0x30,r8
@@ -200,24 +183,9 @@ trap_entry:
add #-0x10,r9 ! convert SH2 to SH3/4 ABI
1:
shll2 r9 ! TRA
- mov #OFF_TRA,r8
- add r15,r8
- mov.l r9,@r8
- mov r9,r8
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 2f, r9
- jsr @r9
- nop
-#endif
- sti
- bra system_call
- nop
+ bra system_call ! jump common systemcall entry
+ mov r9,r8
- .align 2
-#ifdef CONFIG_TRACE_IRQFLAGS
-2: .long trace_hardirqs_on
-#endif
-
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
@@ -240,7 +208,7 @@ ENTRY(sh_bios_handler)
mov.l @r2,r2
stc sr,r3
mov.l r2,@r0
- mov.l r3,@r0
+ mov.l r3,@(4,r0)
mov.l r1,@(8,r0)
mov.l @r15+, r0
mov.l @r15+, r1
@@ -259,8 +227,9 @@ ENTRY(sh_bios_handler)
mov.l @r15+, r14
add #8,r15
lds.l @r15+, pr
+ mov.l @r15+,r15
rte
- mov.l @r15+,r15
+ nop
.align 2
1: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
@@ -272,22 +241,30 @@ ENTRY(address_error_trap_handler)
mov.l 1f,r0
jmp @r0
mov #0,r5 ! writeaccess is unknown
- .align 2
+ .align 2
1: .long do_address_error
restore_all:
- cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 1f, r0
- jsr @r0
- nop
-#endif
+ stc sr,r0
+ or #0xf0,r0
+ ldc r0,sr ! all interrupt block (same BL = 1)
+ ! restore special register
+ ! overlap exception frame
+ mov r15,r0
+ add #17*4,r0
+ lds.l @r0+,pr
+ add #4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
mov r15,r0
mov.l $cpu_mode,r2
mov #OFF_SR,r3
mov.l @(r0,r3),r1
- mov.l r1,@r2
+ mov.l __md_bit,r3
+ and r1,r3 ! copy MD bit
+ mov.l r3,@r2
shll2 r1 ! clear MD bit
shlr2 r1
mov.l @(OFF_SP,r0),r2
@@ -297,12 +274,6 @@ restore_all:
mov #OFF_PC,r3
mov.l @(r0,r3),r1
mov.l r1,@r2 ! set pc
- add #4*16+4,r0
- lds.l @r0+,pr
- add #4,r0 ! skip sr
- ldc.l @r0+,gbr
- lds.l @r0+,mach
- lds.l @r0+,macl
get_current_thread_info r0, r1
mov.l $current_thread_info,r1
mov.l r0,@r1
@@ -326,9 +297,8 @@ restore_all:
nop
.align 2
-#ifdef CONFIG_TRACE_IRQFLAGS
-1: .long trace_hardirqs_off
-#endif
+__md_bit:
+ .long 0x40000000
$current_thread_info:
.long __current_thread_info
$cpu_mode:
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S
index 6d285af7846..85b0bf81fc1 100644
--- a/arch/sh/kernel/cpu/sh2/ex.S
+++ b/arch/sh/kernel/cpu/sh2/ex.S
@@ -18,16 +18,17 @@
exception_entry:
no = 0
.rept 256
- mov.l r0,@-sp
- mov #no,r0
+ mov.l r1,@-sp
bra exception_trampoline
- and #0xff,r0
+ mov #no,r1
no = no + 1
.endr
exception_trampoline:
- mov.l r1,@-sp
- mov.l $exception_handler,r1
- jmp @r1
+ mov.l r0,@-sp
+ mov.l $exception_handler,r0
+ extu.b r1,r1
+ jmp @r0
+ extu.w r1,r1
.align 2
$exception_entry:
@@ -41,6 +42,6 @@ $exception_handler:
ENTRY(vbr_base)
vector = 0
.rept 256
- .long exception_entry + vector * 8
+ .long exception_entry + vector * 6
vector = vector + 1
.endr
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index 5916d9096b9..6c687ae812e 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void cpu_probe(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
boot_cpu_data.type = CPU_SH7619;
@@ -29,7 +29,5 @@ int __init detect_cpu_and_cache_system(void)
*/
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
-
- return 0;
+ boot_cpu_data.family = CPU_FAMILY_SH2;
}
-
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index cc530f4d84d..58c19adae90 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -2,6 +2,7 @@
* SH7619 Setup
*
* Copyright (C) 2006 Yoshinori Sato
+ * Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,6 +12,9 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_eth.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
enum {
UNUSED = 0,
@@ -18,15 +22,10 @@ enum {
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
WDT, EDMAC, CMT0, CMT1,
- SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
- SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
- SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
+ SCIF0, SCIF1, SCIF2,
HIF_HIFI, HIF_HIFBI,
DMAC0, DMAC1, DMAC2, DMAC3,
SIOF,
-
- /* interrupt groups */
- SCIF0, SCIF1, SCIF2,
};
static struct intc_vect vectors[] __initdata = {
@@ -36,24 +35,18 @@ static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ6, 82), INTC_IRQ(IRQ7, 83),
INTC_IRQ(WDT, 84), INTC_IRQ(EDMAC, 85),
INTC_IRQ(CMT0, 86), INTC_IRQ(CMT1, 87),
- INTC_IRQ(SCIF0_ERI, 88), INTC_IRQ(SCIF0_RXI, 89),
- INTC_IRQ(SCIF0_BRI, 90), INTC_IRQ(SCIF0_TXI, 91),
- INTC_IRQ(SCIF1_ERI, 92), INTC_IRQ(SCIF1_RXI, 93),
- INTC_IRQ(SCIF1_BRI, 94), INTC_IRQ(SCIF1_TXI, 95),
- INTC_IRQ(SCIF2_ERI, 96), INTC_IRQ(SCIF2_RXI, 97),
- INTC_IRQ(SCIF2_BRI, 98), INTC_IRQ(SCIF2_TXI, 99),
+ INTC_IRQ(SCIF0, 88), INTC_IRQ(SCIF0, 89),
+ INTC_IRQ(SCIF0, 90), INTC_IRQ(SCIF0, 91),
+ INTC_IRQ(SCIF1, 92), INTC_IRQ(SCIF1, 93),
+ INTC_IRQ(SCIF1, 94), INTC_IRQ(SCIF1, 95),
+ INTC_IRQ(SCIF2, 96), INTC_IRQ(SCIF2, 97),
+ INTC_IRQ(SCIF2, 98), INTC_IRQ(SCIF2, 99),
INTC_IRQ(HIF_HIFI, 100), INTC_IRQ(HIF_HIFBI, 101),
INTC_IRQ(DMAC0, 104), INTC_IRQ(DMAC1, 105),
INTC_IRQ(DMAC2, 106), INTC_IRQ(DMAC3, 107),
INTC_IRQ(SIOF, 108),
};
-static struct intc_group groups[] __initdata = {
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
- INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xf8140006, 0, 16, 4, /* IPRA */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xf8140008, 0, 16, 4, /* IPRB */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
@@ -64,40 +57,127 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xf8080008, 0, 16, 4, /* IPRG */ { SIOF } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, groups,
+static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
NULL, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xf8400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 88, 89, 91, 90},
- }, {
- .mapbase = 0xf8410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 92, 93, 95, 94},
- }, {
- .mapbase = 0xf8420000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 96, 97, 99, 98},
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xf8400000, 0x100),
+ DEFINE_RES_IRQ(88),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xf8410000, 0x100),
+ DEFINE_RES_IRQ(92),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xf8420000, 0x100),
+ DEFINE_RES_IRQ(96),
+};
+
+static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct sh_eth_plat_data eth_platform_data = {
+ .phy = 1,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
+static struct resource eth_resources[] = {
+ [0] = {
+ .start = 0xfb000000,
+ .end = 0xfb0001c7,
+ .flags = IORESOURCE_MEM,
},
+ [1] = {
+ .start = 85,
+ .end = 85,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device eth_device = {
+ .name = "sh7619-ether",
+ .id = -1,
+ .dev = {
+ .platform_data = &eth_platform_data,
+ },
+ .num_resources = ARRAY_SIZE(eth_resources),
+ .resource = eth_resources,
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0xf84a0070, 0x10),
+ DEFINE_RES_IRQ(86),
+ DEFINE_RES_IRQ(87),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-16",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
};
static struct platform_device *sh7619_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &eth_device,
+ &cmt_device,
};
static int __init sh7619_devices_setup(void)
@@ -105,9 +185,27 @@ static int __init sh7619_devices_setup(void)
return platform_add_devices(sh7619_devices,
ARRAY_SIZE(sh7619_devices));
}
-__initcall(sh7619_devices_setup);
+arch_initcall(sh7619_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
+
+static struct platform_device *sh7619_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &cmt_device,
+};
+
+#define STBCR3 0xf80a0000
+
+void __init plat_early_device_setup(void)
+{
+ /* enable CMT clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x10, STBCR3);
+
+ early_platform_add_devices(sh7619_early_devices,
+ ARRAY_SIZE(sh7619_early_devices));
+}
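
Each scifN_device above is bound by whichever platform driver registers under the name "sh-sci". A much-simplified sketch of that matching; the real sh-sci driver is far more involved, and the probe body here is only a placeholder:

    static int example_sci_probe(struct platform_device *pdev)
    {
            struct plat_sci_port *cfg = pdev->dev.platform_data;
            struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            int irq = platform_get_irq(pdev, 0);

            /* cfg, res->start and irq now describe one SCIF channel */
            (void)cfg; (void)res; (void)irq;
            return 0;
    }

    static struct platform_driver example_sci_driver = {
            .probe  = example_sci_probe,
            .driver = {
                    .name = "sh-sci",       /* matches scifN_device.name */
            },
    };
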
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
index b279cdc3a23..990195d9845 100644
--- a/arch/sh/kernel/cpu/sh2a/Makefile
+++ b/arch/sh/kernel/cpu/sh2a/Makefile
@@ -4,10 +4,21 @@
obj-y := common.o probe.o opcode_helper.o
-common-y += $(addprefix ../sh2/, ex.o entry.o)
+common-y += ex.o entry.o
obj-$(CONFIG_SH_FPU) += fpu.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7201) += setup-sh7201.o clock-sh7201.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7264) += setup-sh7264.o clock-sh7264.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7269) += setup-sh7269.o clock-sh7269.o
+obj-$(CONFIG_CPU_SUBTYPE_MXG) += setup-mxg.o clock-sh7206.o
+
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7203) := pinmux-sh7203.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7264) := pinmux-sh7264.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7269) := pinmux-sh7269.o
+
+obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
new file mode 100644
index 00000000000..532a36c7232
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+ *
+ * SH7201 support for the clock framework
+ *
+ * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk>
+ *
+ * Based on clock-sh4.c
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static const int pll1rate[]={1,2,3,4,6,8};
+static const int pfc_divisors[]={1,2,3,4,6,8,12};
+#define ifc_divisors pfc_divisors
+
+static unsigned int pll2_mult;
+
+static void master_clk_init(struct clk *clk)
+{
+ clk->rate = 10000000 * pll2_mult *
+ pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct sh_clk_ops sh7201_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7201_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7201_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
+ return clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct sh_clk_ops sh7201_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh7201_clk_ops[] = {
+ &sh7201_master_clk_ops,
+ &sh7201_module_clk_ops,
+ &sh7201_bus_clk_ops,
+ &sh7201_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
+ pll2_mult = 1;
+ else if (test_mode_pin(MODE_PIN1))
+ pll2_mult = 2;
+ else
+ pll2_mult = 4;
+
+ if (idx < ARRAY_SIZE(sh7201_clk_ops))
+ *ops = sh7201_clk_ops[idx];
+}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
index fb781329848..529f719b6e3 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c
@@ -25,65 +25,57 @@ static const int pll1rate[]={8,12,16,0};
static const int pfc_divisors[]={1,2,3,4,6,8,12};
#define ifc_divisors pfc_divisors
-#if (CONFIG_SH_CLK_MD == 0)
-#define PLL2 (1)
-#elif (CONFIG_SH_CLK_MD == 1)
-#define PLL2 (2)
-#elif (CONFIG_SH_CLK_MD == 2)
-#define PLL2 (4)
-#elif (CONFIG_SH_CLK_MD == 3)
-#define PLL2 (4)
-#else
-#error "Illegal Clock Mode!"
-#endif
+static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0003] * PLL2 ;
+ clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * pll2_mult;
}
-static struct clk_ops sh7203_master_clk_ops = {
+static struct sh_clk_ops sh7203_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7203_module_clk_ops = {
+static struct sh_clk_ops sh7203_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
- clk->rate = clk->parent->rate / pfc_divisors[idx-2];
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx-2];
}
-static struct clk_ops sh7203_bus_clk_ops = {
+static struct sh_clk_ops sh7203_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
-{
- clk->rate = clk->parent->rate;
-}
-
-static struct clk_ops sh7203_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct sh_clk_ops sh7203_cpu_clk_ops = {
+ .recalc = followparent_recalc,
};
-static struct clk_ops *sh7203_clk_ops[] = {
+static struct sh_clk_ops *sh7203_clk_ops[] = {
&sh7203_master_clk_ops,
&sh7203_module_clk_ops,
&sh7203_bus_clk_ops,
&sh7203_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
+ if (test_mode_pin(MODE_PIN1))
+ pll2_mult = 4;
+ else if (test_mode_pin(MODE_PIN0))
+ pll2_mult = 2;
+ else
+ pll2_mult = 1;
+
if (idx < ARRAY_SIZE(sh7203_clk_ops))
*ops = sh7203_clk_ops[idx];
}
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
index 82d7f991ef6..17778983467 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
@@ -22,64 +22,62 @@ static const int pll1rate[]={1,2,3,4,6,8};
static const int pfc_divisors[]={1,2,3,4,6,8,12};
#define ifc_divisors pfc_divisors
-#if (CONFIG_SH_CLK_MD == 2)
-#define PLL2 (4)
-#elif (CONFIG_SH_CLK_MD == 6)
-#define PLL2 (2)
-#elif (CONFIG_SH_CLK_MD == 7)
-#define PLL2 (1)
-#else
-#error "Illigal Clock Mode!"
-#endif
+static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
- clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+ clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
-static struct clk_ops sh7206_master_clk_ops = {
+static struct sh_clk_ops sh7206_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7206_module_clk_ops = {
+static struct sh_clk_ops sh7206_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+ return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
-static struct clk_ops sh7206_bus_clk_ops = {
+static struct sh_clk_ops sh7206_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FREQCR) & 0x0007);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ int idx = (__raw_readw(FREQCR) & 0x0007);
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh7206_cpu_clk_ops = {
+static struct sh_clk_ops sh7206_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7206_clk_ops[] = {
+static struct sh_clk_ops *sh7206_clk_ops[] = {
&sh7206_master_clk_ops,
&sh7206_module_clk_ops,
&sh7206_bus_clk_ops,
&sh7206_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
+ if (test_mode_pin(MODE_PIN2 | MODE_PIN1 | MODE_PIN0))
+ pll2_mult = 1;
+ else if (test_mode_pin(MODE_PIN2 | MODE_PIN1))
+ pll2_mult = 2;
+ else if (test_mode_pin(MODE_PIN1))
+ pll2_mult = 4;
+
if (idx < ARRAY_SIZE(sh7206_clk_ops))
*ops = sh7206_clk_ops[idx];
}
-
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7264.c b/arch/sh/kernel/cpu/sh2a/clock-sh7264.c
new file mode 100644
index 00000000000..8638fba6cd7
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7264.c
@@ -0,0 +1,153 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7264.c
+ *
+ * SH7264 clock framework support
+ *
+ * Copyright (C) 2012 Phil Edworthy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+/* SH7264 registers */
+#define FRQCR 0xfffe0010
+#define STBCR3 0xfffe0408
+#define STBCR4 0xfffe040c
+#define STBCR5 0xfffe0410
+#define STBCR6 0xfffe0414
+#define STBCR7 0xfffe0418
+#define STBCR8 0xfffe041c
+
+static const unsigned int pll1rate[] = {8, 12};
+
+static unsigned int pll1_div;
+
+/* Fixed 32 KHz root clock for RTC */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 18000000,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long rate = clk->parent->rate / pll1_div;
+ return rate * pll1rate[(__raw_readw(FRQCR) >> 8) & 1];
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &pll_clk,
+};
+
+static int div2[] = { 1, 2, 3, 4, 6, 8, 12 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_P,
+ DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+/* The mask field specifies the div2 entries that are valid */
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 4, 0x7, CLK_ENABLE_REG_16BIT
+ | CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x78, CLK_ENABLE_REG_16BIT),
+};
+
+enum { MSTP77, MSTP74, MSTP72,
+ MSTP60,
+ MSTP35, MSTP34, MSTP33, MSTP32, MSTP30,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP77] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 7, 0), /* SCIF */
+ [MSTP74] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 4, 0), /* VDC */
+ [MSTP72] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 2, 0), /* CMT */
+ [MSTP60] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR6, 0, 0), /* USB */
+ [MSTP35] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 6, 0), /* MTU2 */
+ [MSTP34] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 4, 0), /* SDHI0 */
+ [MSTP33] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 3, 0), /* SDHI1 */
+ [MSTP32] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 2, 0), /* ADC */
+ [MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("sci_ick", &mstp_clks[MSTP77]),
+ CLKDEV_CON_ID("vdc3", &mstp_clks[MSTP74]),
+ CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]),
+ CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP34]),
+ CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP33]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ if (test_mode_pin(MODE_PIN0)) {
+ if (test_mode_pin(MODE_PIN1))
+ pll1_div = 3;
+ else
+ pll1_div = 4;
+ } else
+ pll1_div = 1;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
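
The clkdev lookup table is what lets drivers resolve these clocks by (dev_id, con_id) pairs. A hedged sketch of the consumer side for the CMT function clock registered as CLKDEV_ICK_ID("fck", "sh-cmt-16.0", ...), with error handling trimmed:

    static int example_cmt_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            clk = clk_get(&pdev->dev, "fck");       /* resolved via the lookup table */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            clk_enable(clk);
            dev_info(&pdev->dev, "fck runs at %lu Hz\n", clk_get_rate(clk));
            return 0;
    }
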
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7269.c b/arch/sh/kernel/cpu/sh2a/clock-sh7269.c
new file mode 100644
index 00000000000..f8a5c2abdfb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7269.c
@@ -0,0 +1,184 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7269.c
+ *
+ * SH7269 clock framework support
+ *
+ * Copyright (C) 2012 Phil Edworthy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+/* SH7269 registers */
+#define FRQCR 0xfffe0010
+#define STBCR3 0xfffe0408
+#define STBCR4 0xfffe040c
+#define STBCR5 0xfffe0410
+#define STBCR6 0xfffe0414
+#define STBCR7 0xfffe0418
+
+#define PLL_RATE 20
+
+/* Fixed 32 KHz root clock for RTC */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 13340000,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ return clk->parent->rate * PLL_RATE;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long peripheral0_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 8;
+}
+
+static struct sh_clk_ops peripheral0_clk_ops = {
+ .recalc = peripheral0_recalc,
+};
+
+static struct clk peripheral0_clk = {
+ .ops = &peripheral0_clk_ops,
+ .parent = &pll_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long peripheral1_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 4;
+}
+
+static struct sh_clk_ops peripheral1_clk_ops = {
+ .recalc = peripheral1_recalc,
+};
+
+static struct clk peripheral1_clk = {
+ .ops = &peripheral1_clk_ops,
+ .parent = &pll_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &pll_clk,
+ &peripheral0_clk,
+ &peripheral1_clk,
+};
+
+static int div2[] = { 1, 2, 0, 4 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_B,
+ DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+/* The mask field specifies the div2 entries that are valid */
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 8, 0xB, CLK_ENABLE_REG_16BIT
+ | CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 4, 0xA, CLK_ENABLE_REG_16BIT
+ | CLK_ENABLE_ON_INIT),
+};
+
+enum { MSTP72,
+ MSTP60,
+ MSTP47, MSTP46, MSTP45, MSTP44, MSTP43, MSTP42, MSTP41, MSTP40,
+ MSTP35, MSTP32, MSTP30,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP72] = SH_CLK_MSTP8(&peripheral0_clk, STBCR7, 2, 0), /* CMT */
+ [MSTP60] = SH_CLK_MSTP8(&peripheral1_clk, STBCR6, 0, 0), /* USB */
+ [MSTP47] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 7, 0), /* SCIF0 */
+ [MSTP46] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 6, 0), /* SCIF1 */
+ [MSTP45] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 5, 0), /* SCIF2 */
+ [MSTP44] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 4, 0), /* SCIF3 */
+ [MSTP43] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 3, 0), /* SCIF4 */
+ [MSTP42] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 2, 0), /* SCIF5 */
+ [MSTP41] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 1, 0), /* SCIF6 */
+ [MSTP40] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 0, 0), /* SCIF7 */
+ [MSTP35] = SH_CLK_MSTP8(&peripheral0_clk, STBCR3, 5, 0), /* MTU2 */
+ [MSTP32] = SH_CLK_MSTP8(&peripheral1_clk, STBCR3, 2, 0), /* ADC */
+ [MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+ CLKDEV_CON_ID("peripheral_clk", &peripheral1_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+
+ /* MSTP clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP47]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP46]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP45]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP44]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP43]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP42]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.6", &mstp_clks[MSTP41]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.7", &mstp_clks[MSTP40]),
+ CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]),
+ CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
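
Since the SH7269 peripheral clocks are fixed dividers off the PLL, the default rates fall straight out of the constants above (boards may still override extal with clk_set_rate()). A quick worked computation:

    #include <stdio.h>

    int main(void)
    {
            unsigned long extal = 13340000;         /* default rate from this file */
            unsigned long pll = extal * 20;         /* PLL_RATE */

            printf("pll : %lu Hz\n", pll);          /* 266800000 */
            printf("p0  : %lu Hz\n", pll / 8);      /* peripheral0: 33350000 */
            printf("p1  : %lu Hz\n", pll / 4);      /* peripheral1: 66700000 */
            return 0;
    }
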
diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S
new file mode 100644
index 00000000000..222742ddc0d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/entry.S
@@ -0,0 +1,250 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/entry.S
+ *
+ * The SH-2A exception entry
+ *
+ * Copyright (C) 2008 Yoshinori Sato
+ * Based on arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+
+/* Offsets to the stack */
+OFF_R0 = 0 /* Return value. New ABI also arg4 */
+OFF_R1 = 4 /* New ABI: arg5 */
+OFF_R2 = 8 /* New ABI: arg6 */
+OFF_R3 = 12 /* New ABI: syscall_nr */
+OFF_R4 = 16 /* New ABI: arg0 */
+OFF_R5 = 20 /* New ABI: arg1 */
+OFF_R6 = 24 /* New ABI: arg2 */
+OFF_R7 = 28 /* New ABI: arg3 */
+OFF_SP = (15*4)
+OFF_PC = (16*4)
+OFF_SR = (16*4+2*4)
+OFF_TRA = (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
+ENTRY(exception_handler)
+ ! stack
+ ! r0 <- point sp
+ ! r1
+ ! pc
+ ! sr
+ ! r0 = temporary
+ ! r1 = vector (pseudo EXPEVT / INTEVT / TRA)
+ mov.l r2,@-sp
+ cli
+ mov.l $cpu_mode,r2
+	bld.b	#6,@(0,r2)		!previous SR.MD
+ bst.b #6,@(4*4,r15) !set cpu mode to SR.MD
+ bt 1f
+ ! switch to kernel mode
+ bset.b #6,@(0,r2) !set SR.MD
+ mov.l $current_thread_info,r2
+ mov.l @r2,r2
+ mov #(THREAD_SIZE >> 8),r0
+ shll8 r0
+ add r2,r0 ! r0 = kernel stack tail
+ mov r15,r2 ! r2 = user stack top
+ mov r0,r15 ! switch kernel stack
+ mov.l r1,@-r15 ! TRA
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ mov.l @(4*4,r2),r0
+ mov.l r0,@-r15 ! original SR
+ sts.l pr,@-r15
+ mov.l @(3*4,r2),r0
+ mov.l r0,@-r15 ! original PC
+ mov r2,r0
+ add #(3+2)*4,r0 ! rewind r0 - r3 + exception frame
+ lds r0,pr ! pr = original SP
+ movmu.l r3,@-r15 ! save regs
+	mov	r2,r8		! r8 = previous stack top
+ mov r1,r9 ! r9 = interrupt vector
+ ! restore previous stack
+ mov.l @r8+,r2
+ mov.l @r8+,r0
+ mov.l @r8+,r1
+ bra 2f
+ movml.l r2,@-r15
+1:
+ ! in kernel exception
+ mov r15,r2
+ add #-((OFF_TRA + 4) - OFF_PC) + 5*4,r15
+ movmu.l r3,@-r15
+ mov r2,r8 ! r8 = previous stack top
+ mov r1,r9 ! r9 = interrupt vector
+ ! restore exception frame & regs
+ mov.l @r8+,r2 ! old R2
+ mov.l @r8+,r0 ! old R0
+ mov.l @r8+,r1 ! old R1
+ mov.l @r8+,r10 ! old PC
+ mov.l @r8+,r11 ! old SR
+ movml.l r2,@-r15
+ mov.l r10,@(OFF_PC,r15)
+ mov.l r11,@(OFF_SR,r15)
+ mov.l r8,@(OFF_SP,r15) ! save old sp
+ mov r15,r8
+ add #OFF_TRA + 4,r8
+ mov.l r9,@-r8
+ sts.l macl,@-r8
+ sts.l mach,@-r8
+ stc.l gbr,@-r8
+ add #-4,r8
+ sts.l pr,@-r8
+2:
+ ! dispatch exception / interrupt
+ mov #64,r8
+ cmp/hs r8,r9
+ bt interrupt_entry ! vec >= 64 is interrupt
+ mov #32,r8
+ cmp/hs r8,r9
+ bt trap_entry ! 64 > vec >= 32 is trap
+
+ mov.l 4f,r8
+ mov r9,r4
+ shll2 r9
+ add r9,r8
+ mov.l @r8,r8 ! exception handler address
+ tst r8,r8
+ bf 3f
+ mov.l 8f,r8 ! unhandled exception
+3:
+ mov.l 5f,r10
+ jmp @r8
+ lds r10,pr
+
+interrupt_entry:
+ mov r9,r4
+ mov r15,r5
+ mov.l 7f,r8
+ mov.l 6f,r9
+ jmp @r8
+ lds r9,pr
+
+ .align 2
+4: .long exception_handling_table
+5: .long ret_from_exception
+6: .long ret_from_irq
+7: .long do_IRQ
+8: .long exception_error
+
+trap_entry:
+ mov #0x30,r8
+ cmp/ge r8,r9 ! vector 0x20-0x2f is systemcall
+ bt 1f
+ add #-0x10,r9 ! convert SH2 to SH3/4 ABI
+1:
+ shll2 r9 ! TRA
+ bra system_call ! jump to common system call entry
+ mov r9,r8
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+ /* Unwind the stack and jmp to the debug entry */
+ENTRY(sh_bios_handler)
+ mov r15,r0
+ add #(22-4)*4-4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l @(OFF_SP,r0),r1
+ mov.l @(OFF_SR,r2),r3
+ mov.l r3,@-r1
+ mov.l @(OFF_SP,r2),r3
+ mov.l r3,@-r1
+ mov r15,r0
+ add #(22-4)*4-8,r0
+ mov.l 1f,r2
+ mov.l @r2,r2
+ stc sr,r3
+ mov.l r2,@r0
+ mov.l r3,@(4,r0)
+ mov.l r1,@(8,r0)
+ movml.l @r15+,r14
+ add #8,r15
+ lds.l @r15+, pr
+ mov.l @r15+,r15
+ rte
+ nop
+ .align 2
+1: .long gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_trap_handler)
+ mov r15,r4 ! regs
+ mov.l @(OFF_PC,r15),r6 ! pc
+ mov.l 1f,r0
+ jmp @r0
+ mov #0,r5 ! writeaccess is unknown
+
+ .align 2
+1: .long do_address_error
+
+restore_all:
+ stc sr,r0
+ or #0xf0,r0
+ ldc r0,sr ! block all interrupts (same as BL = 1)
+ ! restore special registers
+ ! (overlaps the exception frame)
+ mov r15,r0
+ add #17*4,r0
+ lds.l @r0+,pr
+ add #4,r0
+ ldc.l @r0+,gbr
+ lds.l @r0+,mach
+ lds.l @r0+,macl
+ mov r15,r0
+ mov.l $cpu_mode,r2
+ bld.b #6,@(OFF_SR,r15)
+ bst.b #6,@(0,r2) ! save CPU mode
+ mov.l @(OFF_SR,r0),r1
+ shll2 r1
+ shlr2 r1 ! clear MD bit
+ mov.l @(OFF_SP,r0),r2
+ add #-8,r2
+ mov.l r2,@(OFF_SP,r0) ! point exception frame top
+ mov.l r1,@(4,r2) ! set sr
+ mov.l @(OFF_PC,r0),r1
+ mov.l r1,@r2 ! set pc
+ get_current_thread_info r0, r1
+ mov.l $current_thread_info,r1
+ mov.l r0,@r1
+ movml.l @r15+,r14
+ mov.l @r15,r15
+ rte
+ nop
+
+ .align 2
+$current_thread_info:
+ .long __current_thread_info
+$cpu_mode:
+ .long __cpu_mode
+
+! common exception handler
+#include "../../entry-common.S"
+
+ .data
+! CPU operation mode
+! bit 30 = MD (SH3/4 compatible)
+__cpu_mode:
+ .long 0x40000000
+
+ .section .bss
+__current_thread_info:
+ .long 0
+
+ENTRY(exception_handling_table)
+ .space 4*32
diff --git a/arch/sh/kernel/cpu/sh2a/ex.S b/arch/sh/kernel/cpu/sh2a/ex.S
new file mode 100644
index 00000000000..4568066700c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/ex.S
@@ -0,0 +1,73 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/ex.S
+ *
+ * The SH-2A exception vector table
+ *
+ * Copyright (C) 2008 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
+
+! exception no 0 to 255
+exception_entry0:
+no = 0
+ .rept 256
+ mov.l r1,@-sp
+ bra exception_trampoline0
+ mov #no,r1
+no = no + 1
+ .endr
+exception_trampoline0:
+ mov.l r0,@-sp
+ mov.l 1f,r0
+ extu.b r1,r1
+ jmp @r0
+ extu.w r1,r1
+
+ .align 2
+1: .long exception_handler
+
+! exception no 256 to 511
+exception_entry1:
+no = 0
+ .rept 256
+ mov.l r1,@-sp
+ bra exception_trampoline1
+ mov #no,r1
+no = no + 1
+ .endr
+exception_trampoline1:
+ mov.l r0,@-sp
+ extu.b r1,r1
+ movi20 #0x100,r0
+ add r0,r1
+ mov.l 1f,r0
+ jmp @r0
+ extu.w r1,r1
+
+ .align 2
+1: .long exception_handler
+
+!
+! Exception Vector Base
+!
+ .align 2
+ENTRY(vbr_base)
+vector = 0
+ .rept 256
+ .long exception_entry0 + vector * 6
+vector = vector + 1
+ .endr
+vector = 0
+ .rept 256
+ .long exception_entry1 + vector * 6
+vector = vector + 1
+ .endr
diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c
index 5627c0b3ffa..98bbaa447c9 100644
--- a/arch/sh/kernel/cpu/sh2a/fpu.c
+++ b/arch/sh/kernel/cpu/sh2a/fpu.c
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/fpu.h>
+#include <asm/traps.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
@@ -25,14 +26,11 @@
/*
* Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
*/
-void
-save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
- clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu();
asm volatile("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
@@ -54,17 +52,15 @@ save_fpu(struct task_struct *tsk, struct pt_regs *regs)
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t"
: "=r" (dummy)
- : "0" ((char *)(&tsk->thread.fpu.hard.status)),
+ : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
"r" (FPSCR_RCHG),
"r" (FPSCR_INIT)
: "memory");
disable_fpu();
- release_fpu(regs);
}
-static void
-restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
@@ -88,45 +84,12 @@ restore_fpu(struct task_struct *tsk)
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
: "=r" (dummy)
- : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG)
+ : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
: "memory");
disable_fpu();
}
/*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precission represents signaling NANS.
- */
-
-static void
-fpu_init(void)
-{
- enable_fpu();
- asm volatile("lds %0, fpul\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "lds %2, fpscr\n\t"
- : /* no output */
- : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT));
- disable_fpu();
-}
-
-/*
* Emulate arithmetic ops on denormalized number for some FPU insns.
*/
@@ -300,7 +263,7 @@ static int denormal_addf(int hx, int hy)
iy = hy & 0x7fffffff;
if (iy < 0x00800000) {
ix = denormal_subf1(ix, iy);
- if (ix < 0) {
+ if ((int) ix < 0) {
ix = -ix;
sign ^= 0x80000000;
}
@@ -385,7 +348,7 @@ static long long denormal_addd(long long hx, long long hy)
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL) {
ix = denormal_subd1(ix, iy);
- if (ix < 0) {
+ if ((int) ix < 0) {
ix = -ix;
sign ^= 0x8000000000000000LL;
}
@@ -493,9 +456,9 @@ ieee_fpe_handler (struct pt_regs *regs)
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
- if ((tsk->thread.fpu.hard.fpscr & FPSCR_FPU_ERROR)) {
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
/* FPU error */
- denormal_to_double (&tsk->thread.fpu.hard,
+ denormal_to_double (&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
} else
return 0;
@@ -510,9 +473,9 @@ ieee_fpe_handler (struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
@@ -522,15 +485,15 @@ ieee_fpe_handler (struct pt_regs *regs)
/* FPU error because of denormal */
llx = ((long long) hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((hx & 0x7fffffff) >= 0x00100000)
llx = denormal_muld(lly, llx);
else
llx = denormal_muld(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
@@ -539,7 +502,7 @@ ieee_fpe_handler (struct pt_regs *regs)
hx = denormal_mulf(hy, hx);
else
hx = denormal_mulf(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -553,9 +516,9 @@ ieee_fpe_handler (struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
@@ -565,15 +528,15 @@ ieee_fpe_handler (struct pt_regs *regs)
/* FPU error because of denormal */
llx = ((long long) hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m+1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((finsn & 0xf00f) == 0xf000)
llx = denormal_addd(llx, lly);
else
llx = denormal_addd(llx, lly ^ (1LL << 63));
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n+1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
@@ -582,7 +545,7 @@ ieee_fpe_handler (struct pt_regs *regs)
hx = denormal_addf(hx, hy);
else
hx = denormal_addf(hx, hy ^ 0x80000000);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -598,37 +561,15 @@ BUILD_TRAP_HANDLER(fpu_error)
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
- save_fpu(tsk, regs);
+ __unlazy_fpu(tsk, regs);
if (ieee_fpe_handler(regs)) {
- tsk->thread.fpu.hard.fpscr &=
+ tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
grab_fpu(regs);
restore_fpu(tsk);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
+ task_thread_info(tsk)->status |= TS_USEDFPU;
return;
}
force_sig(SIGFPE, tsk);
}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
- struct task_struct *tsk = current;
- TRAP_HANDLER_DECL;
-
- grab_fpu(regs);
- if (!user_mode(regs)) {
- printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
- return;
- }
-
- if (used_math()) {
- /* Using the FPU again. */
- restore_fpu(tsk);
- } else {
- /* First time FPU user. */
- fpu_init();
- set_used_math();
- }
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
-}
diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
index 9704b7926d8..72aa61c81e4 100644
--- a/arch/sh/kernel/cpu/sh2a/opcode_helper.c
+++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c
@@ -10,7 +10,6 @@
* for more details.
*/
#include <linux/kernel.h>
-#include <asm/system.h>
/*
* Instructions on SH are generally fixed at 16-bits, however, SH-2A
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
new file mode 100644
index 00000000000..eef17dcc3a4
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c
@@ -0,0 +1,30 @@
+/*
+ * SH7203 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7203_pfc_resources[] = {
+ [0] = {
+ .start = 0xfffe3800,
+ .end = 0xfffe3a9f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7203", sh7203_pfc_resources,
+ ARRAY_SIZE(sh7203_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c
new file mode 100644
index 00000000000..569decbd6d9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c
@@ -0,0 +1,30 @@
+/*
+ * SH7264 Pinmux
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7264_pfc_resources[] = {
+ [0] = {
+ .start = 0xfffe3800,
+ .end = 0xfffe393f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7264", sh7264_pfc_resources,
+ ARRAY_SIZE(sh7264_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
new file mode 100644
index 00000000000..4c17fb6970b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
@@ -0,0 +1,31 @@
+/*
+ * SH7269 Pinmux
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ * Copyright (C) 2012 Phil Edworthy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7269_pfc_resources[] = {
+ [0] = {
+ .start = 0xfffe3800,
+ .end = 0xfffe391f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7269", sh7269_pfc_resources,
+ ARRAY_SIZE(sh7269_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 6910e266446..3f87971082f 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,21 +13,33 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void cpu_probe(void)
{
+ boot_cpu_data.family = CPU_FAMILY_SH2A;
+
/* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */
boot_cpu_data.flags |= CPU_HAS_OP32;
-#if defined(CONFIG_CPU_SUBTYPE_SH7203)
+#if defined(CONFIG_CPU_SUBTYPE_SH7201)
+ boot_cpu_data.type = CPU_SH7201;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7203)
boot_cpu_data.type = CPU_SH7203;
- /* SH7203 has an FPU.. */
boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
boot_cpu_data.type = CPU_SH7263;
boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7264)
+ boot_cpu_data.type = CPU_SH7264;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
+#elif defined(CONFIG_CPU_SUBTYPE_SH7269)
+ boot_cpu_data.type = CPU_SH7269;
+ boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
boot_cpu_data.type = CPU_SH7206;
- /* While SH7206 has a DSP.. */
+ boot_cpu_data.flags |= CPU_HAS_DSP;
+#elif defined(CONFIG_CPU_SUBTYPE_MXG)
+ boot_cpu_data.type = CPU_MXG;
boot_cpu_data.flags |= CPU_HAS_DSP;
#endif
@@ -45,6 +57,4 @@ int __init detect_cpu_and_cache_system(void)
* on the cache info.
*/
boot_cpu_data.icache = boot_cpu_data.dcache;
-
- return 0;
}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
new file mode 100644
index 00000000000..26fcdbd4127
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c
@@ -0,0 +1,178 @@
+/*
+ * Renesas MX-G (R8A03022BG) Setup
+ *
+ * Copyright (C) 2008, 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15,
+
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+ SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1,
+
+ SCIF0, SCIF1,
+
+ MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5,
+ MTU2_TGI3B, MTU2_TGI3C,
+
+ /* interrupt groups */
+ PINT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+ INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73),
+ INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75),
+ INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77),
+ INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79),
+
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+
+ INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95),
+ INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97),
+ INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99),
+ INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101),
+
+ INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221),
+ INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223),
+ INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225),
+ INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227),
+
+ INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229),
+ INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231),
+ INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233),
+
+ INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235),
+ INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237),
+ INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239),
+
+ INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241),
+ INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243),
+
+ INTC_IRQ(MTU2_TGI3B, 244),
+ INTC_IRQ(MTU2_TGI3C, 245),
+
+ INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247),
+ INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249),
+ INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251),
+
+ INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253),
+ INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } },
+ { 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } },
+ { 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffd9800, 0, 16, 4, /* IPR06 */ { } },
+ { 0xfffd9802, 0, 16, 4, /* IPR07 */ { } },
+ { 0xfffd9804, 0, 16, 4, /* IPR08 */ { } },
+ { 0xfffd9806, 0, 16, 4, /* IPR09 */ { } },
+ { 0xfffd9808, 0, 16, 4, /* IPR10 */ { } },
+ { 0xfffd980a, 0, 16, 4, /* IPR11 */ { } },
+ { 0xfffd980c, 0, 16, 4, /* IPR12 */ { } },
+ { 0xfffd980e, 0, 16, 4, /* IPR13 */ { } },
+ { 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } },
+ { 0xfffd9812, 0, 16, 4, /* IPR15 */
+ { SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } },
+ { 0xfffd9814, 0, 16, 4, /* IPR16 */
+ { MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffd9408, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct resource mtu2_resources[] = {
+ DEFINE_RES_MEM(0xff801000, 0x400),
+ DEFINE_RES_IRQ_NAMED(228, "tgi0a"),
+ DEFINE_RES_IRQ_NAMED(234, "tgi1a"),
+ DEFINE_RES_IRQ_NAMED(240, "tgi2a"),
+};
+
+static struct platform_device mtu2_device = {
+ .name = "sh-mtu2",
+ .id = -1,
+ .resource = mtu2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_resources),
+};
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xff804000, 0x100),
+ DEFINE_RES_IRQ(220),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct platform_device *mxg_devices[] __initdata = {
+ &scif0_device,
+ &mtu2_device,
+};
+
+static int __init mxg_devices_setup(void)
+{
+ return platform_add_devices(mxg_devices,
+ ARRAY_SIZE(mxg_devices));
+}
+arch_initcall(mxg_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *mxg_early_devices[] __initdata = {
+ &scif0_device,
+ &mtu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(mxg_early_devices,
+ ARRAY_SIZE(mxg_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
new file mode 100644
index 00000000000..abc0ce9fb80
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c
@@ -0,0 +1,428 @@
+/*
+ * SH7201 setup
+ *
+ * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+
+ ADC_ADI,
+
+ MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU,
+ MTU23_ABCD, MTU24_ABCD, MTU25_UVW, MTU2_TCI3V, MTU2_TCI4V,
+
+ RTC, WDT,
+
+ IIC30, IIC31, IIC32,
+
+ DMAC0_DMINT0, DMAC1_DMINT1,
+ DMAC2_DMINT2, DMAC3_DMINT3,
+
+ SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
+
+ DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6,
+ DMAC7_DMINT7,
+
+ RCAN0, RCAN1,
+
+ SSI0_SSII, SSI1_SSII,
+
+ TMR0, TMR1,
+
+ /* interrupt groups */
+ PINT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+
+ INTC_IRQ(ADC_ADI, 92),
+
+ INTC_IRQ(MTU20_ABCD, 108), INTC_IRQ(MTU20_ABCD, 109),
+ INTC_IRQ(MTU20_ABCD, 110), INTC_IRQ(MTU20_ABCD, 111),
+
+ INTC_IRQ(MTU20_VEF, 112), INTC_IRQ(MTU20_VEF, 113),
+ INTC_IRQ(MTU20_VEF, 114),
+
+ INTC_IRQ(MTU21_AB, 116), INTC_IRQ(MTU21_AB, 117),
+ INTC_IRQ(MTU21_VU, 120), INTC_IRQ(MTU21_VU, 121),
+
+ INTC_IRQ(MTU22_AB, 124), INTC_IRQ(MTU22_AB, 125),
+ INTC_IRQ(MTU22_VU, 128), INTC_IRQ(MTU22_VU, 129),
+
+ INTC_IRQ(MTU23_ABCD, 132), INTC_IRQ(MTU23_ABCD, 133),
+ INTC_IRQ(MTU23_ABCD, 134), INTC_IRQ(MTU23_ABCD, 135),
+
+ INTC_IRQ(MTU2_TCI3V, 136),
+
+ INTC_IRQ(MTU24_ABCD, 140), INTC_IRQ(MTU24_ABCD, 141),
+ INTC_IRQ(MTU24_ABCD, 142), INTC_IRQ(MTU24_ABCD, 143),
+
+ INTC_IRQ(MTU2_TCI4V, 144),
+
+ INTC_IRQ(MTU25_UVW, 148), INTC_IRQ(MTU25_UVW, 149),
+ INTC_IRQ(MTU25_UVW, 150),
+
+ INTC_IRQ(RTC, 152), INTC_IRQ(RTC, 153),
+ INTC_IRQ(RTC, 154),
+
+ INTC_IRQ(WDT, 156),
+
+ INTC_IRQ(IIC30, 157), INTC_IRQ(IIC30, 158),
+ INTC_IRQ(IIC30, 159), INTC_IRQ(IIC30, 160),
+ INTC_IRQ(IIC30, 161),
+
+ INTC_IRQ(IIC31, 164), INTC_IRQ(IIC31, 165),
+ INTC_IRQ(IIC31, 166), INTC_IRQ(IIC31, 167),
+ INTC_IRQ(IIC31, 168),
+
+ INTC_IRQ(IIC32, 170), INTC_IRQ(IIC32, 171),
+ INTC_IRQ(IIC32, 172), INTC_IRQ(IIC32, 173),
+ INTC_IRQ(IIC32, 174),
+
+ INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177),
+ INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179),
+
+ INTC_IRQ(SCIF0, 180), INTC_IRQ(SCIF0, 181),
+ INTC_IRQ(SCIF0, 182), INTC_IRQ(SCIF0, 183),
+ INTC_IRQ(SCIF1, 184), INTC_IRQ(SCIF1, 185),
+ INTC_IRQ(SCIF1, 186), INTC_IRQ(SCIF1, 187),
+ INTC_IRQ(SCIF2, 188), INTC_IRQ(SCIF2, 189),
+ INTC_IRQ(SCIF2, 190), INTC_IRQ(SCIF2, 191),
+ INTC_IRQ(SCIF3, 192), INTC_IRQ(SCIF3, 193),
+ INTC_IRQ(SCIF3, 194), INTC_IRQ(SCIF3, 195),
+ INTC_IRQ(SCIF4, 196), INTC_IRQ(SCIF4, 197),
+ INTC_IRQ(SCIF4, 198), INTC_IRQ(SCIF4, 199),
+ INTC_IRQ(SCIF5, 200), INTC_IRQ(SCIF5, 201),
+ INTC_IRQ(SCIF5, 202), INTC_IRQ(SCIF5, 203),
+ INTC_IRQ(SCIF6, 204), INTC_IRQ(SCIF6, 205),
+ INTC_IRQ(SCIF6, 206), INTC_IRQ(SCIF6, 207),
+ INTC_IRQ(SCIF7, 208), INTC_IRQ(SCIF7, 209),
+ INTC_IRQ(SCIF7, 210), INTC_IRQ(SCIF7, 211),
+
+ INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216),
+ INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218),
+ INTC_IRQ(DMAC7_DMINT7, 219),
+
+ INTC_IRQ(RCAN0, 228), INTC_IRQ(RCAN0, 229),
+ INTC_IRQ(RCAN0, 230),
+ INTC_IRQ(RCAN0, 231), INTC_IRQ(RCAN0, 232),
+
+ INTC_IRQ(RCAN1, 234), INTC_IRQ(RCAN1, 235),
+ INTC_IRQ(RCAN1, 236),
+ INTC_IRQ(RCAN1, 237), INTC_IRQ(RCAN1, 238),
+
+ INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245),
+
+ INTC_IRQ(TMR0, 246), INTC_IRQ(TMR0, 247),
+ INTC_IRQ(TMR0, 248),
+
+ INTC_IRQ(TMR1, 252), INTC_IRQ(TMR1, 253),
+ INTC_IRQ(TMR1, 254),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe9420, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI, 0 } },
+ { 0xfffe9800, 0, 16, 4, /* IPR06 */ { 0, MTU20_ABCD, MTU20_VEF, MTU21_AB } },
+ { 0xfffe9802, 0, 16, 4, /* IPR07 */ { MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD } },
+ { 0xfffe9804, 0, 16, 4, /* IPR08 */ { MTU2_TCI3V, MTU24_ABCD, MTU2_TCI4V, MTU25_UVW } },
+
+ { 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } },
+ { 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } },
+ { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1 } },
+ { 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } },
+ { 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4 } },
+ { 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } },
+ { 0xfffe9812, 0, 16, 4, /* IPR15 */ { 0, RCAN0, RCAN1, 0 } },
+ { 0xfffe9814, 0, 16, 4, /* IPR16 */ { SSI0_SSII, SSI1_SSII, TMR0, TMR1 } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe9408, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xfffe8000, 0x100),
+ DEFINE_RES_IRQ(180),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xfffe8800, 0x100),
+ DEFINE_RES_IRQ(184),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xfffe9000, 0x100),
+ DEFINE_RES_IRQ(188),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xfffe9800, 0x100),
+ DEFINE_RES_IRQ(192),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xfffea000, 0x100),
+ DEFINE_RES_IRQ(196),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xfffea800, 0x100),
+ DEFINE_RES_IRQ(200),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif6_resources[] = {
+ DEFINE_RES_MEM(0xfffeb000, 0x100),
+ DEFINE_RES_IRQ(204),
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .resource = scif6_resources,
+ .num_resources = ARRAY_SIZE(scif6_resources),
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif7_resources[] = {
+ DEFINE_RES_MEM(0xfffeb800, 0x100),
+ DEFINE_RES_IRQ(208),
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .resource = scif7_resources,
+ .num_resources = ARRAY_SIZE(scif7_resources),
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffff0800,
+ .end = 0xffff2000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 152,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct resource mtu2_resources[] = {
+ DEFINE_RES_MEM(0xfffe4000, 0x400),
+ DEFINE_RES_IRQ_NAMED(108, "tgi0a"),
+ DEFINE_RES_IRQ_NAMED(116, "tgi1a"),
+ DEFINE_RES_IRQ_NAMED(124, "tgi1b"),
+};
+
+static struct platform_device mtu2_device = {
+ .name = "sh-mtu2",
+ .id = -1,
+ .resource = mtu2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_resources),
+};
+
+static struct platform_device *sh7201_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &rtc_device,
+ &mtu2_device,
+};
+
+static int __init sh7201_devices_setup(void)
+{
+ return platform_add_devices(sh7201_devices,
+ ARRAY_SIZE(sh7201_devices));
+}
+arch_initcall(sh7201_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7201_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &mtu2_device,
+};
+
+#define STBCR3 0xfffe0408
+
+void __init plat_early_device_setup(void)
+{
+ /* enable MTU2 clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
+
+ early_platform_add_devices(sh7201_early_devices,
+ ARRAY_SIZE(sh7201_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
index e98dc445035..3b4894cba92 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c
@@ -1,7 +1,7 @@
/*
* SH7203 and SH7263 Setup
*
- * Copyright (C) 2007 Paul Mundt
+ * Copyright (C) 2007 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,6 +11,8 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
enum {
UNUSED = 0,
@@ -18,50 +20,27 @@ enum {
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
- DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
- DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
- DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
- DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
+ DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
USB, LCDC, CMT0, CMT1, BSC, WDT,
- MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
- MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
- MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
- MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
- MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
- MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
- ADC_ADI,
- IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI, IIC30_TEI,
- IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI, IIC31_TEI,
- IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI, IIC32_TEI,
- IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI, IIC33_TEI,
- SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
- SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
- SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
- SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
- SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI,
- SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI,
- SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
- /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
- ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG, ROMDEC_ISEC, ROMDEC_IBUF,
- ROMDEC_IREADY,
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU4_ABCD, MTU2_TCI3V, MTU2_TCI4V,
- FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+ ADC_ADI,
- SDHI3, SDHI0, SDHI1,
+ IIC30, IIC31, IIC32, IIC33,
+ SCIF0, SCIF1, SCIF2, SCIF3,
- RTC_ARM, RTC_PRD, RTC_CUP,
- RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1, RCAN0_SLE,
- RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1, RCAN1_SLE,
+ SSU0, SSU1,
- SRC_OVF, SRC_ODFI, SRC_IDEI, IEBI,
+ SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
+
+ /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
+ ROMDEC, FLCTL, SDHI, RTC, RCAN0, RCAN1,
+ SRC, IEBI,
/* interrupt groups */
- PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
- MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
- MTU3_ABCD, MTU4_ABCD,
- IIC30, IIC31, IIC32, IIC33, SCIF0, SCIF1, SCIF2, SCIF3,
- SSU0, SSU1, ROMDEC, SDHI, FLCTL, RTC, RCAN0, RCAN1, SRC
+ PINT,
};
static struct intc_vect vectors[] __initdata = {
@@ -73,79 +52,80 @@ static struct intc_vect vectors[] __initdata = {
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
- INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
- INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
- INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
- INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
- INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
- INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
- INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
- INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
+ INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
+ INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
+ INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
+ INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
+ INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
+ INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
+ INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
+ INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
- INTC_IRQ(MTU2_TGI0A, 146), INTC_IRQ(MTU2_TGI0B, 147),
- INTC_IRQ(MTU2_TGI0C, 148), INTC_IRQ(MTU2_TGI0D, 149),
- INTC_IRQ(MTU2_TCI0V, 150),
- INTC_IRQ(MTU2_TGI0E, 151), INTC_IRQ(MTU2_TGI0F, 152),
- INTC_IRQ(MTU2_TGI1A, 153), INTC_IRQ(MTU2_TGI1B, 154),
- INTC_IRQ(MTU2_TCI1V, 155), INTC_IRQ(MTU2_TCI1U, 156),
- INTC_IRQ(MTU2_TGI2A, 157), INTC_IRQ(MTU2_TGI2B, 158),
- INTC_IRQ(MTU2_TCI2V, 159), INTC_IRQ(MTU2_TCI2U, 160),
- INTC_IRQ(MTU2_TGI3A, 161), INTC_IRQ(MTU2_TGI3B, 162),
- INTC_IRQ(MTU2_TGI3C, 163), INTC_IRQ(MTU2_TGI3D, 164),
+ INTC_IRQ(MTU0_ABCD, 146), INTC_IRQ(MTU0_ABCD, 147),
+ INTC_IRQ(MTU0_ABCD, 148), INTC_IRQ(MTU0_ABCD, 149),
+ INTC_IRQ(MTU0_VEF, 150),
+ INTC_IRQ(MTU0_VEF, 151), INTC_IRQ(MTU0_VEF, 152),
+ INTC_IRQ(MTU1_AB, 153), INTC_IRQ(MTU1_AB, 154),
+ INTC_IRQ(MTU1_VU, 155), INTC_IRQ(MTU1_VU, 156),
+ INTC_IRQ(MTU2_AB, 157), INTC_IRQ(MTU2_AB, 158),
+ INTC_IRQ(MTU2_VU, 159), INTC_IRQ(MTU2_VU, 160),
+ INTC_IRQ(MTU3_ABCD, 161), INTC_IRQ(MTU3_ABCD, 162),
+ INTC_IRQ(MTU3_ABCD, 163), INTC_IRQ(MTU3_ABCD, 164),
INTC_IRQ(MTU2_TCI3V, 165),
- INTC_IRQ(MTU2_TGI4A, 166), INTC_IRQ(MTU2_TGI4B, 167),
- INTC_IRQ(MTU2_TGI4C, 168), INTC_IRQ(MTU2_TGI4D, 169),
+ INTC_IRQ(MTU4_ABCD, 166), INTC_IRQ(MTU4_ABCD, 167),
+ INTC_IRQ(MTU4_ABCD, 168), INTC_IRQ(MTU4_ABCD, 169),
INTC_IRQ(MTU2_TCI4V, 170),
INTC_IRQ(ADC_ADI, 171),
- INTC_IRQ(IIC30_STPI, 172), INTC_IRQ(IIC30_NAKI, 173),
- INTC_IRQ(IIC30_RXI, 174), INTC_IRQ(IIC30_TXI, 175),
- INTC_IRQ(IIC30_TEI, 176),
- INTC_IRQ(IIC31_STPI, 177), INTC_IRQ(IIC31_NAKI, 178),
- INTC_IRQ(IIC31_RXI, 179), INTC_IRQ(IIC31_TXI, 180),
- INTC_IRQ(IIC31_TEI, 181),
- INTC_IRQ(IIC32_STPI, 182), INTC_IRQ(IIC32_NAKI, 183),
- INTC_IRQ(IIC32_RXI, 184), INTC_IRQ(IIC32_TXI, 185),
- INTC_IRQ(IIC32_TEI, 186),
- INTC_IRQ(IIC33_STPI, 187), INTC_IRQ(IIC33_NAKI, 188),
- INTC_IRQ(IIC33_RXI, 189), INTC_IRQ(IIC33_TXI, 190),
- INTC_IRQ(IIC33_TEI, 191),
- INTC_IRQ(SCIF0_BRI, 192), INTC_IRQ(SCIF0_ERI, 193),
- INTC_IRQ(SCIF0_RXI, 194), INTC_IRQ(SCIF0_TXI, 195),
- INTC_IRQ(SCIF1_BRI, 196), INTC_IRQ(SCIF1_ERI, 197),
- INTC_IRQ(SCIF1_RXI, 198), INTC_IRQ(SCIF1_TXI, 199),
- INTC_IRQ(SCIF2_BRI, 200), INTC_IRQ(SCIF2_ERI, 201),
- INTC_IRQ(SCIF2_RXI, 202), INTC_IRQ(SCIF2_TXI, 203),
- INTC_IRQ(SCIF3_BRI, 204), INTC_IRQ(SCIF3_ERI, 205),
- INTC_IRQ(SCIF3_RXI, 206), INTC_IRQ(SCIF3_TXI, 207),
- INTC_IRQ(SSU0_SSERI, 208), INTC_IRQ(SSU0_SSRXI, 209),
- INTC_IRQ(SSU0_SSTXI, 210),
- INTC_IRQ(SSU1_SSERI, 211), INTC_IRQ(SSU1_SSRXI, 212),
- INTC_IRQ(SSU1_SSTXI, 213),
+ INTC_IRQ(IIC30, 172), INTC_IRQ(IIC30, 173),
+ INTC_IRQ(IIC30, 174), INTC_IRQ(IIC30, 175),
+ INTC_IRQ(IIC30, 176),
+ INTC_IRQ(IIC31, 177), INTC_IRQ(IIC31, 178),
+ INTC_IRQ(IIC31, 179), INTC_IRQ(IIC31, 180),
+ INTC_IRQ(IIC31, 181),
+ INTC_IRQ(IIC32, 182), INTC_IRQ(IIC32, 183),
+ INTC_IRQ(IIC32, 184), INTC_IRQ(IIC32, 185),
+ INTC_IRQ(IIC32, 186),
+ INTC_IRQ(IIC33, 187), INTC_IRQ(IIC33, 188),
+ INTC_IRQ(IIC33, 189), INTC_IRQ(IIC33, 190),
+ INTC_IRQ(IIC33, 191),
+ INTC_IRQ(SCIF0, 192), INTC_IRQ(SCIF0, 193),
+ INTC_IRQ(SCIF0, 194), INTC_IRQ(SCIF0, 195),
+ INTC_IRQ(SCIF1, 196), INTC_IRQ(SCIF1, 197),
+ INTC_IRQ(SCIF1, 198), INTC_IRQ(SCIF1, 199),
+ INTC_IRQ(SCIF2, 200), INTC_IRQ(SCIF2, 201),
+ INTC_IRQ(SCIF2, 202), INTC_IRQ(SCIF2, 203),
+ INTC_IRQ(SCIF3, 204), INTC_IRQ(SCIF3, 205),
+ INTC_IRQ(SCIF3, 206), INTC_IRQ(SCIF3, 207),
+ INTC_IRQ(SSU0, 208), INTC_IRQ(SSU0, 209),
+ INTC_IRQ(SSU0, 210),
+ INTC_IRQ(SSU1, 211), INTC_IRQ(SSU1, 212),
+ INTC_IRQ(SSU1, 213),
INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
- INTC_IRQ(FLCTL_FLSTEI, 224), INTC_IRQ(FLCTL_FLTENDI, 225),
- INTC_IRQ(FLCTL_FLTREQ0I, 226), INTC_IRQ(FLCTL_FLTREQ1I, 227),
- INTC_IRQ(RTC_ARM, 231), INTC_IRQ(RTC_PRD, 232),
- INTC_IRQ(RTC_CUP, 233),
- INTC_IRQ(RCAN0_ERS, 234), INTC_IRQ(RCAN0_OVR, 235),
- INTC_IRQ(RCAN0_RM0, 236), INTC_IRQ(RCAN0_RM1, 237),
- INTC_IRQ(RCAN0_SLE, 238),
- INTC_IRQ(RCAN1_ERS, 239), INTC_IRQ(RCAN1_OVR, 240),
- INTC_IRQ(RCAN1_RM0, 241), INTC_IRQ(RCAN1_RM1, 242),
- INTC_IRQ(RCAN1_SLE, 243),
+ INTC_IRQ(FLCTL, 224), INTC_IRQ(FLCTL, 225),
+ INTC_IRQ(FLCTL, 226), INTC_IRQ(FLCTL, 227),
+ INTC_IRQ(RTC, 231), INTC_IRQ(RTC, 232),
+ INTC_IRQ(RTC, 233),
+ INTC_IRQ(RCAN0, 234), INTC_IRQ(RCAN0, 235),
+ INTC_IRQ(RCAN0, 236), INTC_IRQ(RCAN0, 237),
+ INTC_IRQ(RCAN0, 238),
+ INTC_IRQ(RCAN1, 239), INTC_IRQ(RCAN1, 240),
+ INTC_IRQ(RCAN1, 241), INTC_IRQ(RCAN1, 242),
+ INTC_IRQ(RCAN1, 243),
/* SH7263-specific trash */
#ifdef CONFIG_CPU_SUBTYPE_SH7263
- INTC_IRQ(ROMDEC_ISY, 218), INTC_IRQ(ROMDEC_IERR, 219),
- INTC_IRQ(ROMDEC_IARG, 220), INTC_IRQ(ROMDEC_ISEC, 221),
- INTC_IRQ(ROMDEC_IBUF, 222), INTC_IRQ(ROMDEC_IREADY, 223),
+ INTC_IRQ(ROMDEC, 218), INTC_IRQ(ROMDEC, 219),
+ INTC_IRQ(ROMDEC, 220), INTC_IRQ(ROMDEC, 221),
+ INTC_IRQ(ROMDEC, 222), INTC_IRQ(ROMDEC, 223),
- INTC_IRQ(SDHI3, 228), INTC_IRQ(SDHI0, 229), INTC_IRQ(SDHI1, 230),
+ INTC_IRQ(SDHI, 228), INTC_IRQ(SDHI, 229),
+ INTC_IRQ(SDHI, 230),
- INTC_IRQ(SRC_OVF, 244), INTC_IRQ(SRC_ODFI, 245),
- INTC_IRQ(SRC_IDEI, 246),
+ INTC_IRQ(SRC, 244), INTC_IRQ(SRC, 245),
+ INTC_IRQ(SRC, 246),
INTC_IRQ(IEBI, 247),
#endif
@@ -154,50 +134,6 @@ static struct intc_vect vectors[] __initdata = {
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
- INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
- INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
- INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
- INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
- INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
- INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
- INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
- INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
- INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
- INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
- INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
- INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
- INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
- INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
- INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
- INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
- INTC_GROUP(IIC30, IIC30_STPI, IIC30_NAKI, IIC30_RXI, IIC30_TXI,
- IIC30_TEI),
- INTC_GROUP(IIC31, IIC31_STPI, IIC31_NAKI, IIC31_RXI, IIC31_TXI,
- IIC31_TEI),
- INTC_GROUP(IIC32, IIC32_STPI, IIC32_NAKI, IIC32_RXI, IIC32_TXI,
- IIC32_TEI),
- INTC_GROUP(IIC33, IIC33_STPI, IIC33_NAKI, IIC33_RXI, IIC33_TXI,
- IIC33_TEI),
- INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
- INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
- INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
- INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
- INTC_GROUP(SSU0, SSU0_SSERI, SSU0_SSRXI, SSU0_SSTXI),
- INTC_GROUP(SSU1, SSU1_SSERI, SSU1_SSRXI, SSU1_SSTXI),
- INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I,
- FLCTL_FLTREQ1I),
- INTC_GROUP(RTC, RTC_ARM, RTC_PRD, RTC_CUP),
- INTC_GROUP(RCAN0, RCAN0_ERS, RCAN0_OVR, RCAN0_RM0, RCAN0_RM1,
- RCAN0_SLE),
- INTC_GROUP(RCAN1, RCAN1_ERS, RCAN1_OVR, RCAN1_RM0, RCAN1_RM1,
- RCAN1_SLE),
-
-#ifdef CONFIG_CPU_SUBTYPE_SH7263
- INTC_GROUP(ROMDEC, ROMDEC_ISY, ROMDEC_IERR, ROMDEC_IARG,
- ROMDEC_ISEC, ROMDEC_IBUF, ROMDEC_IREADY),
- INTC_GROUP(SDHI, SDHI3, SDHI0, SDHI1),
- INTC_GROUP(SRC, SRC_OVF, SRC_ODFI, SRC_IDEI),
-#endif
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -237,40 +173,131 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 193, 194, 195, 192 },
- }, {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 197, 198, 199, 196 },
- }, {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 201, 202, 203, 200 },
- }, {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 205, 206, 207, 204 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xfffe8000, 0x100),
+ DEFINE_RES_IRQ(192),
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xfffe8800, 0x100),
+ DEFINE_RES_IRQ(196),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xfffe9000, 0x100),
+ DEFINE_RES_IRQ(200),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
},
};
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xfffe9800, 0x100),
+ DEFINE_RES_IRQ(204),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0xfffec000, 0x10),
+ DEFINE_RES_IRQ(142),
+ DEFINE_RES_IRQ(143),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-16",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct resource mtu2_resources[] = {
+ DEFINE_RES_MEM(0xfffe4000, 0x400),
+ DEFINE_RES_IRQ_NAMED(146, "tgi0a"),
+ DEFINE_RES_IRQ_NAMED(153, "tgi1a"),
+};
+
+static struct platform_device mtu2_device = {
+ .name = "sh-mtu2",
+ .id = -1,
+ .resource = mtu2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_resources),
+};
+
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffff2000,
@@ -278,17 +305,7 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- /* Period IRQ */
- .start = 232,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = 233,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
+ /* Shared Period/Carry/Alarm IRQ */
.start = 231,
.flags = IORESOURCE_IRQ,
},
@@ -302,7 +319,12 @@ static struct platform_device rtc_device = {
};
static struct platform_device *sh7203_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &mtu2_device,
&rtc_device,
};
@@ -311,9 +333,33 @@ static int __init sh7203_devices_setup(void)
return platform_add_devices(sh7203_devices,
ARRAY_SIZE(sh7203_devices));
}
-__initcall(sh7203_devices_setup);
+arch_initcall(sh7203_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
+
+static struct platform_device *sh7203_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &mtu2_device,
+};
+
+#define STBCR3 0xfffe0408
+#define STBCR4 0xfffe040c
+
+void __init plat_early_device_setup(void)
+{
+ /* enable CMT clock */
+ __raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
+
+ /* enable MTU2 clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
+
+ early_platform_add_devices(sh7203_early_devices,
+ ARRAY_SIZE(sh7203_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
index e6d4ec445dd..49bc5a34bec 100644
--- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
@@ -2,6 +2,7 @@
* SH7206 Setup
*
* Copyright (C) 2006 Yoshinori Sato
+ * Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,6 +12,8 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
enum {
UNUSED = 0,
@@ -19,34 +22,23 @@ enum {
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
ADC_ADI0, ADC_ADI1,
- DMAC0_DEI, DMAC0_HEI, DMAC1_DEI, DMAC1_HEI,
- DMAC2_DEI, DMAC2_HEI, DMAC3_DEI, DMAC3_HEI,
- DMAC4_DEI, DMAC4_HEI, DMAC5_DEI, DMAC5_HEI,
- DMAC6_DEI, DMAC6_HEI, DMAC7_DEI, DMAC7_HEI,
+
+ DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S,
+ IIC3,
+
CMT0, CMT1, BSC, WDT,
- MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D,
- MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F,
- MTU2_TGI1A, MTU2_TGI1B, MTU2_TCI1V, MTU2_TCI1U,
- MTU2_TGI2A, MTU2_TGI2B, MTU2_TCI2V, MTU2_TCI2U,
- MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D, MTU2_TCI3V,
- MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D, MTU2_TCI4V,
- MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W,
- POE2_OEI1, POE2_OEI2,
- MTU2S_TGI3A, MTU2S_TGI3B, MTU2S_TGI3C, MTU2S_TGI3D, MTU2S_TCI3V,
- MTU2S_TGI4A, MTU2S_TGI4B, MTU2S_TGI4C, MTU2S_TGI4D, MTU2S_TCI4V,
- MTU2S_TGI5U, MTU2S_TGI5V, MTU2S_TGI5W,
+
+ MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V,
+
POE2_OEI3,
- IIC3_STPI, IIC3_NAKI, IIC3_RXI, IIC3_TXI, IIC3_TEI,
- SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
- SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
- SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
- SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
+
+ SCIF0, SCIF1, SCIF2, SCIF3,
/* interrupt groups */
- PINT, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
- MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
- MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S,
- IIC3, SCIF0, SCIF1, SCIF2, SCIF3,
+ PINT,
};
static struct intc_vect vectors[] __initdata = {
@@ -59,86 +51,58 @@ static struct intc_vect vectors[] __initdata = {
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96),
- INTC_IRQ(DMAC0_DEI, 108), INTC_IRQ(DMAC0_HEI, 109),
- INTC_IRQ(DMAC1_DEI, 112), INTC_IRQ(DMAC1_HEI, 113),
- INTC_IRQ(DMAC2_DEI, 116), INTC_IRQ(DMAC2_HEI, 117),
- INTC_IRQ(DMAC3_DEI, 120), INTC_IRQ(DMAC3_HEI, 121),
- INTC_IRQ(DMAC4_DEI, 124), INTC_IRQ(DMAC4_HEI, 125),
- INTC_IRQ(DMAC5_DEI, 128), INTC_IRQ(DMAC5_HEI, 129),
- INTC_IRQ(DMAC6_DEI, 132), INTC_IRQ(DMAC6_HEI, 133),
- INTC_IRQ(DMAC7_DEI, 136), INTC_IRQ(DMAC7_HEI, 137),
+ INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
+ INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
+ INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
+ INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
+ INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
+ INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
+ INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
+ INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144),
INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152),
- INTC_IRQ(MTU2_TGI0A, 156), INTC_IRQ(MTU2_TGI0B, 157),
- INTC_IRQ(MTU2_TGI0C, 158), INTC_IRQ(MTU2_TGI0D, 159),
- INTC_IRQ(MTU2_TCI0V, 160),
- INTC_IRQ(MTU2_TGI0E, 161), INTC_IRQ(MTU2_TGI0F, 162),
- INTC_IRQ(MTU2_TGI1A, 164), INTC_IRQ(MTU2_TGI1B, 165),
- INTC_IRQ(MTU2_TCI1V, 168), INTC_IRQ(MTU2_TCI1U, 169),
- INTC_IRQ(MTU2_TGI2A, 172), INTC_IRQ(MTU2_TGI2B, 173),
- INTC_IRQ(MTU2_TCI2V, 176), INTC_IRQ(MTU2_TCI2U, 177),
- INTC_IRQ(MTU2_TGI3A, 180), INTC_IRQ(MTU2_TGI3B, 181),
- INTC_IRQ(MTU2_TGI3C, 182), INTC_IRQ(MTU2_TGI3D, 183),
+ INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157),
+ INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159),
+ INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161),
+ INTC_IRQ(MTU0_VEF, 162),
+ INTC_IRQ(MTU1_AB, 164), INTC_IRQ(MTU1_AB, 165),
+ INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169),
+ INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173),
+ INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177),
+ INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181),
+ INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183),
INTC_IRQ(MTU2_TCI3V, 184),
- INTC_IRQ(MTU2_TGI4A, 188), INTC_IRQ(MTU2_TGI4B, 189),
- INTC_IRQ(MTU2_TGI4C, 190), INTC_IRQ(MTU2_TGI4D, 191),
+ INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189),
+ INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191),
INTC_IRQ(MTU2_TCI4V, 192),
- INTC_IRQ(MTU2_TGI5U, 196), INTC_IRQ(MTU2_TGI5V, 197),
- INTC_IRQ(MTU2_TGI5W, 198),
- INTC_IRQ(POE2_OEI1, 200), INTC_IRQ(POE2_OEI2, 201),
- INTC_IRQ(MTU2S_TGI3A, 204), INTC_IRQ(MTU2S_TGI3B, 205),
- INTC_IRQ(MTU2S_TGI3C, 206), INTC_IRQ(MTU2S_TGI3D, 207),
+ INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197),
+ INTC_IRQ(MTU5, 198),
+ INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201),
+ INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205),
+ INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207),
INTC_IRQ(MTU2S_TCI3V, 208),
- INTC_IRQ(MTU2S_TGI4A, 212), INTC_IRQ(MTU2S_TGI4B, 213),
- INTC_IRQ(MTU2S_TGI4C, 214), INTC_IRQ(MTU2S_TGI4D, 215),
+ INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213),
+ INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215),
INTC_IRQ(MTU2S_TCI4V, 216),
- INTC_IRQ(MTU2S_TGI5U, 220), INTC_IRQ(MTU2S_TGI5V, 221),
- INTC_IRQ(MTU2S_TGI5W, 222),
+ INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221),
+ INTC_IRQ(MTU5S, 222),
INTC_IRQ(POE2_OEI3, 224),
- INTC_IRQ(IIC3_STPI, 228), INTC_IRQ(IIC3_NAKI, 229),
- INTC_IRQ(IIC3_RXI, 230), INTC_IRQ(IIC3_TXI, 231),
- INTC_IRQ(IIC3_TEI, 232),
- INTC_IRQ(SCIF0_BRI, 240), INTC_IRQ(SCIF0_ERI, 241),
- INTC_IRQ(SCIF0_RXI, 242), INTC_IRQ(SCIF0_TXI, 243),
- INTC_IRQ(SCIF1_BRI, 244), INTC_IRQ(SCIF1_ERI, 245),
- INTC_IRQ(SCIF1_RXI, 246), INTC_IRQ(SCIF1_TXI, 247),
- INTC_IRQ(SCIF2_BRI, 248), INTC_IRQ(SCIF2_ERI, 249),
- INTC_IRQ(SCIF2_RXI, 250), INTC_IRQ(SCIF2_TXI, 251),
- INTC_IRQ(SCIF3_BRI, 252), INTC_IRQ(SCIF3_ERI, 253),
- INTC_IRQ(SCIF3_RXI, 254), INTC_IRQ(SCIF3_TXI, 255),
+ INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229),
+ INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231),
+ INTC_IRQ(IIC3, 232),
+ INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241),
+ INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 243),
+ INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245),
+ INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247),
+ INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249),
+ INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251),
+ INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253),
+ INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
- INTC_GROUP(DMAC0, DMAC0_DEI, DMAC0_HEI),
- INTC_GROUP(DMAC1, DMAC1_DEI, DMAC1_HEI),
- INTC_GROUP(DMAC2, DMAC2_DEI, DMAC2_HEI),
- INTC_GROUP(DMAC3, DMAC3_DEI, DMAC3_HEI),
- INTC_GROUP(DMAC4, DMAC4_DEI, DMAC4_HEI),
- INTC_GROUP(DMAC5, DMAC5_DEI, DMAC5_HEI),
- INTC_GROUP(DMAC6, DMAC6_DEI, DMAC6_HEI),
- INTC_GROUP(DMAC7, DMAC7_DEI, DMAC7_HEI),
- INTC_GROUP(MTU0_ABCD, MTU2_TGI0A, MTU2_TGI0B, MTU2_TGI0C, MTU2_TGI0D),
- INTC_GROUP(MTU0_VEF, MTU2_TCI0V, MTU2_TGI0E, MTU2_TGI0F),
- INTC_GROUP(MTU1_AB, MTU2_TGI1A, MTU2_TGI1B),
- INTC_GROUP(MTU1_VU, MTU2_TCI1V, MTU2_TCI1U),
- INTC_GROUP(MTU2_AB, MTU2_TGI2A, MTU2_TGI2B),
- INTC_GROUP(MTU2_VU, MTU2_TCI2V, MTU2_TCI2U),
- INTC_GROUP(MTU3_ABCD, MTU2_TGI3A, MTU2_TGI3B, MTU2_TGI3C, MTU2_TGI3D),
- INTC_GROUP(MTU4_ABCD, MTU2_TGI4A, MTU2_TGI4B, MTU2_TGI4C, MTU2_TGI4D),
- INTC_GROUP(MTU5, MTU2_TGI5U, MTU2_TGI5V, MTU2_TGI5W),
- INTC_GROUP(POE2_12, POE2_OEI1, POE2_OEI2),
- INTC_GROUP(MTU3S_ABCD, MTU2S_TGI3A, MTU2S_TGI3B,
- MTU2S_TGI3C, MTU2S_TGI3D),
- INTC_GROUP(MTU4S_ABCD, MTU2S_TGI4A, MTU2S_TGI4B,
- MTU2S_TGI4C, MTU2S_TGI4D),
- INTC_GROUP(MTU5S, MTU2S_TGI5U, MTU2S_TGI5V, MTU2S_TGI5W),
- INTC_GROUP(IIC3, IIC3_STPI, IIC3_NAKI, IIC3_RXI, IIC3_TXI, IIC3_TEI),
- INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
- INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
- INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
- INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -169,42 +133,131 @@ static struct intc_mask_reg mask_registers[] __initdata = {
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffe8000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 241, 242, 243, 240 },
- }, {
- .mapbase = 0xfffe8800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 245, 246, 247, 244 },
- }, {
- .mapbase = 0xfffe9000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 249, 250, 251, 248 },
- }, {
- .mapbase = 0xfffe9800,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 253, 254, 255, 252 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xfffe8000, 0x100),
+ DEFINE_RES_IRQ(240),
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xfffe8800, 0x100),
+ DEFINE_RES_IRQ(244),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xfffe9000, 0x100),
+ DEFINE_RES_IRQ(248),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xfffe9800, 0x100),
+ DEFINE_RES_IRQ(252),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif3_platform_data,
},
};
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0xfffec000, 0x10),
+ DEFINE_RES_IRQ(140),
+ DEFINE_RES_IRQ(144),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-16",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct resource mtu2_resources[] = {
+ DEFINE_RES_MEM(0xfffe4000, 0x400),
+ DEFINE_RES_IRQ_NAMED(156, "tgi0a"),
+ DEFINE_RES_IRQ_NAMED(164, "tgi1a"),
+ DEFINE_RES_IRQ_NAMED(180, "tgi2a"),
+};
+
+static struct platform_device mtu2_device = {
+ .name = "sh-mtu2s",
+ .id = -1,
+ .resource = mtu2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_resources),
+};
+
static struct platform_device *sh7206_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &mtu2_device,
};
static int __init sh7206_devices_setup(void)
@@ -212,9 +265,33 @@ static int __init sh7206_devices_setup(void)
return platform_add_devices(sh7206_devices,
ARRAY_SIZE(sh7206_devices));
}
-__initcall(sh7206_devices_setup);
+arch_initcall(sh7206_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
+
+static struct platform_device *sh7206_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &mtu2_device,
+};
+
+#define STBCR3 0xfffe0408
+#define STBCR4 0xfffe040c
+
+void __init plat_early_device_setup(void)
+{
+ /* enable CMT clock */
+ __raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
+
+ /* enable MTU2 clock */
+ __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
+
+ early_platform_add_devices(sh7206_early_devices,
+ ARRAY_SIZE(sh7206_early_devices));
+}
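
The per-port SCIF conversion above replaces the old .mapbase/.irqs arrays with generic resource tables. As a point of reference (assuming the DEFINE_RES_* helpers behave as in include/linux/ioport.h), DEFINE_RES_MEM() and DEFINE_RES_IRQ() are only shorthand for open-coded struct resource entries; a minimal sketch of the scif0 table written out by hand (the _open_coded name is illustrative):

	/* hand-expanded equivalent of the scif0_resources[] table above */
	static struct resource scif0_resources_open_coded[] = {
		{	/* DEFINE_RES_MEM(0xfffe8000, 0x100) */
			.start	= 0xfffe8000,
			.end	= 0xfffe8000 + 0x100 - 1,
			.flags	= IORESOURCE_MEM,
		},
		{	/* DEFINE_RES_IRQ(240) */
			.start	= 240,
			.end	= 240,
			.flags	= IORESOURCE_IRQ,
		},
	};

The sh-sci driver is then expected to pick these up with platform_get_resource()/platform_get_irq() rather than from dedicated fields in the platform data.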
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7264.c b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c
new file mode 100644
index 00000000000..60814645556
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c
@@ -0,0 +1,570 @@
+/*
+ * SH7264 Setup
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/usb/r8a66597.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+
+ DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+ DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15,
+ USB, VDC3, CMT0, CMT1, BSC, WDT,
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V,
+ PWMT1, PWMT2, ADC_ADI,
+ SSIF0, SSII1, SSII2, SSII3,
+ RSPDIF,
+ IIC30, IIC31, IIC32, IIC33,
+ SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
+ SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
+ SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
+ SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
+ SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI,
+ SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI,
+ SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI,
+ SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI,
+ SIO_FIFO, RSPIC0, RSPIC1,
+ RCAN0, RCAN1, IEBC, CD_ROMD,
+ NFMC, SDHI, RTC,
+ SRCC0, SRCC1, DCOMU, OFFI, IFEI,
+
+ /* interrupt groups */
+ PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+
+ INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
+ INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
+ INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
+ INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
+ INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
+ INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
+ INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
+ INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
+ INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141),
+ INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145),
+ INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149),
+ INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153),
+ INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157),
+ INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161),
+ INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165),
+ INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169),
+
+ INTC_IRQ(USB, 170),
+ INTC_IRQ(VDC3, 171), INTC_IRQ(VDC3, 172),
+ INTC_IRQ(VDC3, 173), INTC_IRQ(VDC3, 174),
+ INTC_IRQ(CMT0, 175), INTC_IRQ(CMT1, 176),
+ INTC_IRQ(BSC, 177), INTC_IRQ(WDT, 178),
+
+ INTC_IRQ(MTU0_ABCD, 179), INTC_IRQ(MTU0_ABCD, 180),
+ INTC_IRQ(MTU0_ABCD, 181), INTC_IRQ(MTU0_ABCD, 182),
+ INTC_IRQ(MTU0_VEF, 183),
+ INTC_IRQ(MTU0_VEF, 184), INTC_IRQ(MTU0_VEF, 185),
+ INTC_IRQ(MTU1_AB, 186), INTC_IRQ(MTU1_AB, 187),
+ INTC_IRQ(MTU1_VU, 188), INTC_IRQ(MTU1_VU, 189),
+ INTC_IRQ(MTU2_AB, 190), INTC_IRQ(MTU2_AB, 191),
+ INTC_IRQ(MTU2_VU, 192), INTC_IRQ(MTU2_VU, 193),
+ INTC_IRQ(MTU3_ABCD, 194), INTC_IRQ(MTU3_ABCD, 195),
+ INTC_IRQ(MTU3_ABCD, 196), INTC_IRQ(MTU3_ABCD, 197),
+ INTC_IRQ(MTU3_TCI3V, 198),
+ INTC_IRQ(MTU4_ABCD, 199), INTC_IRQ(MTU4_ABCD, 200),
+ INTC_IRQ(MTU4_ABCD, 201), INTC_IRQ(MTU4_ABCD, 202),
+ INTC_IRQ(MTU4_TCI4V, 203),
+
+ INTC_IRQ(PWMT1, 204), INTC_IRQ(PWMT2, 205),
+
+ INTC_IRQ(ADC_ADI, 206),
+
+ INTC_IRQ(SSIF0, 207), INTC_IRQ(SSIF0, 208),
+ INTC_IRQ(SSIF0, 209),
+ INTC_IRQ(SSII1, 210), INTC_IRQ(SSII1, 211),
+ INTC_IRQ(SSII2, 212), INTC_IRQ(SSII2, 213),
+ INTC_IRQ(SSII3, 214), INTC_IRQ(SSII3, 215),
+
+ INTC_IRQ(RSPDIF, 216),
+
+ INTC_IRQ(IIC30, 217), INTC_IRQ(IIC30, 218),
+ INTC_IRQ(IIC30, 219), INTC_IRQ(IIC30, 220),
+ INTC_IRQ(IIC30, 221),
+ INTC_IRQ(IIC31, 222), INTC_IRQ(IIC31, 223),
+ INTC_IRQ(IIC31, 224), INTC_IRQ(IIC31, 225),
+ INTC_IRQ(IIC31, 226),
+ INTC_IRQ(IIC32, 227), INTC_IRQ(IIC32, 228),
+ INTC_IRQ(IIC32, 229), INTC_IRQ(IIC32, 230),
+ INTC_IRQ(IIC32, 231),
+
+ INTC_IRQ(SCIF0_BRI, 232), INTC_IRQ(SCIF0_ERI, 233),
+ INTC_IRQ(SCIF0_RXI, 234), INTC_IRQ(SCIF0_TXI, 235),
+ INTC_IRQ(SCIF1_BRI, 236), INTC_IRQ(SCIF1_ERI, 237),
+ INTC_IRQ(SCIF1_RXI, 238), INTC_IRQ(SCIF1_TXI, 239),
+ INTC_IRQ(SCIF2_BRI, 240), INTC_IRQ(SCIF2_ERI, 241),
+ INTC_IRQ(SCIF2_RXI, 242), INTC_IRQ(SCIF2_TXI, 243),
+ INTC_IRQ(SCIF3_BRI, 244), INTC_IRQ(SCIF3_ERI, 245),
+ INTC_IRQ(SCIF3_RXI, 246), INTC_IRQ(SCIF3_TXI, 247),
+ INTC_IRQ(SCIF4_BRI, 248), INTC_IRQ(SCIF4_ERI, 249),
+ INTC_IRQ(SCIF4_RXI, 250), INTC_IRQ(SCIF4_TXI, 251),
+ INTC_IRQ(SCIF5_BRI, 252), INTC_IRQ(SCIF5_ERI, 253),
+ INTC_IRQ(SCIF5_RXI, 254), INTC_IRQ(SCIF5_TXI, 255),
+ INTC_IRQ(SCIF6_BRI, 256), INTC_IRQ(SCIF6_ERI, 257),
+ INTC_IRQ(SCIF6_RXI, 258), INTC_IRQ(SCIF6_TXI, 259),
+ INTC_IRQ(SCIF7_BRI, 260), INTC_IRQ(SCIF7_ERI, 261),
+ INTC_IRQ(SCIF7_RXI, 262), INTC_IRQ(SCIF7_TXI, 263),
+
+ INTC_IRQ(SIO_FIFO, 264),
+
+ INTC_IRQ(RSPIC0, 265), INTC_IRQ(RSPIC0, 266),
+ INTC_IRQ(RSPIC0, 267),
+ INTC_IRQ(RSPIC1, 268), INTC_IRQ(RSPIC1, 269),
+ INTC_IRQ(RSPIC1, 270),
+
+ INTC_IRQ(RCAN0, 271), INTC_IRQ(RCAN0, 272),
+ INTC_IRQ(RCAN0, 273), INTC_IRQ(RCAN0, 274),
+ INTC_IRQ(RCAN0, 275),
+ INTC_IRQ(RCAN1, 276), INTC_IRQ(RCAN1, 277),
+ INTC_IRQ(RCAN1, 278), INTC_IRQ(RCAN1, 279),
+ INTC_IRQ(RCAN1, 280),
+
+ INTC_IRQ(IEBC, 281),
+
+ INTC_IRQ(CD_ROMD, 282), INTC_IRQ(CD_ROMD, 283),
+ INTC_IRQ(CD_ROMD, 284), INTC_IRQ(CD_ROMD, 285),
+ INTC_IRQ(CD_ROMD, 286), INTC_IRQ(CD_ROMD, 287),
+
+ INTC_IRQ(NFMC, 288), INTC_IRQ(NFMC, 289),
+ INTC_IRQ(NFMC, 290), INTC_IRQ(NFMC, 291),
+
+ INTC_IRQ(SDHI, 292), INTC_IRQ(SDHI, 293),
+ INTC_IRQ(SDHI, 294),
+
+ INTC_IRQ(RTC, 296), INTC_IRQ(RTC, 297),
+ INTC_IRQ(RTC, 298),
+
+ INTC_IRQ(SRCC0, 299), INTC_IRQ(SRCC0, 300),
+ INTC_IRQ(SRCC0, 301), INTC_IRQ(SRCC0, 302),
+ INTC_IRQ(SRCC0, 303),
+ INTC_IRQ(SRCC1, 304), INTC_IRQ(SRCC1, 305),
+ INTC_IRQ(SRCC1, 306), INTC_IRQ(SRCC1, 307),
+ INTC_IRQ(SRCC1, 308),
+
+ INTC_IRQ(DCOMU, 310), INTC_IRQ(DCOMU, 311),
+ INTC_IRQ(DCOMU, 312),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+ INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
+ INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
+ INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
+ INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
+ INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI),
+ INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI),
+ INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI),
+ INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
+ { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9,
+ DMAC10, DMAC11 } },
+ { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13,
+ DMAC14, DMAC15 } },
+ { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC3, CMT0, CMT1 } },
+ { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
+ { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU1_AB, MTU1_VU,
+ MTU2_AB, MTU2_VU } },
+ { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU3_ABCD, MTU3_TCI3V,
+ MTU4_ABCD, MTU4_TCI4V } },
+ { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { PWMT1, PWMT2, ADC_ADI, 0 } },
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSIF0, SSII1, SSII2, SSII3 } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { RSPDIF, IIC30, IIC31, IIC32 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
+ { 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SCIF4, SCIF5, SCIF6, SCIF7 } },
+ { 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { SIO_FIFO, 0, RSPIC0, RSPIC1 } },
+ { 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { RCAN0, RCAN1, IEBC, CD_ROMD } },
+ { 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { NFMC, SDHI, RTC, 0 } },
+ { 0xfffe0c20, 0, 16, 4, /* IPR22 */ { SRCC0, SRCC1, 0, DCOMU } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe0808, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7264", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xfffe8000, 0x100),
+ DEFINE_RES_IRQ(233),
+ DEFINE_RES_IRQ(234),
+ DEFINE_RES_IRQ(235),
+ DEFINE_RES_IRQ(232),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xfffe8800, 0x100),
+ DEFINE_RES_IRQ(237),
+ DEFINE_RES_IRQ(238),
+ DEFINE_RES_IRQ(239),
+ DEFINE_RES_IRQ(236),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xfffe9000, 0x100),
+ DEFINE_RES_IRQ(241),
+ DEFINE_RES_IRQ(242),
+ DEFINE_RES_IRQ(243),
+ DEFINE_RES_IRQ(240),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xfffe9800, 0x100),
+ DEFINE_RES_IRQ(245),
+ DEFINE_RES_IRQ(246),
+ DEFINE_RES_IRQ(247),
+ DEFINE_RES_IRQ(244),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xfffea000, 0x100),
+ DEFINE_RES_IRQ(249),
+ DEFINE_RES_IRQ(250),
+ DEFINE_RES_IRQ(251),
+ DEFINE_RES_IRQ(248),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xfffea800, 0x100),
+ DEFINE_RES_IRQ(253),
+ DEFINE_RES_IRQ(254),
+ DEFINE_RES_IRQ(255),
+ DEFINE_RES_IRQ(252),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif6_resources[] = {
+ DEFINE_RES_MEM(0xfffeb000, 0x100),
+ DEFINE_RES_IRQ(257),
+ DEFINE_RES_IRQ(258),
+ DEFINE_RES_IRQ(259),
+ DEFINE_RES_IRQ(256),
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .resource = scif6_resources,
+ .num_resources = ARRAY_SIZE(scif6_resources),
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif7_resources[] = {
+ DEFINE_RES_MEM(0xfffeb800, 0x100),
+ DEFINE_RES_IRQ(261),
+ DEFINE_RES_IRQ(262),
+ DEFINE_RES_IRQ(263),
+ DEFINE_RES_IRQ(260),
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .resource = scif7_resources,
+ .num_resources = ARRAY_SIZE(scif7_resources),
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0xfffec000, 0x10),
+ DEFINE_RES_IRQ(175),
+ DEFINE_RES_IRQ(176),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-16",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct resource mtu2_resources[] = {
+ DEFINE_RES_MEM(0xfffe4000, 0x400),
+ DEFINE_RES_IRQ_NAMED(179, "tgi0a"),
+ DEFINE_RES_IRQ_NAMED(186, "tgi1a"),
+};
+
+static struct platform_device mtu2_device = {
+ .name = "sh-mtu2",
+ .id = -1,
+ .resource = mtu2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_resources),
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xfffe6000,
+ .end = 0xfffe6000 + 0x30 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 296,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+/* USB Host */
+static void usb_port_power(int port, int power)
+{
+ __raw_writew(0x200, 0xffffc0c2); /* Initialise UACS25 */
+}
+
+static struct r8a66597_platdata r8a66597_data = {
+ .on_chip = 1,
+ .endian = 1,
+ .port_power = usb_port_power,
+};
+
+static struct resource r8a66597_usb_host_resources[] = {
+ [0] = {
+ .start = 0xffffc000,
+ .end = 0xffffc0e4,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 170,
+ .end = 170,
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device r8a66597_usb_host_device = {
+ .name = "r8a66597_hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL, /* DMA not used */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &r8a66597_data,
+ },
+ .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources),
+ .resource = r8a66597_usb_host_resources,
+};
+
+static struct platform_device *sh7264_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &cmt_device,
+ &mtu2_device,
+ &rtc_device,
+ &r8a66597_usb_host_device,
+};
+
+static int __init sh7264_devices_setup(void)
+{
+ return platform_add_devices(sh7264_devices,
+ ARRAY_SIZE(sh7264_devices));
+}
+arch_initcall(sh7264_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7264_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &cmt_device,
+ &mtu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7264_early_devices,
+ ARRAY_SIZE(sh7264_early_devices));
+}
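
In the vector tables above, the enum identifiers (DMAC0, CMT0, SCIF0_BRI, ...) are only meaningful to the INTC controller; consumers see the plain IRQ numbers that end up in the resource tables. A hypothetical consumer sketch, assuming the usual <linux/interrupt.h> and <linux/platform_device.h> APIs (my_isr and my_probe are illustrative names, not part of this file):

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		/* acknowledge/handle the hardware here */
		return IRQ_HANDLED;
	}

	static int my_probe(struct platform_device *pdev)
	{
		/* first DEFINE_RES_IRQ() entry of the matched device */
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return request_irq(irq, my_isr, 0, dev_name(&pdev->dev), pdev);
	}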
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7269.c b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c
new file mode 100644
index 00000000000..16ce5aa77bd
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c
@@ -0,0 +1,586 @@
+/*
+ * SH7269 Setup
+ *
+ * Copyright (C) 2012 Renesas Electronics Europe Ltd
+ * Copyright (C) 2012 Phil Edworthy
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/usb/r8a66597.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
+
+ DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
+ DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15,
+ USB, VDC4, CMT0, CMT1, BSC, WDT,
+ MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V,
+ PWMT1, PWMT2, ADC_ADI,
+ SSIF0, SSII1, SSII2, SSII3, SSII4, SSII5,
+ RSPDIF,
+ IIC30, IIC31, IIC32, IIC33,
+ SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
+ SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
+ SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
+ SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
+ SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI,
+ SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI,
+ SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI,
+ SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI,
+ RCAN0, RCAN1, RCAN2,
+ RSPIC0, RSPIC1,
+ IEBC, CD_ROMD,
+ NFMC,
+ SDHI0, SDHI1,
+ RTC,
+ SRCC0, SRCC1, SRCC2,
+
+ /* interrupt groups */
+ PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
+ INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
+ INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
+ INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
+
+ INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
+ INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
+ INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
+ INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
+
+ INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
+ INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
+ INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
+ INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
+ INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
+ INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
+ INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
+ INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
+ INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141),
+ INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145),
+ INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149),
+ INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153),
+ INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157),
+ INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161),
+ INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165),
+ INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169),
+
+ INTC_IRQ(USB, 170),
+
+ INTC_IRQ(VDC4, 171), INTC_IRQ(VDC4, 172),
+ INTC_IRQ(VDC4, 173), INTC_IRQ(VDC4, 174),
+ INTC_IRQ(VDC4, 175), INTC_IRQ(VDC4, 176),
+ INTC_IRQ(VDC4, 177), INTC_IRQ(VDC4, 178),
+
+ INTC_IRQ(CMT0, 188), INTC_IRQ(CMT1, 189),
+
+ INTC_IRQ(BSC, 190), INTC_IRQ(WDT, 191),
+
+ INTC_IRQ(MTU0_ABCD, 192), INTC_IRQ(MTU0_ABCD, 193),
+ INTC_IRQ(MTU0_ABCD, 194), INTC_IRQ(MTU0_ABCD, 195),
+ INTC_IRQ(MTU0_VEF, 196), INTC_IRQ(MTU0_VEF, 197),
+ INTC_IRQ(MTU0_VEF, 198),
+ INTC_IRQ(MTU1_AB, 199), INTC_IRQ(MTU1_AB, 200),
+ INTC_IRQ(MTU1_VU, 201), INTC_IRQ(MTU1_VU, 202),
+ INTC_IRQ(MTU2_AB, 203), INTC_IRQ(MTU2_AB, 204),
+ INTC_IRQ(MTU2_VU, 205), INTC_IRQ(MTU2_VU, 206),
+ INTC_IRQ(MTU3_ABCD, 207), INTC_IRQ(MTU3_ABCD, 208),
+ INTC_IRQ(MTU3_ABCD, 209), INTC_IRQ(MTU3_ABCD, 210),
+ INTC_IRQ(MTU3_TCI3V, 211),
+ INTC_IRQ(MTU4_ABCD, 212), INTC_IRQ(MTU4_ABCD, 213),
+ INTC_IRQ(MTU4_ABCD, 214), INTC_IRQ(MTU4_ABCD, 215),
+ INTC_IRQ(MTU4_TCI4V, 216),
+
+ INTC_IRQ(PWMT1, 217), INTC_IRQ(PWMT2, 218),
+
+ INTC_IRQ(ADC_ADI, 223),
+
+ INTC_IRQ(SSIF0, 224), INTC_IRQ(SSIF0, 225),
+ INTC_IRQ(SSIF0, 226),
+ INTC_IRQ(SSII1, 227), INTC_IRQ(SSII1, 228),
+ INTC_IRQ(SSII2, 229), INTC_IRQ(SSII2, 230),
+ INTC_IRQ(SSII3, 231), INTC_IRQ(SSII3, 232),
+ INTC_IRQ(SSII4, 233), INTC_IRQ(SSII4, 234),
+ INTC_IRQ(SSII5, 235), INTC_IRQ(SSII5, 236),
+
+ INTC_IRQ(RSPDIF, 237),
+
+ INTC_IRQ(IIC30, 238), INTC_IRQ(IIC30, 239),
+ INTC_IRQ(IIC30, 240), INTC_IRQ(IIC30, 241),
+ INTC_IRQ(IIC30, 242),
+ INTC_IRQ(IIC31, 243), INTC_IRQ(IIC31, 244),
+ INTC_IRQ(IIC31, 245), INTC_IRQ(IIC31, 246),
+ INTC_IRQ(IIC31, 247),
+ INTC_IRQ(IIC32, 248), INTC_IRQ(IIC32, 249),
+ INTC_IRQ(IIC32, 250), INTC_IRQ(IIC32, 251),
+ INTC_IRQ(IIC32, 252),
+ INTC_IRQ(IIC33, 253), INTC_IRQ(IIC33, 254),
+ INTC_IRQ(IIC33, 255), INTC_IRQ(IIC33, 256),
+ INTC_IRQ(IIC33, 257),
+
+ INTC_IRQ(SCIF0_BRI, 258), INTC_IRQ(SCIF0_ERI, 259),
+ INTC_IRQ(SCIF0_RXI, 260), INTC_IRQ(SCIF0_TXI, 261),
+ INTC_IRQ(SCIF1_BRI, 262), INTC_IRQ(SCIF1_ERI, 263),
+ INTC_IRQ(SCIF1_RXI, 264), INTC_IRQ(SCIF1_TXI, 265),
+ INTC_IRQ(SCIF2_BRI, 266), INTC_IRQ(SCIF2_ERI, 267),
+ INTC_IRQ(SCIF2_RXI, 268), INTC_IRQ(SCIF2_TXI, 269),
+ INTC_IRQ(SCIF3_BRI, 270), INTC_IRQ(SCIF3_ERI, 271),
+ INTC_IRQ(SCIF3_RXI, 272), INTC_IRQ(SCIF3_TXI, 273),
+ INTC_IRQ(SCIF4_BRI, 274), INTC_IRQ(SCIF4_ERI, 275),
+ INTC_IRQ(SCIF4_RXI, 276), INTC_IRQ(SCIF4_TXI, 277),
+ INTC_IRQ(SCIF5_BRI, 278), INTC_IRQ(SCIF5_ERI, 279),
+ INTC_IRQ(SCIF5_RXI, 280), INTC_IRQ(SCIF5_TXI, 281),
+ INTC_IRQ(SCIF6_BRI, 282), INTC_IRQ(SCIF6_ERI, 283),
+ INTC_IRQ(SCIF6_RXI, 284), INTC_IRQ(SCIF6_TXI, 285),
+ INTC_IRQ(SCIF7_BRI, 286), INTC_IRQ(SCIF7_ERI, 287),
+ INTC_IRQ(SCIF7_RXI, 288), INTC_IRQ(SCIF7_TXI, 289),
+
+ INTC_IRQ(RCAN0, 291), INTC_IRQ(RCAN0, 292),
+ INTC_IRQ(RCAN0, 293), INTC_IRQ(RCAN0, 294),
+ INTC_IRQ(RCAN0, 295),
+ INTC_IRQ(RCAN1, 296), INTC_IRQ(RCAN1, 297),
+ INTC_IRQ(RCAN1, 298), INTC_IRQ(RCAN1, 299),
+ INTC_IRQ(RCAN1, 300),
+ INTC_IRQ(RCAN2, 301), INTC_IRQ(RCAN2, 302),
+ INTC_IRQ(RCAN2, 303), INTC_IRQ(RCAN2, 304),
+ INTC_IRQ(RCAN2, 305),
+
+ INTC_IRQ(RSPIC0, 306), INTC_IRQ(RSPIC0, 307),
+ INTC_IRQ(RSPIC0, 308),
+ INTC_IRQ(RSPIC1, 309), INTC_IRQ(RSPIC1, 310),
+ INTC_IRQ(RSPIC1, 311),
+
+ INTC_IRQ(IEBC, 318),
+
+ INTC_IRQ(CD_ROMD, 319), INTC_IRQ(CD_ROMD, 320),
+ INTC_IRQ(CD_ROMD, 321), INTC_IRQ(CD_ROMD, 322),
+ INTC_IRQ(CD_ROMD, 323), INTC_IRQ(CD_ROMD, 324),
+
+ INTC_IRQ(NFMC, 325), INTC_IRQ(NFMC, 326),
+ INTC_IRQ(NFMC, 327), INTC_IRQ(NFMC, 328),
+
+ INTC_IRQ(SDHI0, 332), INTC_IRQ(SDHI0, 333),
+ INTC_IRQ(SDHI0, 334),
+ INTC_IRQ(SDHI1, 335), INTC_IRQ(SDHI1, 336),
+ INTC_IRQ(SDHI1, 337),
+
+ INTC_IRQ(RTC, 338), INTC_IRQ(RTC, 339),
+ INTC_IRQ(RTC, 340),
+
+ INTC_IRQ(SRCC0, 341), INTC_IRQ(SRCC0, 342),
+ INTC_IRQ(SRCC0, 343), INTC_IRQ(SRCC0, 344),
+ INTC_IRQ(SRCC0, 345),
+ INTC_IRQ(SRCC1, 346), INTC_IRQ(SRCC1, 347),
+ INTC_IRQ(SRCC1, 348), INTC_IRQ(SRCC1, 349),
+ INTC_IRQ(SRCC1, 350),
+ INTC_IRQ(SRCC2, 351), INTC_IRQ(SRCC2, 352),
+ INTC_IRQ(SRCC2, 353), INTC_IRQ(SRCC2, 354),
+ INTC_IRQ(SRCC2, 355),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
+ PINT4, PINT5, PINT6, PINT7),
+ INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
+ INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
+ INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
+ INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
+ INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI),
+ INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI),
+ INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI),
+ INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
+ { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
+ { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
+ { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
+ { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9,
+ DMAC10, DMAC11 } },
+ { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13,
+ DMAC14, DMAC15 } },
+ { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC4, VDC4, VDC4 } },
+ { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { 0, 0, 0, 0 } },
+ { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { CMT0, CMT1, BSC, WDT } },
+ { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU0_ABCD, MTU0_VEF,
+ MTU1_AB, MTU1_VU } },
+ { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { MTU2_AB, MTU2_VU,
+ MTU3_ABCD, MTU3_TCI3V } },
+ { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { MTU4_ABCD, MTU4_TCI4V,
+ PWMT1, PWMT2 } },
+ { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { 0, 0, 0, 0 } },
+ { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { ADC_ADI, SSIF0, SSII1, SSII2 } },
+ { 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SSII3, SSII4, SSII5, RSPDIF } },
+ { 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { IIC30, IIC31, IIC32, IIC33 } },
+ { 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
+ { 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { SCIF4, SCIF5, SCIF6, SCIF7 } },
+ { 0xfffe0c20, 0, 16, 4, /* IPR22 */ { 0, RCAN0, RCAN1, RCAN2 } },
+ { 0xfffe0c22, 0, 16, 4, /* IPR23 */ { RSPIC0, RSPIC1, 0, 0 } },
+ { 0xfffe0c24, 0, 16, 4, /* IPR24 */ { IEBC, CD_ROMD, NFMC, 0 } },
+ { 0xfffe0c26, 0, 16, 4, /* IPR25 */ { SDHI0, SDHI1, RTC, 0 } },
+ { 0xfffe0c28, 0, 16, 4, /* IPR26 */ { SRCC0, SRCC1, SRCC2, 0 } },
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xfffe0808, 0, 16, /* PINTER */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7269", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xe8007000, 0x100),
+ DEFINE_RES_IRQ(259),
+ DEFINE_RES_IRQ(260),
+ DEFINE_RES_IRQ(261),
+ DEFINE_RES_IRQ(258),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xe8007800, 0x100),
+ DEFINE_RES_IRQ(263),
+ DEFINE_RES_IRQ(264),
+ DEFINE_RES_IRQ(265),
+ DEFINE_RES_IRQ(262),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xe8008000, 0x100),
+ DEFINE_RES_IRQ(267),
+ DEFINE_RES_IRQ(268),
+ DEFINE_RES_IRQ(269),
+ DEFINE_RES_IRQ(266),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xe8008800, 0x100),
+ DEFINE_RES_IRQ(271),
+ DEFINE_RES_IRQ(272),
+ DEFINE_RES_IRQ(273),
+ DEFINE_RES_IRQ(270),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xe8009000, 0x100),
+ DEFINE_RES_IRQ(275),
+ DEFINE_RES_IRQ(276),
+ DEFINE_RES_IRQ(277),
+ DEFINE_RES_IRQ(274),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xe8009800, 0x100),
+ DEFINE_RES_IRQ(279),
+ DEFINE_RES_IRQ(280),
+ DEFINE_RES_IRQ(281),
+ DEFINE_RES_IRQ(278),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif6_resources[] = {
+ DEFINE_RES_MEM(0xe800a000, 0x100),
+ DEFINE_RES_IRQ(283),
+ DEFINE_RES_IRQ(284),
+ DEFINE_RES_IRQ(285),
+ DEFINE_RES_IRQ(282),
+};
+
+static struct platform_device scif6_device = {
+ .name = "sh-sci",
+ .id = 6,
+ .resource = scif6_resources,
+ .num_resources = ARRAY_SIZE(scif6_resources),
+ .dev = {
+ .platform_data = &scif6_platform_data,
+ },
+};
+
+static struct plat_sci_port scif7_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
+ SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif7_resources[] = {
+ DEFINE_RES_MEM(0xe800a800, 0x100),
+ DEFINE_RES_IRQ(287),
+ DEFINE_RES_IRQ(288),
+ DEFINE_RES_IRQ(289),
+ DEFINE_RES_IRQ(286),
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .resource = scif7_resources,
+ .num_resources = ARRAY_SIZE(scif7_resources),
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0xfffec000, 0x10),
+ DEFINE_RES_IRQ(188),
+ DEFINE_RES_IRQ(189),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-16",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct resource mtu2_resources[] = {
+ DEFINE_RES_MEM(0xfffe4000, 0x400),
+ DEFINE_RES_IRQ_NAMED(192, "tgi0a"),
+ DEFINE_RES_IRQ_NAMED(203, "tgi1a"),
+};
+
+static struct platform_device mtu2_device = {
+ .name = "sh-mtu2",
+ .id = -1,
+ .resource = mtu2_resources,
+ .num_resources = ARRAY_SIZE(mtu2_resources),
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xfffe6000,
+ .end = 0xfffe6000 + 0x30 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = 338,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+/* USB Host */
+static struct r8a66597_platdata r8a66597_data = {
+ .on_chip = 1,
+ .endian = 1,
+};
+
+static struct resource r8a66597_usb_host_resources[] = {
+ [0] = {
+ .start = 0xe8010000,
+ .end = 0xe80100e4,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 170,
+ .end = 170,
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device r8a66597_usb_host_device = {
+ .name = "r8a66597_hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL, /* DMA not used */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &r8a66597_data,
+ },
+ .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources),
+ .resource = r8a66597_usb_host_resources,
+};
+
+static struct platform_device *sh7269_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &cmt_device,
+ &mtu2_device,
+ &rtc_device,
+ &r8a66597_usb_host_device,
+};
+
+static int __init sh7269_devices_setup(void)
+{
+ return platform_add_devices(sh7269_devices,
+ ARRAY_SIZE(sh7269_devices));
+}
+arch_initcall(sh7269_devices_setup);
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct platform_device *sh7269_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &cmt_device,
+ &mtu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7269_early_devices,
+ ARRAY_SIZE(sh7269_early_devices));
+}
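
The SH7206, SH7264 and SH7269 CMT devices all pass .channels_mask = 3. Assuming the usual bit-per-channel convention of sh_timer_config, that exposes CMT channels 0 and 1, which matches the two DEFINE_RES_IRQ() entries each part provides (188 and 189 here). The same value written out explicitly, as a sketch:

	/* assumption: bit N of channels_mask enables CMT channel N; BIT() is from <linux/bitops.h> */
	static struct sh_timer_config cmt_platform_data_sketch = {
		.channels_mask = BIT(0) | BIT(1),	/* == 3: channels 0 and 1 */
	};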
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index 3ae4d9111f1..d3634ae7b71 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -2,18 +2,20 @@
# Makefile for the Linux/SuperH SH-3 backends.
#
-obj-y := ex.o probe.o entry.o
+obj-y := ex.o probe.o entry.o setup-sh3.o
+
+obj-$(CONFIG_HIBERNATION) += swsusp.o
# CPU subtype setup
-obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o serial-sh770x.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o serial-sh7710.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o serial-sh7710.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o serial-sh7720.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o serial-sh7720.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH3) := clock-sh3.o
@@ -24,4 +26,8 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o
clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o
clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o
+
obj-y += $(clock-y)
+obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c
index c3c945958ba..90faa44ca94 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh3.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c
@@ -28,60 +28,60 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
}
-static struct clk_ops sh3_master_clk_ops = {
+static struct sh_clk_ops sh3_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh3_module_clk_ops = {
+static struct sh_clk_ops sh3_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
- clk->rate = clk->parent->rate / stc_multipliers[idx];
+ return clk->parent->rate / stc_multipliers[idx];
}
-static struct clk_ops sh3_bus_clk_ops = {
+static struct sh_clk_ops sh3_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh3_cpu_clk_ops = {
+static struct sh_clk_ops sh3_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh3_clk_ops[] = {
+static struct sh_clk_ops *sh3_clk_ops[] = {
&sh3_master_clk_ops,
&sh3_module_clk_ops,
&sh3_bus_clk_ops,
&sh3_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh3_clk_ops))
*ops = sh3_clk_ops[idx];
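
This and the following clock-sh77xx hunks all make the same two changes: struct clk_ops becomes struct sh_clk_ops, and the recalc callbacks return the computed rate instead of writing clk->rate themselves. A hedged sketch of the caller side this implies (illustrative only; the real propagation code lives in drivers/sh/clk/):

	static void recalc_rate_sketch(struct clk *clk)
	{
		/* the framework, not the callback, now updates clk->rate */
		if (clk->ops && clk->ops->recalc)
			clk->rate = clk->ops->recalc(clk);
	}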
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
index dfdbf3277fd..a8da4a9986b 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c
@@ -32,51 +32,51 @@ static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0003];
+ clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
}
-static struct clk_ops sh7705_master_clk_ops = {
+static struct sh_clk_ops sh7705_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ctrl_inw(FRQCR) & 0x0003;
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = __raw_readw(FRQCR) & 0x0003;
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7705_module_clk_ops = {
+static struct sh_clk_ops sh7705_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0300) >> 8;
- clk->rate = clk->parent->rate / stc_multipliers[idx];
+ int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
+ return clk->parent->rate / stc_multipliers[idx];
}
-static struct clk_ops sh7705_bus_clk_ops = {
+static struct sh_clk_ops sh7705_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0030) >> 4;
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh7705_cpu_clk_ops = {
+static struct sh_clk_ops sh7705_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7705_clk_ops[] = {
+static struct sh_clk_ops *sh7705_clk_ops[] = {
&sh7705_master_clk_ops,
&sh7705_module_clk_ops,
&sh7705_bus_clk_ops,
&sh7705_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7705_clk_ops))
*ops = sh7705_clk_ops[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
index 0cf96f9833b..a4088e5b220 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7706.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -24,60 +24,60 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
}
-static struct clk_ops sh7706_master_clk_ops = {
+static struct sh_clk_ops sh7706_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7706_module_clk_ops = {
+static struct sh_clk_ops sh7706_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
- clk->rate = clk->parent->rate / stc_multipliers[idx];
+ return clk->parent->rate / stc_multipliers[idx];
}
-static struct clk_ops sh7706_bus_clk_ops = {
+static struct sh_clk_ops sh7706_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh7706_cpu_clk_ops = {
+static struct sh_clk_ops sh7706_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7706_clk_ops[] = {
+static struct sh_clk_ops *sh7706_clk_ops[] = {
&sh7706_master_clk_ops,
&sh7706_module_clk_ops,
&sh7706_bus_clk_ops,
&sh7706_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7706_clk_ops))
*ops = sh7706_clk_ops[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
index b791a29fdb6..54a6d4bcc0d 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c
@@ -22,74 +22,63 @@ static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
-static void set_bus_parent(struct clk *clk)
-{
- struct clk *bus_clk = clk_get(NULL, "bus_clk");
- clk->parent = bus_clk;
- clk_put(bus_clk);
-}
-
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
}
-static struct clk_ops sh7709_master_clk_ops = {
+static struct sh_clk_ops sh7709_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7709_module_clk_ops = {
-#ifdef CLOCK_MODE_0_1_2_7
- .init = set_bus_parent,
-#endif
+static struct sh_clk_ops sh7709_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0080) ?
((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
- clk->rate = clk->parent->rate * stc_multipliers[idx];
+ return clk->parent->rate * stc_multipliers[idx];
}
-static struct clk_ops sh7709_bus_clk_ops = {
+static struct sh_clk_ops sh7709_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh7709_cpu_clk_ops = {
- .init = set_bus_parent,
+static struct sh_clk_ops sh7709_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7709_clk_ops[] = {
+static struct sh_clk_ops *sh7709_clk_ops[] = {
&sh7709_master_clk_ops,
&sh7709_module_clk_ops,
&sh7709_bus_clk_ops,
&sh7709_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7709_clk_ops))
*ops = sh7709_clk_ops[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
index 4744c50ec44..ce601b2e397 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c
@@ -26,51 +26,51 @@ static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= md_table[ctrl_inw(FRQCR) & 0x0007];
+ clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
}
-static struct clk_ops sh7710_master_clk_ops = {
+static struct sh_clk_ops sh7710_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0007);
- clk->rate = clk->parent->rate / md_table[idx];
+ int idx = (__raw_readw(FRQCR) & 0x0007);
+ return clk->parent->rate / md_table[idx];
}
-static struct clk_ops sh7710_module_clk_ops = {
+static struct sh_clk_ops sh7710_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0700) >> 8;
- clk->rate = clk->parent->rate / md_table[idx];
+ int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
+ return clk->parent->rate / md_table[idx];
}
-static struct clk_ops sh7710_bus_clk_ops = {
+static struct sh_clk_ops sh7710_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0070) >> 4;
- clk->rate = clk->parent->rate / md_table[idx];
+ int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
+ return clk->parent->rate / md_table[idx];
}
-static struct clk_ops sh7710_cpu_clk_ops = {
+static struct sh_clk_ops sh7710_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7710_clk_ops[] = {
+static struct sh_clk_ops *sh7710_clk_ops[] = {
&sh7710_master_clk_ops,
&sh7710_module_clk_ops,
&sh7710_bus_clk_ops,
&sh7710_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7710_clk_ops))
*ops = sh7710_clk_ops[idx];
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
index 54f54df51ef..21438a9a1ae 100644
--- a/arch/sh/kernel/cpu/sh3/clock-sh7712.c
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c
@@ -23,47 +23,47 @@ static int divisors[] = { 1, 2, 3, 4, 6 };
static void master_clk_init(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0300) >> 8;
clk->rate *= multipliers[idx];
}
-static struct clk_ops sh7712_master_clk_ops = {
+static struct sh_clk_ops sh7712_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = frqcr & 0x0007;
- clk->rate = clk->parent->rate / divisors[idx];
+ return clk->parent->rate / divisors[idx];
}
-static struct clk_ops sh7712_module_clk_ops = {
+static struct sh_clk_ops sh7712_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int frqcr = ctrl_inw(FRQCR);
+ int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0030) >> 4;
- clk->rate = clk->parent->rate / divisors[idx];
+ return clk->parent->rate / divisors[idx];
}
-static struct clk_ops sh7712_cpu_clk_ops = {
+static struct sh_clk_ops sh7712_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7712_clk_ops[] = {
+static struct sh_clk_ops *sh7712_clk_ops[] = {
&sh7712_master_clk_ops,
&sh7712_module_clk_ops,
&sh7712_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7712_clk_ops))
*ops = sh7712_clk_ops[idx];
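
Each clock-sh77xx file exports the same arch_init_clk_ops() hook, with the index order taken from its sh77xx_clk_ops[] array (master first, then module, then bus/cpu where provided). A sketch of how a caller is expected to use it; sample_clk and attach_clk_ops_sketch are illustrative names:

	static struct clk sample_clk;

	static void attach_clk_ops_sketch(void)
	{
		struct sh_clk_ops *ops = NULL;

		/* index 1 selects the module (peripheral) clock ops in every array above */
		arch_init_clk_ops(&ops, 1);
		if (ops)
			sample_clk.ops = ops;
	}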
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 4004073f98c..262db6ec067 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -2,7 +2,7 @@
* arch/sh/kernel/cpu/sh3/entry.S
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -14,8 +14,10 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
-#include <asm/cpu/mmu_context.h>
+#include <cpu/mmu_context.h>
#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -52,10 +54,6 @@
* syscall #
*
*/
-#if defined(CONFIG_KGDB_NMI)
-NMI_VEC = 0x1c0 ! Must catch early for debounce
-#endif
-
/* Offsets to the stack */
OFF_R0 = 0 /* Return value. New ABI also arg4 */
OFF_R1 = 4 /* New ABI: arg5 */
@@ -70,7 +68,6 @@ OFF_PC = (16*4)
OFF_SR = (16*4+8)
OFF_TRA = (16*4+6*4)
-
#define k0 r0
#define k1 r1
#define k2 r2
@@ -112,34 +109,33 @@ OFF_TRA = (16*4+6*4)
#if defined(CONFIG_MMU)
.align 2
ENTRY(tlb_miss_load)
- bra call_dpf
+ bra call_handle_tlbmiss
mov #0, r5
.align 2
ENTRY(tlb_miss_store)
- bra call_dpf
- mov #1, r5
+ bra call_handle_tlbmiss
+ mov #FAULT_CODE_WRITE, r5
.align 2
ENTRY(initial_page_write)
- bra call_dpf
- mov #1, r5
+ bra call_handle_tlbmiss
+ mov #FAULT_CODE_INITIAL, r5
.align 2
ENTRY(tlb_protection_violation_load)
- bra call_dpf
- mov #0, r5
+ bra call_do_page_fault
+ mov #FAULT_CODE_PROT, r5
.align 2
ENTRY(tlb_protection_violation_store)
- bra call_dpf
- mov #1, r5
+ bra call_do_page_fault
+ mov #(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5
-call_dpf:
+call_handle_tlbmiss:
mov.l 1f, r0
mov r5, r8
mov.l @r0, r6
- mov r6, r9
mov.l 2f, r0
sts pr, r10
jsr @r0
@@ -150,16 +146,23 @@ call_dpf:
lds r10, pr
rts
nop
-0: mov.l 3f, r0
- mov r9, r6
+0:
mov r8, r5
+call_do_page_fault:
+ mov.l 1f, r0
+ mov.l @r0, r6
+
+ mov.l 3f, r0
+ mov.l 4f, r1
+ mov r15, r4
jmp @r0
- mov r15, r4
+ lds r1, pr
.align 2
1: .long MMU_TEA
-2: .long __do_page_fault
+2: .long handle_tlbmiss
3: .long do_page_fault
+4: .long ret_from_exception
.align 2
ENTRY(address_error_load)
@@ -187,44 +190,35 @@ call_dae:
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
- mov.l @r15+, r0
- mov.l @r15+, r1
- mov.l @r15+, r2
- mov.l @r15+, r3
- mov.l @r15+, r4
- mov.l @r15+, r5
- mov.l @r15+, r6
- mov.l @r15+, r7
- stc sr, r8
- mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
- or r9, r8
- ldc r8, sr ! here, change the register bank
- mov.l @r15+, r8
- mov.l @r15+, r9
- mov.l @r15+, r10
- mov.l @r15+, r11
- mov.l @r15+, r12
- mov.l @r15+, r13
- mov.l @r15+, r14
- mov.l @r15+, k0
- ldc.l @r15+, spc
- lds.l @r15+, pr
- mov.l @r15+, k1
- ldc.l @r15+, gbr
- lds.l @r15+, mach
- lds.l @r15+, macl
- mov k0, r15
+ mov.l 1f, r8
+ bsr restore_regs
+ nop
+
+ lds k2, pr ! restore pr
+ mov k4, r15
!
mov.l 2f, k0
mov.l @k0, k0
jmp @k0
- ldc k1, ssr
+ ldc k3, ssr
.align 2
1: .long 0x300000f0
2: .long gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
-restore_all:
+! restore_regs()
+! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
+! - switch bank
+! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
+! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
+! k2 returns original pr
+! k3 returns original sr
+! k4 returns original stack pointer
+! r8 passes SR bitmask, overwritten with restored data on return
+! r9 trashed
+! BL=0 on entry, on exit BL=1 (depending on r8).
+
+ENTRY(restore_regs)
mov.l @r15+, r0
mov.l @r15+, r1
mov.l @r15+, r2
@@ -234,10 +228,9 @@ restore_all:
mov.l @r15+, r6
mov.l @r15+, r7
!
- stc sr, r8
- mov.l 7f, r9
- or r9, r8 ! BL =1, RB=1
- ldc r8, sr ! here, change the register bank
+ stc sr, r9
+ or r8, r9
+ ldc r9, sr
!
mov.l @r15+, r8
mov.l @r15+, r9
@@ -248,53 +241,27 @@ restore_all:
mov.l @r15+, r14
mov.l @r15+, k4 ! original stack pointer
ldc.l @r15+, spc
- lds.l @r15+, pr
+ mov.l @r15+, k2 ! original PR
mov.l @r15+, k3 ! original SR
ldc.l @r15+, gbr
lds.l @r15+, mach
lds.l @r15+, macl
- add #4, r15 ! Skip syscall number
- !
-#ifdef CONFIG_SH_DSP
- mov.l @r15+, k0 ! DSP mode marker
- mov.l 5f, k1
- cmp/eq k0, k1 ! Do we have a DSP stack frame?
- bf skip_restore
-
- stc sr, k0 ! Enable CPU DSP mode
- or k1, k0 ! (within kernel it may be disabled)
- ldc k0, sr
- mov r2, k0 ! Backup r2
-
- ! Restore DSP registers from stack
- mov r15, r2
- movs.l @r2+, a1
- movs.l @r2+, a0g
- movs.l @r2+, a1g
- movs.l @r2+, m0
- movs.l @r2+, m1
- mov r2, r15
-
- lds.l @r15+, a0
- lds.l @r15+, x0
- lds.l @r15+, x1
- lds.l @r15+, y0
- lds.l @r15+, y1
- lds.l @r15+, dsr
- ldc.l @r15+, rs
- ldc.l @r15+, re
- ldc.l @r15+, mod
-
- mov k0, r2 ! Restore r2
-skip_restore:
-#endif
+ rts
+ add #4, r15 ! Skip syscall number
+
+restore_all:
+ mov.l 7f, r8
+ bsr restore_regs
+ nop
+
+ lds k2, pr ! restore pr
!
! Calculate new SR value
mov k3, k2 ! original SR value
- mov #0xf0, k1
+ mov #0xfffffff0, k1
extu.b k1, k1
not k1, k1
- and k1, k2 ! Mask orignal SR value
+ and k1, k2 ! Mask original SR value
!
mov k3, k0 ! Calculate IMASK-bits
shlr2 k0
@@ -307,22 +274,12 @@ skip_restore:
6: or k0, k2 ! Set the IMASK-bits
ldc k2, ssr
!
-#if defined(CONFIG_KGDB_NMI)
- ! Clear in_nmi
- mov.l 6f, k0
- mov #0, k1
- mov.b k1, @k0
-#endif
- mov.l @r15+, k2 ! restore EXPEVT
mov k4, r15
rte
nop
.align 2
5: .long 0x00001000 ! DSP
-#ifdef CONFIG_KGDB_NMI
-6: .long in_nmi
-#endif
7: .long 0x30000000
! common exception handler
@@ -336,81 +293,21 @@ skip_restore:
ENTRY(vbr_base)
.long 0
!
+! 0x100: General exception vector
+!
.balign 256,0,256
general_exception:
- mov.l 1f, k2
- mov.l 2f, k3
-#ifdef CONFIG_CPU_SUBTYPE_SHX3
- mov.l @k2, k2
-
- ! Is EXPEVT larger than 0x800?
- mov #0x8, k0
- shll8 k0
- cmp/hs k0, k2
- bf 0f
-
- ! then add 0x580 (k2 is 0xd80 or 0xda0)
- mov #0x58, k0
- shll2 k0
- shll2 k0
- add k0, k2
-0:
- bra handle_exception
- nop
-#else
- bra handle_exception
- mov.l @k2, k2
-#endif
- .align 2
-1: .long EXPEVT
-2: .long ret_from_exception
-!
-!
-
- .balign 1024,0,1024
-tlb_miss:
- mov.l 1f, k2
- mov.l 4f, k3
- bra handle_exception
- mov.l @k2, k2
-!
- .balign 512,0,512
-interrupt:
- mov.l 2f, k2
- mov.l 3f, k3
-#if defined(CONFIG_KGDB_NMI)
- ! Debounce (filter nested NMI)
- mov.l @k2, k0
- mov.l 5f, k1
- cmp/eq k1, k0
- bf 0f
- mov.l 6f, k1
- tas.b @k1
- bt 0f
- rte
- nop
- .align 2
-5: .long NMI_VEC
-6: .long in_nmi
-0:
-#endif /* defined(CONFIG_KGDB_NMI) */
bra handle_exception
- mov #-1, k2 ! interrupt exception marker
+ sts pr, k3 ! save original pr value in k3
- .align 2
-1: .long EXPEVT
-2: .long INTEVT
-3: .long ret_from_irq
-4: .long ret_from_exception
-
-!
-!
- .align 2
-ENTRY(handle_exception)
- ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
- ! save all registers onto stack.
- !
+! prepare_stack()
+! - roll back gRB
+! - switch to kernel stack
+! k0 returns original sp (after roll back)
+! k1 trashed
+! k2 trashed
+prepare_stack:
#ifdef CONFIG_GUSA
! Check for roll back gRB (User and Kernel)
mov r15, k0
@@ -430,7 +327,7 @@ ENTRY(handle_exception)
2: mov k1, r15 ! SP = r1
1:
#endif
-
+ ! Switch to kernel stack if needed
stc ssr, k0 ! Is it from kernel space?
shll k0 ! Check MD bit (bit30) by shifting it into...
shll k0 ! ...the T bit
@@ -443,65 +340,67 @@ ENTRY(handle_exception)
add current, k1
mov k1, r15 ! change to kernel stack
!
-1: mov.l 2f, k1
- !
-#ifdef CONFIG_SH_DSP
- mov.l r2, @-r15 ! Save r2, we need another reg
- stc sr, k4
- mov.l 1f, r2
- tst r2, k4 ! Check if in DSP mode
- mov.l @r15+, r2 ! Restore r2 now
- bt/s skip_save
- mov #0, k4 ! Set marker for no stack frame
-
- mov r2, k4 ! Backup r2 (in k4) for later
-
- ! Save DSP registers on stack
- stc.l mod, @-r15
- stc.l re, @-r15
- stc.l rs, @-r15
- sts.l dsr, @-r15
- sts.l y1, @-r15
- sts.l y0, @-r15
- sts.l x1, @-r15
- sts.l x0, @-r15
- sts.l a0, @-r15
-
- ! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
-
- ! FIXME: Make sure that this is still the case with newer toolchains,
- ! as we're not at all interested in supporting ancient toolchains at
- ! this point. -- PFM.
-
- mov r15, r2
- .word 0xf653 ! movs.l a1, @-r2
- .word 0xf6f3 ! movs.l a0g, @-r2
- .word 0xf6d3 ! movs.l a1g, @-r2
- .word 0xf6c3 ! movs.l m0, @-r2
- .word 0xf6e3 ! movs.l m1, @-r2
- mov r2, r15
-
- mov k4, r2 ! Restore r2
- mov.l 1f, k4 ! Force DSP stack frame
-skip_save:
- mov.l k4, @-r15 ! Push DSP mode marker onto stack
-#endif
- ! Save the user registers on the stack.
- mov.l k2, @-r15 ! EXPEVT
+1:
+ rts
+ nop
- mov #-1, k4
- mov.l k4, @-r15 ! set TRA (default: -1)
- !
+!
+! 0x400: Instruction and Data TLB miss exception vector
+!
+ .balign 1024,0,1024
+tlb_miss:
+ sts pr, k3 ! save original pr value in k3
+
+handle_exception:
+ mova exception_data, k0
+
+ ! Setup stack and save DSP context (k0 contains original r15 on return)
+ bsr prepare_stack
+ PREF(k0)
+
+ ! Save registers / Switch to bank 0
+ mov.l 5f, k2 ! vector register address
+ mov.l 1f, k4 ! SR bits to clear in k4
+ bsr save_regs ! needs original pr value in k3
+ mov.l @k2, k2 ! read out vector and keep in k2
+
+handle_exception_special:
+ setup_frame_reg
+
+ ! Setup return address and jump to exception handler
+ mov.l 7f, r9 ! fetch return address
+ stc r2_bank, r0 ! k2 (vector)
+ mov.l 6f, r10
+ shlr2 r0
+ shlr r0
+ mov.l @(r0, r10), r10
+ jmp @r10
+ lds r9, pr ! put return address in pr
+
+ .align L1_CACHE_SHIFT
+
+! save_regs()
+! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
+! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
+! - switch bank
+! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
+! k0 contains original stack pointer*
+! k1 trashed
+! k3 passes original pr*
+! k4 passes SR bitmask
+! BL=1 on entry, on exit BL=0.
+
+ENTRY(save_regs)
+ mov #-1, r1
+ mov.l k1, @-r15 ! set TRA (default: -1)
sts.l macl, @-r15
sts.l mach, @-r15
stc.l gbr, @-r15
stc.l ssr, @-r15
- sts.l pr, @-r15
+ mov.l k3, @-r15 ! original pr in k3
stc.l spc, @-r15
- !
- lds k3, pr ! Set the return address to pr
- !
- mov.l k0, @-r15 ! save orignal stack
+
+ mov.l k0, @-r15 ! original stack pointer in k0
mov.l r14, @-r15
mov.l r13, @-r15
mov.l r12, @-r15
@@ -509,13 +408,23 @@ skip_save:
mov.l r10, @-r15
mov.l r9, @-r15
mov.l r8, @-r15
- !
- stc sr, r8 ! Back to normal register bank, and
- or k1, r8 ! Block all interrupts
- mov.l 3f, k1
- and k1, r8 ! ...
- ldc r8, sr ! ...changed here.
- !
+
+ mov.l 0f, k3 ! SR bits to set in k3
+
+ ! fall-through
+
+! save_low_regs()
+! - modify SR for bank switch
+! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
+! k3 passes bits to set in SR
+! k4 passes bits to clear in SR
+
+ENTRY(save_low_regs)
+ stc sr, r8
+ or k3, r8
+ and k4, r8
+ ldc r8, sr
+
mov.l r7, @-r15
mov.l r6, @-r15
mov.l r5, @-r15
@@ -523,52 +432,82 @@ skip_save:
mov.l r3, @-r15
mov.l r2, @-r15
mov.l r1, @-r15
- mov.l r0, @-r15
+ rts
+ mov.l r0, @-r15
+
+!
+! 0x600: Interrupt / NMI vector
+!
+ .balign 512,0,512
+ENTRY(handle_interrupt)
+ sts pr, k3 ! save original pr value in k3
+ mova exception_data, k0
+
+ ! Setup stack and save DSP context (k0 contains original r15 on return)
+ bsr prepare_stack
+ PREF(k0)
+
+ ! Save registers / Switch to bank 0
+ mov.l 1f, k4 ! SR bits to clear in k4
+ bsr save_regs ! needs original pr value in k3
+ mov #-1, k2 ! default vector kept in k2
+
+ setup_frame_reg
+
+ stc sr, r0 ! get status register
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bf 9f
+ TRACE_IRQS_OFF
+9:
+
+ ! Setup return address and jump to do_IRQ
+ mov.l 4f, r9 ! fetch return address
+ lds r9, pr ! put return address in pr
+ mov.l 2f, r4
+ mov.l 3f, r9
+ mov.l @r4, r4 ! pass INTEVT vector as arg0
+
+ shlr2 r4
+ shlr r4
+ mov r4, r0 ! save vector->jmp table offset for later
+
+ shlr2 r4 ! vector to IRQ# conversion
+ add #-0x10, r4
+
+ cmp/pz r4 ! is it a valid IRQ?
+ bt 10f
/*
- * This gets a bit tricky.. in the INTEVT case we don't want to use
- * the VBR offset as a destination in the jump call table, since all
- * of the destinations are the same. In this case, (interrupt) sets
- * a marker in r2 (now r2_bank since SR.RB changed), which we check
- * to determine the exception type. For all other exceptions, we
- * forcibly read EXPEVT from memory and fix up the jump address, in
- * the interrupt exception case we jump to do_IRQ() and defer the
- * INTEVT read until there. As a bonus, we can also clean up the SR.RB
- * checks that do_IRQ() was doing..
+ * We got here as a result of taking the INTEVT path for something
+ * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
+ * path and special case the event dispatch instead. This is the
+ * expected path for the NMI (and any other brilliantly implemented
+ * exception), which effectively wants regular exception dispatch
+ * but is unfortunately reported through INTEVT rather than
+ * EXPEVT. Grr.
*/
- stc r2_bank, r8
- cmp/pz r8
- bf interrupt_exception
- shlr2 r8
- shlr r8
- mov.l 4f, r9
- add r8, r9
- mov.l @r9, r9
+ mov.l 6f, r9
+ mov.l @(r0, r9), r9
jmp @r9
- nop
- rts
- nop
+ mov r15, r8 ! trap handlers take saved regs in r8
- .align 2
-1: .long 0x00001000 ! DSP=1
-2: .long 0x000080f0 ! FD=1, IMASK=15
-3: .long 0xcfffffff ! RB=0, BL=0
-4: .long exception_handling_table
+10:
+ jmp @r9 ! Off to do_IRQ() we go.
+ mov r15, r5 ! pass saved registers as arg1
-interrupt_exception:
- mov.l 1f, r9
- mov.l 2f, r4
- mov.l @r4, r4
- jmp @r9
- mov r15, r5
+ENTRY(exception_none)
rts
nop
- .align 2
-1: .long do_IRQ
+ .align L1_CACHE_SHIFT
+exception_data:
+0: .long 0x000080f0 ! FD=1, IMASK=15
+1: .long 0xcfffffff ! RB=0, BL=0
2: .long INTEVT
-
- .align 2
-ENTRY(exception_none)
- rts
- nop
+3: .long do_IRQ
+4: .long ret_from_irq
+5: .long EXPEVT
+6: .long exception_handling_table
+7: .long ret_from_exception
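The entry.S rewrite above collapses the per-vector trampolines into a shared handle_exception/handle_interrupt path: save_regs/restore_regs do the register spill and fill, and the event code read from EXPEVT or INTEVT is turned into a table index (the shlr2/shlr pairs) before jumping through exception_handling_table or into do_IRQ(). A rough C rendering of that arithmetic, purely for illustration (the real handlers receive the saved register frame set up by save_regs, with the return address preloaded into pr):

/* Rough C rendering of the dispatch arithmetic in handle_exception /
 * handle_interrupt above; illustrative only. */
extern void (*exception_handling_table[])(void);

static void dispatch_exception(unsigned long expevt)
{
        /* vectors are spaced 0x20 apart and table entries are 4 bytes,
         * so the asm builds a byte offset of expevt >> 3, i.e. index
         * expevt >> 5 */
        exception_handling_table[expevt >> 5]();
}

static int intevt_to_irq(unsigned long intevt)
{
        int irq = (intevt >> 5) - 16;   /* the evt2irq() mapping */

        /* negative means "not a hard IRQ" (e.g. the NMI at 0x1c0),
         * which falls back to exception_handling_table dispatch */
        return irq;
}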
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index 11b6d9c6eda..99b4d020179 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -4,7 +4,7 @@
* The SH-3 and SH-4 exception vector table.
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2003 - 2006 Paul Mundt
+ * Copyright (C) 2003 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -12,13 +12,30 @@
*/
#include <linux/linkage.h>
+#if !defined(CONFIG_MMU)
+#define tlb_miss_load exception_error
+#define tlb_miss_store exception_error
+#define initial_page_write exception_error
+#define tlb_protection_violation_load exception_error
+#define tlb_protection_violation_store exception_error
+#define address_error_load exception_error
+#define address_error_store exception_error
+#endif
+
+#if !defined(CONFIG_SH_FPU)
+#define fpu_error_trap_handler exception_error
+#endif
+
+#if !defined(CONFIG_KGDB)
+#define kgdb_handle_exception exception_error
+#endif
+
.align 2
.data
ENTRY(exception_handling_table)
.long exception_error /* 000 */
.long exception_error
-#if defined(CONFIG_MMU)
.long tlb_miss_load /* 040 */
.long tlb_miss_store
.long initial_page_write
@@ -26,32 +43,13 @@ ENTRY(exception_handling_table)
.long tlb_protection_violation_store
.long address_error_load
.long address_error_store /* 100 */
-#else
- .long exception_error ! tlb miss load /* 040 */
- .long exception_error ! tlb miss store
- .long exception_error ! initial page write
- .long exception_error ! tlb prot violation load
- .long exception_error ! tlb prot violation store
- .long exception_error ! address error load
- .long exception_error ! address error store /* 100 */
-#endif
-#if defined(CONFIG_SH_FPU)
.long fpu_error_trap_handler /* 120 */
-#else
- .long exception_error /* 120 */
-#endif
.long exception_error /* 140 */
.long system_call ! Unconditional Trap /* 160 */
.long exception_error ! reserved_instruction (filled by trap_init) /* 180 */
.long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/
-ENTRY(nmi_slot)
-#if defined (CONFIG_KGDB_NMI)
- .long kgdb_handle_exception /* 1C0 */ ! Allow trap to debugger
-#else
- .long exception_none /* 1C0 */ ! Not implemented yet
-#endif
-ENTRY(user_break_point_trap)
- .long break_point_trap /* 1E0 */
+ .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger
+ .long breakpoint_trap_handler /* 1E0 */
/*
* Pad the remainder of the table out, exceptions residing in far
diff --git a/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
new file mode 100644
index 00000000000..26e90a66ebb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c
@@ -0,0 +1,30 @@
+/*
+ * SH7720 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7720_pfc_resources[] = {
+ [0] = {
+ .start = 0xa4050100,
+ .end = 0xa405016f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7720", sh7720_pfc_resources,
+ ARRAY_SIZE(sh7720_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 10f2a760c5e..426e1e1dced 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
#include <asm/cache.h>
#include <asm/io.h>
-int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
+void cpu_probe(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
@@ -30,23 +30,23 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
/* First, write back & invalidate */
- data0 = ctrl_inl(addr0);
- ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
- data1 = ctrl_inl(addr1);
- ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
+ data0 = __raw_readl(addr0);
+ __raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
+ data1 = __raw_readl(addr1);
+ __raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
/* Next, check if there's shadow or not */
- data0 = ctrl_inl(addr0);
+ data0 = __raw_readl(addr0);
data0 ^= SH_CACHE_VALID;
- ctrl_outl(data0, addr0);
- data1 = ctrl_inl(addr1);
+ __raw_writel(data0, addr0);
+ data1 = __raw_readl(addr1);
data2 = data1 ^ SH_CACHE_VALID;
- ctrl_outl(data2, addr1);
- data3 = ctrl_inl(addr0);
+ __raw_writel(data2, addr1);
+ data3 = __raw_readl(addr0);
/* Lastly, invaliate them. */
- ctrl_outl(data0&~SH_CACHE_VALID, addr0);
- ctrl_outl(data2&~SH_CACHE_VALID, addr1);
+ __raw_writel(data0&~SH_CACHE_VALID, addr0);
+ __raw_writel(data2&~SH_CACHE_VALID, addr1);
back_to_cached();
@@ -94,9 +94,9 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.way_incr = (1 << 13);
boot_cpu_data.dcache.entry_mask = 0x1ff0;
boot_cpu_data.dcache.sets = 512;
- ctrl_outl(CCR_CACHE_32KB, CCR3_REG);
+ __raw_writel(CCR_CACHE_32KB, CCR3_REG);
#else
- ctrl_outl(CCR_CACHE_16KB, CCR3_REG);
+ __raw_writel(CCR_CACHE_16KB, CCR3_REG);
#endif
#endif
}
@@ -107,5 +107,5 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
- return 0;
+ boot_cpu_data.family = CPU_FAMILY_SH3;
}
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh770x.c b/arch/sh/kernel/cpu/sh3/serial-sh770x.c
new file mode 100644
index 00000000000..4f7242c676b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh770x.c
@@ -0,0 +1,33 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+
+#define SCPCR 0xA4000116
+#define SCPDR 0xA4000136
+
+static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
+ __raw_writew(data & 0x0fcf, SCPCR);
+
+ if (!(cflag & CRTSCTS)) {
+ /* We need to set SCPCR to enable RTS/CTS */
+ data = __raw_readw(SCPCR);
+ /* Clear out SCP7MD1,0, SCP4MD1,0,
+ Set SCP6MD1,0 = {01} (output) */
+ __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+
+ data = __raw_readb(SCPDR);
+ /* Set /RTS2 (bit6) = 0 */
+ __raw_writeb(data & 0xbf, SCPDR);
+ }
+}
+
+struct plat_sci_port_ops sh770x_sci_port_ops = {
+ .init_pins = sh770x_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7710.c b/arch/sh/kernel/cpu/sh3/serial-sh7710.c
new file mode 100644
index 00000000000..42190ef6aeb
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7710.c
@@ -0,0 +1,20 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+
+#define PACR 0xa4050100
+#define PBCR 0xa4050102
+
+static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ if (port->mapbase == 0xA4400000) {
+ __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
+ __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
+ } else if (port->mapbase == 0xA4410000)
+ __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
+}
+
+struct plat_sci_port_ops sh7710_sci_port_ops = {
+ .init_pins = sh7710_sci_init_pins,
+};
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
new file mode 100644
index 00000000000..c4a0336660d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
@@ -0,0 +1,37 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+#include <cpu/serial.h>
+#include <cpu/gpio.h>
+
+static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (cflag & CRTSCTS) {
+ /* enable RTS/CTS */
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bit 9-2; enable all scif pins but sck */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xfc03), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bit 9-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xfc03), PORT_PVCR);
+ }
+ } else {
+ if (port->mapbase == 0xa4430000) { /* SCIF0 */
+ /* Clear PTCR bit 5-2; enable only tx and rx */
+ data = __raw_readw(PORT_PTCR);
+ __raw_writew((data & 0xffc3), PORT_PTCR);
+ } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
+ /* Clear PVCR bit 5-2 */
+ data = __raw_readw(PORT_PVCR);
+ __raw_writew((data & 0xffc3), PORT_PVCR);
+ }
+ }
+}
+
+struct plat_sci_port_ops sh7720_sci_port_ops = {
+ .init_pins = sh7720_sci_init_pins,
+};
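The three new serial-*.c files above move the SoC-specific SCIF pin muxing out of the sh-sci driver and hand it back through plat_sci_port_ops. The driver side is not part of this diff; the intent is only that the core driver calls .init_pins when it (re)configures a port, along the lines of the hypothetical call site below (the helper name and the exact spot in drivers/tty/serial/sh-sci.c are assumptions).

/* Hypothetical consumer of plat_sci_port_ops::init_pins; the real call
 * site lives in drivers/tty/serial/sh-sci.c and may differ in detail. */
#include <linux/serial_core.h>
#include <linux/serial_sci.h>

static void sci_apply_pin_config(struct uart_port *port,
                                 const struct plat_sci_port *cfg,
                                 unsigned int cflag)
{
        if (cfg->ops && cfg->ops->init_pins)
                cfg->ops->init_pins(port, cflag);       /* e.g. RTS/CTS routing */
}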
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c
new file mode 100644
index 00000000000..53be70b9811
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c
@@ -0,0 +1,71 @@
+/*
+ * Shared SH3 Setup code
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+/* All SH3 devices are equipped with IRQ0->5 (except sh7708) */
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+};
+
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+};
+
+static struct intc_vect vectors_irq45[] __initdata = {
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
+ { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4000004, 0, 8, /* IRR0 */
+ { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa4000010, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh3-irq0123",
+ vectors_irq0123, NULL, NULL,
+ prio_registers, sense_registers, ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45",
+ vectors_irq45, NULL, NULL,
+ prio_registers, sense_registers, ack_registers);
+
+#define INTC_ICR1 0xa4000010UL
+#define INTC_ICR1_IRQLVL (1<<14)
+
+void __init plat_irq_setup_pins(int mode)
+{
+ if (mode == IRQ_MODE_IRQ) {
+ __raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
+ register_intc_controller(&intc_desc_irq0123);
+ return;
+ }
+ BUG();
+}
+
+void __init plat_irq_setup_sh3(void)
+{
+ register_intc_controller(&intc_desc_irq45);
+}
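setup-sh3.c above factors the external IRQ0-5 handling out of the individual CPU setup files: plat_irq_setup_sh3() always registers the IRQ4/IRQ5 controller, while IRQ0-3 are only hooked up when a board asks for individual-IRQ mode via plat_irq_setup_pins(IRQ_MODE_IRQ), which also clears ICR1.IRQLVL. A board would request that roughly as below; the board name is made up and the <asm/irq.h> location of the prototype is assumed.

/* Illustrative board-side usage; "exampleboard" is not a real board. */
#include <linux/init.h>
#include <asm/irq.h>            /* plat_irq_setup_pins(), IRQ_MODE_IRQ (assumed) */

static void __init exampleboard_init_irq(void)
{
        /* Switch IRQ0-3 to individual interrupt mode and register the
         * "sh3-irq0123" controller declared in setup-sh3.c above. */
        plat_irq_setup_pins(IRQ_MODE_IRQ);
}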
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
index f581534cb73..6a72fd14de2 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -1,7 +1,7 @@
/*
* SH7705 Setup
*
- * Copyright (C) 2006, 2007 Paul Mundt
+ * Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -13,7 +13,10 @@
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
#include <asm/rtc.h>
+#include <cpu/serial.h>
enum {
UNUSED = 0,
@@ -21,51 +24,36 @@ enum {
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
PINT07, PINT815,
- DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3,
- SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
- SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
- ADC_ADI,
- USB_USI0, USB_USI1,
+
+ DMAC, SCIF0, SCIF2, ADC_ADI, USB,
+
TPU0, TPU1, TPU2, TPU3,
- TMU0, TMU1, TMU2_TUNI, TMU2_TICPI,
- RTC_ATI, RTC_PRI, RTC_CUI,
- WDT,
- REF_RCMI,
+ TMU0, TMU1, TMU2,
- /* interrupt groups */
- RTC, TMU2, DMAC, USB, SCIF2, SCIF0,
+ RTC, WDT, REF_RCMI,
};
static struct intc_vect vectors[] __initdata = {
- INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ /* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
- INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820),
- INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860),
- INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0),
- INTC_VECT(SCIF0_TXI, 0x8e0),
- INTC_VECT(SCIF2_ERI, 0x900), INTC_VECT(SCIF2_RXI, 0x920),
- INTC_VECT(SCIF2_TXI, 0x960),
+ INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
+ INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
+ INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
+ INTC_VECT(SCIF0, 0x8e0),
+ INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
+ INTC_VECT(SCIF2, 0x960),
INTC_VECT(ADC_ADI, 0x980),
- INTC_VECT(USB_USI0, 0xa20), INTC_VECT(USB_USI1, 0xa40),
+ INTC_VECT(USB, 0xa20), INTC_VECT(USB, 0xa40),
INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20),
- INTC_VECT(TPU3, 0xc80), INTC_VECT(TPU1, 0xca0),
+ INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
- INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
- INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
- INTC_VECT(RTC_CUI, 0x4c0),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF_RCMI, 0x580),
};
-static struct intc_group groups[] __initdata = {
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
- INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI),
- INTC_GROUP(DMAC, DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3),
- INTC_GROUP(USB, USB_USI0, USB_USI1),
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
- INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
@@ -78,38 +66,53 @@ static struct intc_prio_reg prio_registers[] __initdata = {
};
-static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, groups,
+static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
NULL, prio_registers, NULL);
-static struct intc_vect vectors_irq[] __initdata = {
- INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
- INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE |
+ SCSCR_RE | SCSCR_CKE1 | SCSCR_CKE0,
+ .type = PORT_SCIF,
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
};
-static DECLARE_INTC_DESC(intc_desc_irq, "sh7705-irq", vectors_irq, NULL,
- NULL, prio_registers, NULL);
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xa4410000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xa4410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 57, 59 },
- }, {
- .mapbase = 0xa4400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
+ .type = PORT_SCIF,
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xa4400000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
@@ -120,17 +123,9 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- .start = 20,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = 21,
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
- [3] = {
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
};
static struct sh_rtc_platform_info rtc_info = {
@@ -147,8 +142,31 @@ static struct platform_device rtc_device = {
},
};
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xfffffe90, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu-sh3",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
static struct platform_device *sh7705_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
&rtc_device,
};
@@ -157,18 +175,22 @@ static int __init sh7705_devices_setup(void)
return platform_add_devices(sh7705_devices,
ARRAY_SIZE(sh7705_devices));
}
-__initcall(sh7705_devices_setup);
+arch_initcall(sh7705_devices_setup);
+
+static struct platform_device *sh7705_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+};
-void __init plat_irq_setup_pins(int mode)
+void __init plat_early_device_setup(void)
{
- if (mode == IRQ_MODE_IRQ) {
- register_intc_controller(&intc_desc_irq);
- return;
- }
- BUG();
+ early_platform_add_devices(sh7705_early_devices,
+ ARRAY_SIZE(sh7705_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
+ plat_irq_setup_sh3();
}
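The sh7705 conversion above swaps hard-coded Linux IRQ numbers for evt2irq() on the hardware vector and splits the single sh-sci device into one platform device per port with DEFINE_RES_MEM/DEFINE_RES_IRQ resources. The mapping can be checked against the constants being deleted: evt2irq() works out to (evt >> 5) - 16 here, so evt2irq(0x480) is the old RTC IRQ 20 and evt2irq(0x900)/evt2irq(0x880) are the old SCIF first-IRQ entries 56 and 52. On the consumer side the resources are fetched with the usual platform helpers; a generic probe-side sketch (not taken from the sh-sci or RTC drivers):

/* Generic sketch of how DEFINE_RES_MEM / DEFINE_RES_IRQ resources are
 * consumed by a platform driver probe; illustrative only. */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/io.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);    /* evt2irq(0x900) for scif0 */
        void __iomem *base;

        if (!mem || irq < 0)
                return -ENODEV;

        base = ioremap(mem->start, resource_size(mem));
        if (!base)
                return -ENOMEM;

        /* ... register the port/RTC using base and irq ... */
        return 0;
}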
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index d3733b13ea5..9139d14b9c5 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -2,6 +2,7 @@
* SH3 Setup code for SH7706, SH7707, SH7708, SH7709
*
* Copyright (C) 2007 Magnus Damm
+ * Copyright (C) 2009 Paul Mundt
*
* Based on setup-sh7709.c
*
@@ -17,6 +18,9 @@
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <cpu/serial.h>
enum {
UNUSED = 0,
@@ -24,46 +28,37 @@ enum {
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
PINT07, PINT815,
- DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3,
- SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
- SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
- SCI_ERI, SCI_RXI, SCI_TXI, SCI_TEI,
- ADC_ADI,
+ DMAC, SCIF0, SCIF2, SCI, ADC_ADI,
LCDC, PCC0, PCC1,
- TMU0, TMU1, TMU2_TUNI, TMU2_TICPI,
- RTC_ATI, RTC_PRI, RTC_CUI,
- WDT,
- REF_RCMI, REF_ROVI,
-
- /* interrupt groups */
- RTC, REF, TMU2, DMAC, SCI, SCIF2, SCIF0,
+ TMU0, TMU1, TMU2,
+ RTC, WDT, REF,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
- INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
- INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
- INTC_VECT(RTC_CUI, 0x4c0),
- INTC_VECT(SCI_ERI, 0x4e0), INTC_VECT(SCI_RXI, 0x500),
- INTC_VECT(SCI_TXI, 0x520), INTC_VECT(SCI_TEI, 0x540),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SCI, 0x4e0), INTC_VECT(SCI, 0x500),
+ INTC_VECT(SCI, 0x520), INTC_VECT(SCI, 0x540),
INTC_VECT(WDT, 0x560),
- INTC_VECT(REF_RCMI, 0x580),
- INTC_VECT(REF_ROVI, 0x5a0),
+ INTC_VECT(REF, 0x580),
+ INTC_VECT(REF, 0x5a0),
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
- INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
- INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820),
- INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860),
+ /* IRQ0->5 are handled in setup-sh3.c */
+ INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
+ INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
INTC_VECT(ADC_ADI, 0x980),
- INTC_VECT(SCIF2_ERI, 0x900), INTC_VECT(SCIF2_RXI, 0x920),
- INTC_VECT(SCIF2_BRI, 0x940), INTC_VECT(SCIF2_TXI, 0x960),
+ INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
+ INTC_VECT(SCIF2, 0x940), INTC_VECT(SCIF2, 0x960),
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
- INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0),
- INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0),
+ INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
+ INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0),
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707)
INTC_VECT(LCDC, 0x9a0),
@@ -71,16 +66,6 @@ static struct intc_vect vectors[] __initdata = {
#endif
};
-static struct intc_group groups[] __initdata = {
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
- INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI),
- INTC_GROUP(REF, REF_RCMI, REF_ROVI),
- INTC_GROUP(DMAC, DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3),
- INTC_GROUP(SCI, SCI_ERI, SCI_RXI, SCI_TXI, SCI_TEI),
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
- INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
@@ -101,21 +86,9 @@ static struct intc_prio_reg prio_registers[] __initdata = {
#endif
};
-static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, groups,
+static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, NULL,
NULL, prio_registers, NULL);
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-static struct intc_vect vectors_irq[] __initdata = {
- INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
- INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
-};
-
-static DECLARE_INTC_DESC(intc_desc_irq, "sh770x-irq", vectors_irq, NULL,
- NULL, prio_registers, NULL);
-#endif
-
static struct resource rtc_resources[] = {
[0] = {
.start = 0xfffffec0,
@@ -123,15 +96,7 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- .start = 21,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- .start = 20,
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
@@ -143,47 +108,115 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfffffe80,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 23, 24, 25, 0 },
+static struct plat_sci_port scif0_platform_data = {
+ .port_reg = 0xa4000136,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .type = PORT_SCI,
+ .ops = &sh770x_sci_port_ops,
+ .regshift = 1,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xfffffe80, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x4e0)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
},
+};
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
- {
- .mapbase = 0xa4000150,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 57, 59, 58 },
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .type = PORT_SCIF,
+ .ops = &sh770x_sci_port_ops,
+ .regtype = SCIx_SH3_SCIF_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xa4000150, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
},
+};
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
- {
- .mapbase = 0xa4000140,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_IRDA,
- .irqs = { 52, 53, 55, 54 },
- },
-#endif
- {
- .flags = 0,
- }
+static struct plat_sci_port scif2_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .type = PORT_IRDA,
+ .ops = &sh770x_sci_port_ops,
+ .regshift = 1,
};
-static struct platform_device sci_device = {
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xa4000140, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+};
+
+static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
},
};
+#endif
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xfffffe90, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu-sh3",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
static struct platform_device *sh770x_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif1_device,
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif2_device,
+#endif
+ &tmu0_device,
&rtc_device,
};
@@ -192,26 +225,34 @@ static int __init sh770x_devices_setup(void)
return platform_add_devices(sh770x_devices,
ARRAY_SIZE(sh770x_devices));
}
-__initcall(sh770x_devices_setup);
+arch_initcall(sh770x_devices_setup);
-#define INTC_ICR1 0xa4000010UL
-#define INTC_ICR1_IRQLVL (1<<14)
-
-void __init plat_irq_setup_pins(int mode)
-{
- if (mode == IRQ_MODE_IRQ) {
+static struct platform_device *sh770x_early_devices[] __initdata = {
+ &scif0_device,
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
- ctrl_outw(ctrl_inw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
- register_intc_controller(&intc_desc_irq);
- return;
+ &scif1_device,
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ &scif2_device,
#endif
- }
- BUG();
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh770x_early_devices,
+ ARRAY_SIZE(sh770x_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
+#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709)
+ plat_irq_setup_sh3();
+#endif
}
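As in setup-sh7705.c, the sh770x timers and serial ports are now registered twice: once as ordinary platform devices from arch_initcall(), and once through plat_early_device_setup() so the clocksource/clockevent and early console can come up before the driver model is initialised. Drivers opt into that early pass with the early_platform hooks; the sketch below shows the general shape, but treat the "earlytimer" class string and the macro usage as recollections to verify rather than facts from this diff.

/* Sketch of the driver-side opt-in for early platform devices; details
 * (class string, macro form) are from memory -- check against
 * include/linux/platform_device.h of this era before relying on them. */
#include <linux/platform_device.h>

static int example_timer_probe(struct platform_device *pdev)
{
        /* set up the clocksource/clockevent from pdev's resources */
        return 0;
}

static struct platform_driver example_timer_driver = {
        .probe  = example_timer_probe,
        .driver = {
                .name = "sh-tmu-sh3",   /* binds by name to tmu0_device above */
        },
};

early_platform_init("earlytimer", &example_timer_driver);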
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
index 7406c9ad925..e9ed300dba5 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -1,7 +1,7 @@
/*
* SH3 Setup code for SH7710, SH7712
*
- * Copyright (C) 2006, 2007 Paul Mundt
+ * Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -13,6 +13,8 @@
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
#include <asm/rtc.h>
enum {
@@ -20,83 +22,53 @@ enum {
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
- DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3,
- SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
- SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
- DMAC_DEI4, DMAC_DEI5,
- IPSEC,
+ DMAC1, SCIF0, SCIF1, DMAC2, IPSEC,
EDMAC0, EDMAC1, EDMAC2,
- SIOF0_ERI, SIOF0_TXI, SIOF0_RXI, SIOF0_CCI,
- SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI,
- TMU0, TMU1, TMU2,
- RTC_ATI, RTC_PRI, RTC_CUI,
- WDT,
- REF,
+ SIOF0, SIOF1,
- /* interrupt groups */
- RTC, DMAC1, SCIF0, SCIF1, DMAC2, SIOF0, SIOF1,
+ TMU0, TMU1, TMU2,
+ RTC, WDT, REF,
};
static struct intc_vect vectors[] __initdata = {
- INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
- INTC_VECT(DMAC_DEI0, 0x800), INTC_VECT(DMAC_DEI1, 0x820),
- INTC_VECT(DMAC_DEI2, 0x840), INTC_VECT(DMAC_DEI3, 0x860),
- INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0),
- INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0),
- INTC_VECT(SCIF1_ERI, 0x900), INTC_VECT(SCIF1_RXI, 0x920),
- INTC_VECT(SCIF1_BRI, 0x940), INTC_VECT(SCIF1_TXI, 0x960),
- INTC_VECT(DMAC_DEI4, 0xb80), INTC_VECT(DMAC_DEI5, 0xba0),
+ /* IRQ0->5 are handled in setup-sh3.c */
+ INTC_VECT(DMAC1, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC1, 0x840), INTC_VECT(DMAC1, 0x860),
+ INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
+ INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0),
+ INTC_VECT(SCIF1, 0x900), INTC_VECT(SCIF1, 0x920),
+ INTC_VECT(SCIF1, 0x940), INTC_VECT(SCIF1, 0x960),
+ INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0),
#ifdef CONFIG_CPU_SUBTYPE_SH7710
INTC_VECT(IPSEC, 0xbe0),
#endif
INTC_VECT(EDMAC0, 0xc00), INTC_VECT(EDMAC1, 0xc20),
INTC_VECT(EDMAC2, 0xc40),
- INTC_VECT(SIOF0_ERI, 0xe00), INTC_VECT(SIOF0_TXI, 0xe20),
- INTC_VECT(SIOF0_RXI, 0xe40), INTC_VECT(SIOF0_CCI, 0xe60),
- INTC_VECT(SIOF1_ERI, 0xe80), INTC_VECT(SIOF1_TXI, 0xea0),
- INTC_VECT(SIOF1_RXI, 0xec0), INTC_VECT(SIOF1_CCI, 0xee0),
+ INTC_VECT(SIOF0, 0xe00), INTC_VECT(SIOF0, 0xe20),
+ INTC_VECT(SIOF0, 0xe40), INTC_VECT(SIOF0, 0xe60),
+ INTC_VECT(SIOF1, 0xe80), INTC_VECT(SIOF1, 0xea0),
+ INTC_VECT(SIOF1, 0xec0), INTC_VECT(SIOF1, 0xee0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
- INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
- INTC_VECT(RTC_CUI, 0x4c0),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF, 0x580),
};
-static struct intc_group groups[] __initdata = {
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
- INTC_GROUP(DMAC1, DMAC_DEI0, DMAC_DEI1, DMAC_DEI2, DMAC_DEI3),
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
- INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(DMAC2, DMAC_DEI4, DMAC_DEI5),
- INTC_GROUP(SIOF0, SIOF0_ERI, SIOF0_TXI, SIOF0_RXI, SIOF0_CCI),
- INTC_GROUP(SIOF1, SIOF1_ERI, SIOF1_TXI, SIOF1_RXI, SIOF1_CCI),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
{ 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
{ 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC1, SCIF0, SCIF1 } },
- { 0xa4080000, 0, 16, 4, /* IPRF */ { 0, DMAC2 } },
-#ifdef CONFIG_CPU_SUBTYPE_SH7710
- { 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC } },
-#endif
+ { 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC, DMAC2 } },
{ 0xa4080002, 0, 16, 4, /* IPRG */ { EDMAC0, EDMAC1, EDMAC2 } },
{ 0xa4080004, 0, 16, 4, /* IPRH */ { 0, 0, 0, SIOF0 } },
{ 0xa4080006, 0, 16, 4, /* IPRI */ { 0, 0, SIOF1 } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, groups,
- NULL, prio_registers, NULL);
-
-static struct intc_vect vectors_irq[] __initdata = {
- INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
- INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
-};
-
-static DECLARE_INTC_DESC(intc_desc_irq, "sh7710-irq", vectors_irq, NULL,
+static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, NULL,
NULL, prio_registers, NULL);
static struct resource rtc_resources[] = {
@@ -106,17 +78,9 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- .start = 20,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = 21,
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
- [3] = {
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
};
static struct sh_rtc_platform_info rtc_info = {
@@ -133,33 +97,75 @@ static struct platform_device rtc_device = {
},
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xa4400000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
- }, {
- .mapbase = 0xa4410000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 56, 57, 59, 58 },
- }, {
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+ SCSCR_CKE1 | SCSCR_CKE0,
+ .type = PORT_SCIF,
+};
- .flags = 0,
- }
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xa4400000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif0_platform_data,
},
};
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE |
+ SCSCR_CKE1 | SCSCR_CKE0,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xa4410000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xa412fe90, 0x28),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu-sh3",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
static struct platform_device *sh7710_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
&rtc_device,
};
@@ -168,18 +174,22 @@ static int __init sh7710_devices_setup(void)
return platform_add_devices(sh7710_devices,
ARRAY_SIZE(sh7710_devices));
}
-__initcall(sh7710_devices_setup);
+arch_initcall(sh7710_devices_setup);
+
+static struct platform_device *sh7710_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+};
-void __init plat_irq_setup_pins(int mode)
+void __init plat_early_device_setup(void)
{
- if (mode == IRQ_MODE_IRQ) {
- register_intc_controller(&intc_desc_irq);
- return;
- }
- BUG();
+ early_platform_add_devices(sh7710_early_devices,
+ ARRAY_SIZE(sh7710_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
+ plat_irq_setup_sh3();
}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
index 8028082527c..84df85a5b80 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c
@@ -1,7 +1,8 @@
/*
- * SH7720 Setup
+ * Setup code for SH7720, SH7721.
*
* Copyright (C) 2007 Markus Brunner, Mark Jonas
+ * Copyright (C) 2009 Paul Mundt
*
* Based on arch/sh/kernel/cpu/sh4/setup-sh7750.c:
*
@@ -17,11 +18,11 @@
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/usb/ohci_pdriver.h>
#include <asm/rtc.h>
-
-#define INTC_ICR1 0xA4140010UL
-#define INTC_ICR_IRLM 0x4000
-#define INTC_ICR_IRQ (~INTC_ICR_IRLM)
+#include <cpu/serial.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -30,18 +31,8 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- /* Period IRQ */
- .start = 21,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = 20,
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
@@ -60,28 +51,49 @@ static struct platform_device rtc_device = {
},
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xa4430000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- }, {
- .mapbase = 0xa4438000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
- }, {
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .type = PORT_SCIF,
+ .ops = &sh7720_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
- .flags = 0,
- }
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xa4430000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .type = PORT_SCIF,
+ .ops = &sh7720_sci_port_ops,
+ .regtype = SCIx_SH7705_SCIF_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xa4438000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc20)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
@@ -92,19 +104,23 @@ static struct resource usb_ohci_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = 67,
- .end = 67,
+ .start = evt2irq(0xa60),
+ .end = evt2irq(0xa60),
.flags = IORESOURCE_IRQ,
},
};
static u64 usb_ohci_dma_mask = 0xffffffffUL;
+
+static struct usb_ohci_pdata usb_ohci_pdata;
+
static struct platform_device usb_ohci_device = {
- .name = "sh_ohci",
+ .name = "ohci-platform",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_dma_mask,
.coherent_dma_mask = 0xffffffff,
+ .platform_data = &usb_ohci_pdata,
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
@@ -119,8 +135,8 @@ static struct resource usbf_resources[] = {
},
[1] = {
.name = "sh_udc",
- .start = 65,
- .end = 65,
+ .start = evt2irq(0xa20),
+ .end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ,
},
};
@@ -136,9 +152,52 @@ static struct platform_device usbf_device = {
.resource = usbf_resources,
};
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 0x1f,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0x044a0000, 0x60),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-32",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xa412fe90, 0x28),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu-sh3",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
static struct platform_device *sh7720_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &cmt_device,
+ &tmu0_device,
&rtc_device,
- &sci_device,
&usb_ohci_device,
&usbf_device,
};
@@ -148,77 +207,74 @@ static int __init sh7720_devices_setup(void)
return platform_add_devices(sh7720_devices,
ARRAY_SIZE(sh7720_devices));
}
-__initcall(sh7720_devices_setup);
+arch_initcall(sh7720_devices_setup);
+
+static struct platform_device *sh7720_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &cmt_device,
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7720_early_devices,
+ ARRAY_SIZE(sh7720_early_devices));
+}
enum {
UNUSED = 0,
/* interrupt sources */
- TMU0, TMU1, TMU2, RTC_ATI, RTC_PRI, RTC_CUI,
- WDT, REF_RCMI, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
+ TMU0, TMU1, TMU2, RTC,
+ WDT, REF_RCMI, SIM,
IRQ0, IRQ1, IRQ2, IRQ3,
USBF_SPD, TMU_SUNI, IRQ5, IRQ4,
- DMAC1_DEI0, DMAC1_DEI1, DMAC1_DEI2, DMAC1_DEI3, LCDC, SSL,
- ADC, DMAC2_DEI4, DMAC2_DEI5, USBFI0, USBFI1, CMT,
+ DMAC1, LCDC, SSL,
+ ADC, DMAC2, USBFI, CMT,
SCIF0, SCIF1,
- PINT07, PINT815, TPU0, TPU1, TPU2, TPU3, IIC,
- SIOF0, SIOF1, MMCI0, MMCI1, MMCI2, MMCI3, PCC,
+ PINT07, PINT815, TPU, IIC,
+ SIOF0, SIOF1, MMC, PCC,
USBHI, AFEIF,
H_UDI,
- /* interrupt groups */
- TMU, RTC, SIM, DMAC1, USBFI, DMAC2, USB, TPU, MMC,
};
static struct intc_vect vectors[] __initdata = {
+ /* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
- INTC_VECT(TMU2, 0x440), INTC_VECT(RTC_ATI, 0x480),
- INTC_VECT(RTC_PRI, 0x4a0), INTC_VECT(RTC_CUI, 0x4c0),
- INTC_VECT(SIM_ERI, 0x4e0), INTC_VECT(SIM_RXI, 0x500),
- INTC_VECT(SIM_TXI, 0x520), INTC_VECT(SIM_TEND, 0x540),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(RTC, 0x480),
+ INTC_VECT(RTC, 0x4a0), INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SIM, 0x4e0), INTC_VECT(SIM, 0x500),
+ INTC_VECT(SIM, 0x520), INTC_VECT(SIM, 0x540),
INTC_VECT(WDT, 0x560), INTC_VECT(REF_RCMI, 0x580),
/* H_UDI cannot be masked */ INTC_VECT(TMU_SUNI, 0x6c0),
- INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1_DEI0, 0x800),
- INTC_VECT(DMAC1_DEI1, 0x820), INTC_VECT(DMAC1_DEI2, 0x840),
- INTC_VECT(DMAC1_DEI3, 0x860), INTC_VECT(LCDC, 0x900),
+ INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1, 0x800),
+ INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC1, 0x840),
+ INTC_VECT(DMAC1, 0x860), INTC_VECT(LCDC, 0x900),
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
INTC_VECT(SSL, 0x980),
#endif
- INTC_VECT(USBFI0, 0xa20), INTC_VECT(USBFI1, 0xa40),
+ INTC_VECT(USBFI, 0xa20), INTC_VECT(USBFI, 0xa40),
INTC_VECT(USBHI, 0xa60),
- INTC_VECT(DMAC2_DEI4, 0xb80), INTC_VECT(DMAC2_DEI5, 0xba0),
+ INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0),
INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00),
INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80),
INTC_VECT(PINT815, 0xca0), INTC_VECT(SIOF0, 0xd00),
- INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU0, 0xd80),
- INTC_VECT(TPU1, 0xda0), INTC_VECT(TPU2, 0xdc0),
- INTC_VECT(TPU3, 0xde0), INTC_VECT(IIC, 0xe00),
- INTC_VECT(MMCI0, 0xe80), INTC_VECT(MMCI1, 0xea0),
- INTC_VECT(MMCI2, 0xec0), INTC_VECT(MMCI3, 0xee0),
+ INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU, 0xd80),
+ INTC_VECT(TPU, 0xda0), INTC_VECT(TPU, 0xdc0),
+ INTC_VECT(TPU, 0xde0), INTC_VECT(IIC, 0xe00),
+ INTC_VECT(MMC, 0xe80), INTC_VECT(MMC, 0xea0),
+ INTC_VECT(MMC, 0xec0), INTC_VECT(MMC, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(PCC, 0xf60),
INTC_VECT(AFEIF, 0xfe0),
};
-static struct intc_group groups[] __initdata = {
- INTC_GROUP(TMU, TMU0, TMU1, TMU2),
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
- INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
- INTC_GROUP(DMAC1, DMAC1_DEI0, DMAC1_DEI1, DMAC1_DEI2, DMAC1_DEI3),
- INTC_GROUP(USBFI, USBFI0, USBFI1),
- INTC_GROUP(DMAC2, DMAC2_DEI4, DMAC2_DEI5),
- INTC_GROUP(TPU, TPU0, TPU1, TPU2, TPU3),
- INTC_GROUP(MMC, MMCI0, MMCI1, MMCI2, MMCI3),
-};
-
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
{ 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
-#if defined(CONFIG_CPU_SUBTYPE_SH7720)
{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
-#else
- { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, 0 } },
-#endif
{ 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
{ 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
{ 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
@@ -226,35 +282,11 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xA4080008UL, 0, 16, 4, /* IPRJ */ { 0, USBHI, 0, AFEIF } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, groups,
+static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, NULL,
NULL, prio_registers, NULL);
-static struct intc_sense_reg sense_registers[] __initdata = {
- { INTC_ICR1, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
-};
-
-static struct intc_vect vectors_irq[] __initdata = {
- INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
- INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
- INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
-};
-
-static DECLARE_INTC_DESC(intc_irq_desc, "sh7720-irq", vectors_irq,
- NULL, NULL, prio_registers, sense_registers);
-
-void __init plat_irq_setup_pins(int mode)
-{
- switch (mode) {
- case IRQ_MODE_IRQ:
- ctrl_outw(ctrl_inw(INTC_ICR1) & INTC_ICR_IRQ, INTC_ICR1);
- register_intc_controller(&intc_irq_desc);
- break;
- default:
- BUG();
- }
-}
-
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
+ plat_irq_setup_sh3();
}
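
The hunk above shows the pattern repeated throughout this series: the per-subvector enum IDs (RTC_ATI/RTC_PRI/RTC_CUI and friends) and the intc_group tables are replaced by a single enum ID repeated across several INTC_VECT() entries, so a peripheral block is still masked and prioritised as one unit. A minimal standalone sketch of that mapping follows; the lookup code is purely illustrative and is not the kernel's intc implementation.

/*
 * Standalone illustration (not kernel code): several INTC vectors can
 * share one enum ID, so a whole block (here "RTC") is handled as a
 * single priority source without a separate intc_group table.
 * The vector offsets come from the hunk above; the lookup logic is an
 * assumption made for illustration only.
 */
#include <stdio.h>

enum { UNUSED = 0, TMU0, TMU1, TMU2, RTC, SIM };

struct vect { int enum_id; unsigned int vect; };

static const struct vect vectors[] = {
	{ TMU0, 0x400 }, { TMU1, 0x420 }, { TMU2, 0x440 },
	{ RTC, 0x480 }, { RTC, 0x4a0 }, { RTC, 0x4c0 },	/* was ATI/PRI/CUI */
	{ SIM, 0x4e0 }, { SIM, 0x500 }, { SIM, 0x520 }, { SIM, 0x540 },
};

static int id_for_vector(unsigned int evt)
{
	for (unsigned int i = 0; i < sizeof(vectors) / sizeof(vectors[0]); i++)
		if (vectors[i].vect == evt)
			return vectors[i].enum_id;
	return UNUSED;
}

int main(void)
{
	/* 0x480, 0x4a0 and 0x4c0 all land on the same priority source */
	printf("0x480 -> %d, 0x4a0 -> %d, 0x4c0 -> %d (RTC=%d)\n",
	       id_for_vector(0x480), id_for_vector(0x4a0),
	       id_for_vector(0x4c0), RTC);
	return 0;
}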
diff --git a/arch/sh/kernel/cpu/sh3/swsusp.S b/arch/sh/kernel/cpu/sh3/swsusp.S
new file mode 100644
index 00000000000..01145426a2b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/swsusp.S
@@ -0,0 +1,147 @@
+/*
+ * arch/sh/kernel/cpu/sh3/swsusp.S
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+
+#define k0 r0
+#define k1 r1
+#define k2 r2
+#define k3 r3
+#define k4 r4
+
+! swsusp_arch_resume()
+! - copy restore_pblist pages
+! - restore registers from swsusp_arch_regs_cpu0
+
+ENTRY(swsusp_arch_resume)
+ mov.l 1f, r15
+ mov.l 2f, r4
+ mov.l @r4, r4
+
+swsusp_copy_loop:
+ mov r4, r0
+ cmp/eq #0, r0
+ bt swsusp_restore_regs
+
+ mov.l @(PBE_ADDRESS, r4), r2
+ mov.l @(PBE_ORIG_ADDRESS, r4), r5
+
+ mov #(PAGE_SIZE >> 10), r3
+ shll8 r3
+ shlr2 r3 /* PAGE_SIZE / 16 */
+swsusp_copy_page:
+ dt r3
+ mov.l @r2+,r1 /* 16n+0 */
+ mov.l r1,@r5
+ add #4,r5
+ mov.l @r2+,r1 /* 16n+4 */
+ mov.l r1,@r5
+ add #4,r5
+ mov.l @r2+,r1 /* 16n+8 */
+ mov.l r1,@r5
+ add #4,r5
+ mov.l @r2+,r1 /* 16n+12 */
+ mov.l r1,@r5
+ bf/s swsusp_copy_page
+ add #4,r5
+
+ bra swsusp_copy_loop
+ mov.l @(PBE_NEXT, r4), r4
+
+swsusp_restore_regs:
+ ! BL=0: R7->R0 is bank0
+ mov.l 3f, r8
+ mov.l 4f, r5
+ jsr @r5
+ nop
+
+ ! BL=1: R7->R0 is bank1
+ lds k2, pr
+ ldc k3, ssr
+
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+
+ rte
+ nop
+ ! BL=0: R7->R0 is bank0
+
+ .align 2
+1: .long swsusp_arch_regs_cpu0
+2: .long restore_pblist
+3: .long 0x20000000 ! RB=1
+4: .long restore_regs
+
+! swsusp_arch_suspend()
+! - prepare pc for resume so the function returns without calling swsusp_save on resume
+! - save registers in swsusp_arch_regs_cpu0
+! - call swsusp_save to write the suspend image
+
+ENTRY(swsusp_arch_suspend)
+ sts pr, r0 ! save pr in r0
+ mov r15, r2 ! save sp in r2
+ mov r8, r5 ! save r8 in r5
+ stc sr, r1
+ ldc r1, ssr ! save sr in ssr
+ mov.l 1f, r1
+ ldc r1, spc ! setup pc value for resuming
+ mov.l 5f, r15 ! use swsusp_arch_regs_cpu0 as stack
+ mov.l 6f, r3
+ add r3, r15 ! save from top of structure
+
+ ! BL=0: R7->R0 is bank0
+ mov.l 2f, r3 ! get new SR value for bank1
+ mov #0, r4
+ mov.l 7f, r1
+ jsr @r1 ! switch to bank1 and save bank1 r7->r0
+ not r4, r4
+
+ ! BL=1: R7->R0 is bank1
+ stc r2_bank, k0 ! fetch old sp from r2_bank0
+ mov.l 3f, k4 ! SR bits to clear in k4
+ mov.l 8f, k1
+ jsr @k1 ! switch to bank0 and save all regs
+ stc r0_bank, k3 ! fetch old pr from r0_bank0
+
+ ! BL=0: R7->R0 is bank0
+ mov r2, r15 ! restore old sp
+ mov r5, r8 ! restore old r8
+ stc ssr, r1
+ ldc r1, sr ! restore old sr
+ lds r0, pr ! restore old pr
+ mov.l 4f, r0
+ jmp @r0
+ nop
+
+swsusp_call_save:
+ mov r2, r15 ! restore old sp
+ mov r5, r8 ! restore old r8
+ lds r0, pr ! restore old pr
+ rts
+ mov #0, r0
+
+ .align 2
+1: .long swsusp_call_save
+2: .long 0x20000000 ! RB=1
+3: .long 0xdfffffff ! RB=0
+4: .long swsusp_save
+5: .long swsusp_arch_regs_cpu0
+6: .long SWSUSP_ARCH_REGS_SIZE
+7: .long save_low_regs
+8: .long save_regs
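
The resume path above walks restore_pblist and copies each saved page back over its original page, 16 bytes per loop pass, before restoring the banked registers. A hedged C rendering of the copy loop follows; struct pbe is modelled on the fields the assembly reaches through PBE_ADDRESS, PBE_ORIG_ADDRESS and PBE_NEXT, and the page size and list here are stand-ins.

/*
 * Userspace sketch of swsusp_copy_loop above: walk a pbe list and copy
 * PAGE_SIZE bytes from the spare copy back to the original page.
 * Everything outside the walk-and-copy is a stand-in for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct pbe {
	void *address;		/* copy of the page data */
	void *orig_address;	/* where it has to go back to */
	struct pbe *next;
};

static void copy_back(struct pbe *restore_pblist)
{
	for (struct pbe *p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}

int main(void)
{
	char *orig = calloc(1, PAGE_SIZE);
	char *save = malloc(PAGE_SIZE);
	struct pbe pbe = { save, orig, NULL };

	memset(save, 0xa5, PAGE_SIZE);
	copy_back(&pbe);
	printf("restored byte: 0x%02x\n", (unsigned char)orig[0]);	/* 0xa5 */
	free(orig);
	free(save);
	return 0;
}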
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index d608557c7a3..3a1dbc70983 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -5,9 +5,15 @@
obj-y := probe.o common.o
common-y += $(addprefix ../sh3/, entry.o ex.o)
+obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o)
obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o
obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
+# Perf events
+perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o
+perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o
+
# CPU subtype setup
obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o
obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o
@@ -26,4 +32,5 @@ endif
# Additional clocks by subtype
clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o
-obj-y += $(clock-y)
+obj-y += $(clock-y)
+obj-$(CONFIG_PERF_EVENTS) += $(perf-y)
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index a33429463e9..4b5bab5f875 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -12,19 +12,20 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
#define CPG2_FRQCR3 0xfe0a0018
static int frqcr3_divisors[] = { 1, 2, 3, 4, 6, 8, 16 };
static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 };
-static void emi_clk_recalc(struct clk *clk)
+static unsigned long emi_clk_recalc(struct clk *clk)
{
- int idx = ctrl_inl(CPG2_FRQCR3) & 0x0007;
- clk->rate = clk->parent->rate / frqcr3_divisors[idx];
+ int idx = __raw_readl(CPG2_FRQCR3) & 0x0007;
+ return clk->parent->rate / frqcr3_divisors[idx];
}
static inline int frqcr3_lookup(struct clk *clk, unsigned long rate)
@@ -40,29 +41,27 @@ static inline int frqcr3_lookup(struct clk *clk, unsigned long rate)
return 5;
}
-static struct clk_ops sh4202_emi_clk_ops = {
+static struct sh_clk_ops sh4202_emi_clk_ops = {
.recalc = emi_clk_recalc,
};
static struct clk sh4202_emi_clk = {
- .name = "emi_clk",
- .flags = CLK_ALWAYS_ENABLED,
+ .flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_emi_clk_ops,
};
-static void femi_clk_recalc(struct clk *clk)
+static unsigned long femi_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(CPG2_FRQCR3) >> 3) & 0x0007;
- clk->rate = clk->parent->rate / frqcr3_divisors[idx];
+ int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007;
+ return clk->parent->rate / frqcr3_divisors[idx];
}
-static struct clk_ops sh4202_femi_clk_ops = {
+static struct sh_clk_ops sh4202_femi_clk_ops = {
.recalc = femi_clk_recalc,
};
static struct clk sh4202_femi_clk = {
- .name = "femi_clk",
- .flags = CLK_ALWAYS_ENABLED,
+ .flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_femi_clk_ops,
};
@@ -82,18 +81,17 @@ static void shoc_clk_init(struct clk *clk)
for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
int divisor = frqcr3_divisors[i];
- if (clk->ops->set_rate(clk, clk->parent->rate /
- divisor, 0) == 0)
+ if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
break;
}
WARN_ON(i == ARRAY_SIZE(frqcr3_divisors)); /* Undefined clock */
}
-static void shoc_clk_recalc(struct clk *clk)
+static unsigned long shoc_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(CPG2_FRQCR3) >> 6) & 0x0007;
- clk->rate = clk->parent->rate / frqcr3_divisors[idx];
+ int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007;
+ return clk->parent->rate / frqcr3_divisors[idx];
}
static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
@@ -111,7 +109,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
return 0;
}
-static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long frqcr3;
unsigned int tmp;
@@ -122,25 +120,24 @@ static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id)
tmp = frqcr3_lookup(clk, rate);
- frqcr3 = ctrl_inl(CPG2_FRQCR3);
+ frqcr3 = __raw_readl(CPG2_FRQCR3);
frqcr3 &= ~(0x0007 << 6);
frqcr3 |= tmp << 6;
- ctrl_outl(frqcr3, CPG2_FRQCR3);
+ __raw_writel(frqcr3, CPG2_FRQCR3);
clk->rate = clk->parent->rate / frqcr3_divisors[tmp];
return 0;
}
-static struct clk_ops sh4202_shoc_clk_ops = {
+static struct sh_clk_ops sh4202_shoc_clk_ops = {
.init = shoc_clk_init,
.recalc = shoc_clk_recalc,
.set_rate = shoc_clk_set_rate,
};
static struct clk sh4202_shoc_clk = {
- .name = "shoc_clk",
- .flags = CLK_ALWAYS_ENABLED,
+ .flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_shoc_clk_ops,
};
@@ -150,31 +147,31 @@ static struct clk *sh4202_onchip_clocks[] = {
&sh4202_shoc_clk,
};
-static int __init sh4202_clk_init(void)
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk),
+ CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk),
+ CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk),
+};
+
+int __init arch_clk_init(void)
{
- struct clk *clk = clk_get(NULL, "master_clk");
- int i;
+ struct clk *clk;
+ int i, ret = 0;
+ cpg_clk_init();
+
+ clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
struct clk *clkp = sh4202_onchip_clocks[i];
clkp->parent = clk;
- clk_register(clkp);
- clk_enable(clkp);
+ ret |= clk_register(clkp);
}
- /*
- * Now that we have the rest of the clocks registered, we need to
- * force the parent clock to propagate so that these clocks will
- * automatically figure out their rate. We cheat by handing the
- * parent clock its current rate and forcing child propagation.
- */
- clk_set_rate(clk, clk_get_rate(clk));
-
clk_put(clk);
- return 0;
-}
-
-arch_initcall(sh4202_clk_init);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ return ret;
+}
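
With the .name fields gone from the clk structures, the new CLKDEV_CON_ID table is what lets clk_get(NULL, "emi_clk") and friends resolve to the right clock. The sketch below shows the idea as a plain connection-ID lookup; the structs and the search are simplified stand-ins, not the kernel's clkdev code.

/*
 * Standalone sketch of what the CLKDEV_CON_ID table above provides:
 * the connection ID is matched against a lookup table instead of a
 * .name field inside struct clk. Simplified for illustration only.
 */
#include <stdio.h>
#include <string.h>

struct clk { unsigned long rate; };

struct clk_lookup_sketch { const char *con_id; struct clk *clk; };

static struct clk emi_clk, femi_clk, shoc_clk;

static const struct clk_lookup_sketch lookups[] = {
	{ "emi_clk",  &emi_clk  },
	{ "femi_clk", &femi_clk },
	{ "shoc_clk", &shoc_clk },
};

static struct clk *clk_get_sketch(const char *con_id)
{
	for (unsigned int i = 0; i < sizeof(lookups) / sizeof(lookups[0]); i++)
		if (!strcmp(lookups[i].con_id, con_id))
			return lookups[i].clk;
	return NULL;
}

int main(void)
{
	printf("emi_clk -> %p (expect %p)\n",
	       (void *)clk_get_sketch("emi_clk"), (void *)&emi_clk);
	return 0;
}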
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c
index dca9f87a12d..99e5ec8b483 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c
@@ -28,51 +28,51 @@ static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0007];
+ clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
}
-static struct clk_ops sh4_master_clk_ops = {
+static struct sh_clk_ops sh4_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) & 0x0007);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = (__raw_readw(FRQCR) & 0x0007);
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh4_module_clk_ops = {
+static struct sh_clk_ops sh4_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) >> 3) & 0x0007;
- clk->rate = clk->parent->rate / bfc_divisors[idx];
+ int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
+ return clk->parent->rate / bfc_divisors[idx];
}
-static struct clk_ops sh4_bus_clk_ops = {
+static struct sh_clk_ops sh4_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inw(FRQCR) >> 6) & 0x0007;
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ int idx = (__raw_readw(FRQCR) >> 6) & 0x0007;
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh4_cpu_clk_ops = {
+static struct sh_clk_ops sh4_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh4_clk_ops[] = {
+static struct sh_clk_ops *sh4_clk_ops[] = {
&sh4_master_clk_ops,
&sh4_module_clk_ops,
&sh4_bus_clk_ops,
&sh4_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh4_clk_ops))
*ops = sh4_clk_ops[idx];
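
The recurring change in both clock files is the recalc signature: the old clk_ops callback wrote clk->rate itself, while the new sh_clk_ops callback returns the rate and the framework stores it. A small self-contained sketch of the new convention, with FRQCR simulated and the divisor table copied from the hunk above:

/*
 * Sketch of the recalc convention change: the callback now returns the
 * rate and the caller assigns clk->rate. FRQCR is simulated; the
 * divisor table matches the pfc_divisors[] in clock-sh4.c.
 */
#include <stdio.h>

struct clk;
struct sh_clk_ops_sketch { unsigned long (*recalc)(struct clk *clk); };

struct clk {
	struct clk *parent;
	unsigned long rate;
	struct sh_clk_ops_sketch *ops;
};

static unsigned int fake_frqcr = 0x0003;	/* made-up register value */
static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };

static unsigned long module_clk_recalc(struct clk *clk)
{
	int idx = fake_frqcr & 0x0007;
	return clk->parent->rate / pfc_divisors[idx];	/* return, don't assign */
}

static struct sh_clk_ops_sketch ops = { .recalc = module_clk_recalc };

static void clk_recalc(struct clk *clk)
{
	clk->rate = clk->ops->recalc(clk);	/* framework stores the result */
}

int main(void)
{
	struct clk master = { .rate = 33333333UL * 6 };
	struct clk module = { .parent = &master, .ops = &ops };

	clk_recalc(&module);
	printf("module clk: %lu Hz\n", module.rate);	/* 33333333 */
	return 0;
}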
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c
index 8020796139f..69ab4d3c8d4 100644
--- a/arch/sh/kernel/cpu/sh4/fpu.c
+++ b/arch/sh/kernel/cpu/sh4/fpu.c
@@ -13,10 +13,10 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/io.h>
-#include <asm/cpu/fpu.h>
+#include <cpu/fpu.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/fpu.h>
+#include <asm/traps.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
@@ -36,18 +36,16 @@ extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
extern unsigned long long float64_sub(unsigned long long a,
unsigned long long b);
extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
-
+extern unsigned long int float64_to_float32(unsigned long long a);
static unsigned int fpu_exception_flags;
/*
* Save FPU registers onto task structure.
- * Assume called with FPU enabled (SR.FD=0).
*/
-void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
- clear_tsk_thread_flag(tsk, TIF_USEDFPU);
enable_fpu();
asm volatile ("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
@@ -87,15 +85,14 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
"fmov.s fr1, @-%0\n\t"
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t":"=r" (dummy)
- :"0"((char *)(&tsk->thread.fpu.hard.status)),
+ :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
"r"(FPSCR_RCHG), "r"(FPSCR_INIT)
:"memory");
disable_fpu();
- release_fpu(regs);
}
-static void restore_fpu(struct task_struct *tsk)
+void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
@@ -138,62 +135,11 @@ static void restore_fpu(struct task_struct *tsk)
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
:"=r" (dummy)
- :"0"(&tsk->thread.fpu), "r"(FPSCR_RCHG)
+ :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
:"memory");
disable_fpu();
}
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using
- * has the property that no matter wether considered as single or as
- * double precision represents signaling NANS.
- */
-
-static void fpu_init(void)
-{
- enable_fpu();
- asm volatile ( "lds %0, fpul\n\t"
- "lds %1, fpscr\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "fsts fpul, fr0\n\t"
- "fsts fpul, fr1\n\t"
- "fsts fpul, fr2\n\t"
- "fsts fpul, fr3\n\t"
- "fsts fpul, fr4\n\t"
- "fsts fpul, fr5\n\t"
- "fsts fpul, fr6\n\t"
- "fsts fpul, fr7\n\t"
- "fsts fpul, fr8\n\t"
- "fsts fpul, fr9\n\t"
- "fsts fpul, fr10\n\t"
- "fsts fpul, fr11\n\t"
- "fsts fpul, fr12\n\t"
- "fsts fpul, fr13\n\t"
- "fsts fpul, fr14\n\t"
- "fsts fpul, fr15\n\t"
- "frchg\n\t"
- "lds %2, fpscr\n\t"
- : /* no output */
- :"r" (0), "r"(FPSCR_RCHG), "r"(FPSCR_INIT));
- disable_fpu();
-}
-
/**
* denormal_to_double - Given denormalized float number,
* store double float
@@ -285,10 +231,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* fcnvsd */
struct task_struct *tsk = current;
- save_fpu(tsk, regs);
- if ((tsk->thread.fpu.hard.fpscr & FPSCR_CAUSE_ERROR))
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */
- denormal_to_double(&tsk->thread.fpu.hard,
+ denormal_to_double(&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
else
return 0;
@@ -304,9 +249,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -316,18 +261,18 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
llx = float64_mul(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
hx = float32_mul(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -342,9 +287,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -354,15 +299,15 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
if ((finsn & 0xf00f) == 0xf000)
llx = float64_add(llx, lly);
else
llx = float64_sub(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
@@ -371,7 +316,7 @@ static int ieee_fpe_handler(struct pt_regs *regs)
hx = float32_add(hx, hy);
else
hx = float32_sub(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
@@ -386,9 +331,9 @@ static int ieee_fpe_handler(struct pt_regs *regs)
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
- hx = tsk->thread.fpu.hard.fp_regs[n];
- hy = tsk->thread.fpu.hard.fp_regs[m];
- fpscr = tsk->thread.fpu.hard.fpscr;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[n];
+ hy = tsk->thread.xstate->hardfpu.fp_regs[m];
+ fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
@@ -398,20 +343,43 @@ static int ieee_fpe_handler(struct pt_regs *regs)
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
- | tsk->thread.fpu.hard.fp_regs[n + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
- | tsk->thread.fpu.hard.fp_regs[m + 1];
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
llx = float64_div(llx, lly);
- tsk->thread.fpu.hard.fp_regs[n] = llx >> 32;
- tsk->thread.fpu.hard.fp_regs[n + 1] = llx & 0xffffffff;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
+ tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
hx = float32_div(hx, hy);
- tsk->thread.fpu.hard.fp_regs[n] = hx;
+ tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
+ } else
+ return 0;
+
+ regs->pc = nextpc;
+ return 1;
+ } else if ((finsn & 0xf0bd) == 0xf0bd) {
+ /* fcnvds - double to single precision convert */
+ struct task_struct *tsk = current;
+ int m;
+ unsigned int hx;
+
+ m = (finsn >> 8) & 0x7;
+ hx = tsk->thread.xstate->hardfpu.fp_regs[m];
+
+ if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
+ && ((hx & 0x7fffffff) < 0x00100000)) {
+ /* subnormal double to float conversion */
+ long long llx;
+
+ llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
+ | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
+
+ tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
} else
return 0;
@@ -430,7 +398,7 @@ void float_raise(unsigned int flags)
int float_rounding_mode(void)
{
struct task_struct *tsk = current;
- int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.fpu.hard.fpscr);
+ int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
return roundingMode;
}
@@ -439,19 +407,19 @@ BUILD_TRAP_HANDLER(fpu_error)
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
- save_fpu(tsk, regs);
+ __unlazy_fpu(tsk, regs);
fpu_exception_flags = 0;
if (ieee_fpe_handler(regs)) {
- tsk->thread.fpu.hard.fpscr &=
+ tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
- tsk->thread.fpu.hard.fpscr |= fpu_exception_flags;
+ tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
/* Set the FPSCR flag as well as cause bits - simply
* replicate the cause */
- tsk->thread.fpu.hard.fpscr |= (fpu_exception_flags >> 10);
+ tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
grab_fpu(regs);
restore_fpu(tsk);
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
- if ((((tsk->thread.fpu.hard.fpscr & FPSCR_ENABLE_MASK) >> 7) &
+ task_thread_info(tsk)->status |= TS_USEDFPU;
+ if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
(fpu_exception_flags >> 2)) == 0) {
return;
}
@@ -459,25 +427,3 @@ BUILD_TRAP_HANDLER(fpu_error)
force_sig(SIGFPE, tsk);
}
-
-BUILD_TRAP_HANDLER(fpu_state_restore)
-{
- struct task_struct *tsk = current;
- TRAP_HANDLER_DECL;
-
- grab_fpu(regs);
- if (!user_mode(regs)) {
- printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
- return;
- }
-
- if (used_math()) {
- /* Using the FPU again. */
- restore_fpu(tsk);
- } else {
- /* First time FPU user. */
- fpu_init();
- set_used_math();
- }
- set_tsk_thread_flag(tsk, TIF_USEDFPU);
-}
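
The new fcnvds branch above pairs FR[m] and FR[m+1] into the 64-bit DR value and only emulates the conversion when the operand is subnormal; the (hx & 0x7fffffff) < 0x00100000 test holds exactly when the exponent field in the high word is zero. A standalone check of that bit test (host-side, for illustration only):

/*
 * Illustration of the fcnvds check: the high 32-bit half of an
 * IEEE-754 double holds the sign and the 11-bit exponent, so
 * (hx & 0x7fffffff) < 0x00100000 is true exactly when the exponent
 * field is zero, i.e. the double is zero or subnormal.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void check(double d)
{
	uint64_t bits;
	uint32_t hx, lx;

	memcpy(&bits, &d, sizeof(bits));
	hx = (uint32_t)(bits >> 32);	/* high half, as in fp_regs[m] */
	lx = (uint32_t)bits;		/* low half, as in fp_regs[m + 1] */

	printf("%-8g hx=%08x lx=%08x subnormal-or-zero=%d\n",
	       d, (unsigned)hx, (unsigned)lx,
	       (hx & 0x7fffffff) < 0x00100000);
}

int main(void)
{
	check(1.0);	/* normal: test is false */
	check(1e-310);	/* subnormal: test is true */
	return 0;
}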
diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c
new file mode 100644
index 00000000000..fa4f724b295
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/perf_event.c
@@ -0,0 +1,268 @@
+/*
+ * Performance events support for SH7750-style performance counters
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PM_CR_BASE 0xff000084 /* 16-bit */
+#define PM_CTR_BASE 0xff100004 /* 32-bit */
+
+#define PMCR(n) (PM_CR_BASE + ((n) * 0x04))
+#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08))
+#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08))
+
+#define PMCR_PMM_MASK 0x0000003f
+
+#define PMCR_CLKF 0x00000100
+#define PMCR_PMCLR 0x00002000
+#define PMCR_PMST 0x00004000
+#define PMCR_PMEN 0x00008000
+
+static struct sh_pmu sh7750_pmu;
+
+/*
+ * There are a number of events supported by each counter (33 in total).
+ * Since we have 2 counters, each counter takes an event code that
+ * corresponds to its PMCR PMM setting. Each counter can be configured
+ * independently.
+ *
+ * Event Code Description
+ * ---------- -----------
+ *
+ * 0x01 Operand read access
+ * 0x02 Operand write access
+ * 0x03 UTLB miss
+ * 0x04 Operand cache read miss
+ * 0x05 Operand cache write miss
+ * 0x06 Instruction fetch (w/ cache)
+ * 0x07 Instruction TLB miss
+ * 0x08 Instruction cache miss
+ * 0x09 All operand accesses
+ * 0x0a All instruction accesses
+ * 0x0b OC RAM operand access
+ * 0x0d On-chip I/O space access
+ * 0x0e Operand access (r/w)
+ * 0x0f Operand cache miss (r/w)
+ * 0x10 Branch instruction
+ * 0x11 Branch taken
+ * 0x12 BSR/BSRF/JSR
+ * 0x13 Instruction execution
+ * 0x14 Instruction execution in parallel
+ * 0x15 FPU Instruction execution
+ * 0x16 Interrupt
+ * 0x17 NMI
+ * 0x18 trapa instruction execution
+ * 0x19 UBCA match
+ * 0x1a UBCB match
+ * 0x21 Instruction cache fill
+ * 0x22 Operand cache fill
+ * 0x23 Elapsed time
+ * 0x24 Pipeline freeze by I-cache miss
+ * 0x25 Pipeline freeze by D-cache miss
+ * 0x27 Pipeline freeze by branch instruction
+ * 0x28 Pipeline freeze by CPU register
+ * 0x29 Pipeline freeze by FPU
+ */
+
+static const int sh7750_general_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0023,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010,
+ [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int sh7750_cache_events
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0001,
+ [ C(RESULT_MISS) ] = 0x0004,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0002,
+ [ C(RESULT_MISS) ] = 0x0005,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0006,
+ [ C(RESULT_MISS) ] = 0x0008,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0003,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0007,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static int sh7750_event_map(int event)
+{
+ return sh7750_general_events[event];
+}
+
+static u64 sh7750_pmu_read(int idx)
+{
+ return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) |
+ __raw_readl(PMCTRL(idx));
+}
+
+static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readw(PMCR(idx));
+ tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN);
+ __raw_writew(tmp, PMCR(idx));
+}
+
+static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+ __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx));
+ __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx));
+}
+
+static void sh7750_pmu_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh7750_pmu.num_events; i++)
+ __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
+}
+
+static void sh7750_pmu_enable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh7750_pmu.num_events; i++)
+ __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i));
+}
+
+static struct sh_pmu sh7750_pmu = {
+ .name = "sh7750",
+ .num_events = 2,
+ .event_map = sh7750_event_map,
+ .max_events = ARRAY_SIZE(sh7750_general_events),
+ .raw_event_mask = PMCR_PMM_MASK,
+ .cache_events = &sh7750_cache_events,
+ .read = sh7750_pmu_read,
+ .disable = sh7750_pmu_disable,
+ .enable = sh7750_pmu_enable,
+ .disable_all = sh7750_pmu_disable_all,
+ .enable_all = sh7750_pmu_enable_all,
+};
+
+static int __init sh7750_pmu_init(void)
+{
+ /*
+ * Make sure this CPU actually has perf counters.
+ */
+ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+ pr_notice("HW perf events unsupported, software events only.\n");
+ return -ENODEV;
+ }
+
+ return register_sh_pmu(&sh7750_pmu);
+}
+early_initcall(sh7750_pmu_init);
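
Each SH7750 counter is 48 bits wide, split across PMCTRH (upper 16 bits) and PMCTRL (lower 32 bits), which is what sh7750_pmu_read() composes. A quick standalone illustration of that composition, using made-up register values:

/*
 * Standalone illustration of the 48-bit counter read above: top 16
 * bits from PMCTRH, bottom 32 bits from PMCTRL. Register values here
 * are invented.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t compose(uint32_t pmctrh, uint32_t pmctrl)
{
	return ((uint64_t)(pmctrh & 0xffff) << 32) | pmctrl;
}

int main(void)
{
	/* e.g. high half 0x0001, low half 0x23456789 */
	printf("counter = 0x%012llx\n",
	       (unsigned long long)compose(0x0001, 0x23456789));
	return 0;
}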
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 9e89984c4f1..a521bcf5069 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-int __init detect_cpu_and_cache_system(void)
+void cpu_probe(void)
{
unsigned long pvr, prr, cvr;
unsigned long size;
@@ -28,9 +28,9 @@ int __init detect_cpu_and_cache_system(void)
[9] = (1 << 16)
};
- pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff;
- prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
- cvr = (ctrl_inl(CCN_CVR));
+ pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff;
+ prr = (__raw_readl(CCN_PRR) >> 4) & 0xff;
+ cvr = (__raw_readl(CCN_CVR));
/*
* Setup some sane SH-4 defaults for the icache
@@ -50,18 +50,33 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.dcache.ways = 1;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
+ /* We don't know the chip cut */
+ boot_cpu_data.cut_major = boot_cpu_data.cut_minor = -1;
+
/*
* Setup some generic flags we can probe on SH-4A parts
*/
if (((pvr >> 16) & 0xff) == 0x10) {
- if ((cvr & 0x10000000) == 0)
+ boot_cpu_data.family = CPU_FAMILY_SH4A;
+
+ if ((cvr & 0x10000000) == 0) {
boot_cpu_data.flags |= CPU_HAS_DSP;
+ boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP;
+ }
- boot_cpu_data.flags |= CPU_HAS_LLSC;
+ boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER;
+ boot_cpu_data.cut_major = pvr & 0x7f;
+
+ boot_cpu_data.icache.ways = 4;
+ boot_cpu_data.dcache.ways = 4;
+ } else {
+ /* And some SH-4 defaults.. */
+ boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU;
+ boot_cpu_data.family = CPU_FAMILY_SH4;
}
- /* FPU detection works for everyone */
- if ((cvr & 0x20000000) == 1)
+ /* FPU detection works for almost everyone */
+ if ((cvr & 0x20000000))
boot_cpu_data.flags |= CPU_HAS_FPU;
/* Mask off the upper chip ID */
@@ -74,25 +89,20 @@ int __init detect_cpu_and_cache_system(void)
switch (pvr) {
case 0x205:
boot_cpu_data.type = CPU_SH7750;
- boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
- CPU_HAS_PERF_COUNTER;
+ boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG |
+ CPU_HAS_PERF_COUNTER;
break;
case 0x206:
boot_cpu_data.type = CPU_SH7750S;
- boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
- CPU_HAS_PERF_COUNTER;
+ boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG |
+ CPU_HAS_PERF_COUNTER;
break;
case 0x1100:
boot_cpu_data.type = CPU_SH7751;
- boot_cpu_data.flags |= CPU_HAS_FPU;
break;
case 0x2001:
case 0x2004:
boot_cpu_data.type = CPU_SH7770;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
-
- boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_LLSC;
break;
case 0x2006:
case 0x200A:
@@ -103,52 +113,64 @@ int __init detect_cpu_and_cache_system(void)
else
boot_cpu_data.type = CPU_SH7780;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
-
- boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
- CPU_HAS_LLSC;
break;
case 0x3000:
case 0x3003:
case 0x3009:
boot_cpu_data.type = CPU_SH7343;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.flags |= CPU_HAS_LLSC;
break;
case 0x3004:
case 0x3007:
boot_cpu_data.type = CPU_SH7785;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
- CPU_HAS_LLSC;
+ break;
+ case 0x4004:
+ case 0x4005:
+ boot_cpu_data.type = CPU_SH7786;
+ boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE;
break;
case 0x3008:
- if (prr == 0xa0 || prr == 0xa1) {
+ switch (prr) {
+ case 0x50:
+ case 0x51:
+ boot_cpu_data.type = CPU_SH7723;
+ boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
+ break;
+ case 0x70:
+ boot_cpu_data.type = CPU_SH7366;
+ break;
+ case 0xa0:
+ case 0xa1:
boot_cpu_data.type = CPU_SH7722;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.flags |= CPU_HAS_LLSC;
+ break;
}
- else if (prr == 0x70) {
- boot_cpu_data.type = CPU_SH7366;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.flags |= CPU_HAS_LLSC;
+ break;
+ case 0x300b:
+ switch (prr) {
+ case 0x20:
+ boot_cpu_data.type = CPU_SH7724;
+ boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
+ break;
+ case 0x10:
+ case 0x11:
+ boot_cpu_data.type = CPU_SH7757;
+ break;
+ case 0xd0:
+ case 0x40: /* yon-ten-go */
+ boot_cpu_data.type = CPU_SH7372;
+ break;
+ case 0xE0: /* 0x4E0 */
+ boot_cpu_data.type = CPU_SH7734; /* SH7733/SH7734 */
+ break;
+
}
break;
case 0x4000: /* 1st cut */
case 0x4001: /* 2nd cut */
boot_cpu_data.type = CPU_SHX3;
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.dcache.ways = 4;
- boot_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
- CPU_HAS_LLSC;
break;
case 0x700:
boot_cpu_data.type = CPU_SH4_501;
+ boot_cpu_data.flags &= ~CPU_HAS_FPU;
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
break;
@@ -156,7 +178,6 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.type = CPU_SH4_202;
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
- boot_cpu_data.flags |= CPU_HAS_FPU;
break;
case 0x500 ... 0x501:
switch (prr) {
@@ -174,23 +195,9 @@ int __init detect_cpu_and_cache_system(void)
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
- boot_cpu_data.flags |= CPU_HAS_FPU;
-
- break;
- default:
- boot_cpu_data.type = CPU_SH_NONE;
break;
}
-#ifdef CONFIG_SH_DIRECT_MAPPED
- boot_cpu_data.icache.ways = 1;
- boot_cpu_data.dcache.ways = 1;
-#endif
-
-#ifdef CONFIG_CPU_HAS_PTEA
- boot_cpu_data.flags |= CPU_HAS_PTEA;
-#endif
-
/*
* On anything that's not a direct-mapped cache, look to the CVR
* for I/D-cache specifics.
@@ -210,38 +217,47 @@ int __init detect_cpu_and_cache_system(void)
}
/*
- * Setup the L2 cache desc
- *
* SH-4A's have an optional PIPT L2.
*/
if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
/*
- * Size calculation is much more sensible
- * than it is for the L1.
- *
- * Sizes are 128KB, 258KB, 512KB, and 1MB.
+ * Verify that it really has something hooked up, this
+ * is the safety net for CPUs that have optional L2
+ * support yet do not implement it.
*/
- size = (cvr & 0xf) << 17;
+ if ((cvr & 0xf) == 0)
+ boot_cpu_data.flags &= ~CPU_HAS_L2_CACHE;
+ else {
+ /*
+ * Silicon and specifications have clearly never
+ * met..
+ */
+ cvr ^= 0xf;
- BUG_ON(!size);
+ /*
+ * Size calculation is much more sensible
+ * than it is for the L1.
+ *
+ * Sizes are 128KB, 256KB, 512KB, and 1MB.
+ */
+ size = (cvr & 0xf) << 17;
- boot_cpu_data.scache.way_incr = (1 << 16);
- boot_cpu_data.scache.entry_shift = 5;
- boot_cpu_data.scache.ways = 4;
- boot_cpu_data.scache.linesz = L1_CACHE_BYTES;
+ boot_cpu_data.scache.way_incr = (1 << 16);
+ boot_cpu_data.scache.entry_shift = 5;
+ boot_cpu_data.scache.ways = 4;
+ boot_cpu_data.scache.linesz = L1_CACHE_BYTES;
- boot_cpu_data.scache.entry_mask =
- (boot_cpu_data.scache.way_incr -
- boot_cpu_data.scache.linesz);
+ boot_cpu_data.scache.entry_mask =
+ (boot_cpu_data.scache.way_incr -
+ boot_cpu_data.scache.linesz);
- boot_cpu_data.scache.sets = size /
- (boot_cpu_data.scache.linesz *
- boot_cpu_data.scache.ways);
+ boot_cpu_data.scache.sets = size /
+ (boot_cpu_data.scache.linesz *
+ boot_cpu_data.scache.ways);
- boot_cpu_data.scache.way_size =
- (boot_cpu_data.scache.sets *
- boot_cpu_data.scache.linesz);
+ boot_cpu_data.scache.way_size =
+ (boot_cpu_data.scache.sets *
+ boot_cpu_data.scache.linesz);
+ }
}
-
- return 0;
}
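
The reworked L2 branch first treats a zero CVR nibble as "no L2 fitted", then inverts the nibble before the size shift, so the size works out to ((cvr ^ 0xf) & 0xf) << 17. A worked standalone example of that arithmetic (the sample nibble values are invented):

/*
 * Arithmetic of the L2 sizing branch above: a zero nibble means the
 * optional L2 is absent; otherwise the nibble is inverted with ^ 0xf
 * and shifted by 17 to get the aggregate size. Only the computation
 * mirrors the kernel code; the sample values are made up.
 */
#include <stdio.h>

static unsigned long l2_size(unsigned int cvr)
{
	if ((cvr & 0xf) == 0)
		return 0;		/* optional L2 not implemented */
	cvr ^= 0xf;			/* field is stored inverted */
	return (unsigned long)(cvr & 0xf) << 17;
}

int main(void)
{
	unsigned int samples[] = { 0x0, 0xe, 0xd, 0xb, 0x7 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nibble 0x%x -> %lu KB\n",
		       samples[i], l2_size(samples[i]) / 1024);	/* 0, 128, 256, 512, 1024 */
	return 0;
}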
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
index 7371abf64f8..e7a7b3cdf68 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -2,6 +2,7 @@
* SH4-202 Setup
*
* Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2009 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,28 +12,58 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/io.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe80000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe80000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+ DEFINE_RES_IRQ(evt2irq(0x720)),
+ DEFINE_RES_IRQ(evt2irq(0x760)),
+ DEFINE_RES_IRQ(evt2irq(0x740)),
};
-static struct platform_device sci_device = {
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
},
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh4202_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &tmu0_device,
};
static int __init sh4202_devices_setup(void)
@@ -40,9 +71,72 @@ static int __init sh4202_devices_setup(void)
return platform_add_devices(sh4202_devices,
ARRAY_SIZE(sh4202_devices));
}
-__initcall(sh4202_devices_setup);
+arch_initcall(sh4202_devices_setup);
+
+static struct platform_device *sh4202_early_devices[] __initdata = {
+ &scif0_device,
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh4202_early_devices,
+ ARRAY_SIZE(sh4202_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
+ HUDI, TMU0, TMU1, TMU2, RTC, SCIF, WDT,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720),
+ INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760),
+ INTC_VECT(WDT, 0x560),
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
+ { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, 0, 0, 0 } },
+ { 0xffd0000c, 0, 16, 4, /* IPRC */ { 0, 0, SCIF, HUDI } },
+ { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh4-202", vectors, NULL,
+ NULL, prio_registers, NULL);
+
+static struct intc_vect vectors_irlm[] __initdata = {
+ INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
+ INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irlm, "sh4-202_irlm", vectors_irlm, NULL,
+ NULL, prio_registers, NULL);
void __init plat_irq_setup(void)
{
- /* do nothing - all IRL interrupts are handled by the board code */
+ register_intc_controller(&intc_desc);
+}
+
+#define INTC_ICR 0xffd00000UL
+#define INTC_ICR_IRLM (1<<7)
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ register_intc_controller(&intc_desc_irlm);
+ break;
+ default:
+ BUG();
+ }
}
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index ec884039b91..5f08c59b9f3 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -1,5 +1,5 @@
/*
- * SH7750/SH7751 Setup
+ * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
@@ -12,7 +12,10 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/io.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
#include <linux/serial_sci.h>
+#include <generated/machtypes.h>
static struct resource rtc_resources[] = {
[0] = {
@@ -21,18 +24,8 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- /* Period IRQ */
- .start = 21,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = 20,
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
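
From here on the hard-coded IRQ numbers become evt2irq() on the exception vector. Assuming the usual SH definition of (evt >> 5) - 16, the conversion reproduces the numbers being deleted: 0x480 gives 20, the old RTC alarm IRQ in the hunk above, and 0x700 gives 40, the first of the old SCIF IRQs. A quick standalone check:

/*
 * Worked example of the evt2irq() conversion used in the resources
 * above; the (evt >> 5) - 16 formula is assumed to match the SH
 * definition in <linux/sh_intc.h>.
 */
#include <stdio.h>

#define evt2irq(evt)	(((evt) >> 5) - 16)

int main(void)
{
	printf("evt2irq(0x480) = %d\n", evt2irq(0x480));	/* 20 */
	printf("evt2irq(0x700) = %d\n", evt2irq(0x700));	/* 40 */
	printf("evt2irq(0x400) = %d\n", evt2irq(0x400));	/* 16 (TMU0 vector 0x400) */
	return 0;
}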
@@ -44,86 +37,176 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
-#ifndef CONFIG_SH_RTS7751R2D
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 23, 24, 25, 0 },
- }, {
-#endif
- .mapbase = 0xffe80000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port sci_platform_data = {
+ .port_reg = 0xffe0001C,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE,
+ .type = PORT_SCI,
+ .regshift = 2,
+};
+
+static struct resource sci_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x4e0)),
};
static struct platform_device sci_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = sci_resources,
+ .num_resources = ARRAY_SIZE(sci_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &sci_platform_data,
+ },
+};
+
+static struct plat_sci_port scif_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif_resources[] = {
+ DEFINE_RES_MEM(0xffe80000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+};
+
+static struct platform_device scif_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif_resources,
+ .num_resources = ARRAY_SIZE(scif_resources),
+ .dev = {
+ .platform_data = &scif_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xfe100000, 0x20),
+ DEFINE_RES_IRQ(evt2irq(0xb00)),
+ DEFINE_RES_IRQ(evt2irq(0xb80)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
},
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
};
+#endif
+
static struct platform_device *sh7750_devices[] __initdata = {
&rtc_device,
- &sci_device,
+ &tmu0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+ &tmu1_device,
+#endif
};
static int __init sh7750_devices_setup(void)
{
+ if (mach_is_rts7751r2d()) {
+ platform_device_register(&scif_device);
+ } else {
+ platform_device_register(&sci_device);
+ platform_device_register(&scif_device);
+ }
+
return platform_add_devices(sh7750_devices,
ARRAY_SIZE(sh7750_devices));
}
-__initcall(sh7750_devices_setup);
+arch_initcall(sh7750_devices_setup);
+
+static struct platform_device *sh7750_early_devices[] __initdata = {
+ &tmu0_device,
+#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7751R)
+ &tmu1_device,
+#endif
+};
+
+void __init plat_early_device_setup(void)
+{
+ struct platform_device *dev[1];
+
+ if (mach_is_rts7751r2d()) {
+ scif_platform_data.scscr |= SCSCR_CKE1;
+ dev[0] = &scif_device;
+ early_platform_add_devices(dev, 1);
+ } else {
+ dev[0] = &sci_device;
+ early_platform_add_devices(dev, 1);
+ dev[0] = &scif_device;
+ early_platform_add_devices(dev, 1);
+ }
+
+ early_platform_add_devices(sh7750_early_devices,
+ ARRAY_SIZE(sh7750_early_devices));
+}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
- HUDI, GPIOI,
- DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3,
- DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7,
- DMAC_DMAE,
+ HUDI, GPIOI, DMAC,
PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3,
- TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI,
- RTC_ATI, RTC_PRI, RTC_CUI,
- SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI,
- SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI,
- WDT,
- REF_RCMI, REF_ROVI,
+ TMU3, TMU4, TMU0, TMU1, TMU2, RTC, SCI1, SCIF, WDT, REF,
/* interrupt groups */
- DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF,
+ PCIC1,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
- INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
- INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
- INTC_VECT(RTC_CUI, 0x4c0),
- INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500),
- INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540),
- INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720),
- INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
+ INTC_VECT(SCI1, 0x4e0), INTC_VECT(SCI1, 0x500),
+ INTC_VECT(SCI1, 0x520), INTC_VECT(SCI1, 0x540),
+ INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720),
+ INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760),
INTC_VECT(WDT, 0x560),
- INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0),
-};
-
-static struct intc_group groups[] __initdata = {
- INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI),
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
- INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI),
- INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI),
- INTC_GROUP(REF, REF_RCMI, REF_ROVI),
+ INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -136,7 +219,7 @@ static struct intc_prio_reg prio_registers[] __initdata = {
PCIC1, PCIC0_PCISERR } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
+static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, NULL,
NULL, prio_registers, NULL);
/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
@@ -145,39 +228,28 @@ static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
defined(CONFIG_CPU_SUBTYPE_SH7751) || \
defined(CONFIG_CPU_SUBTYPE_SH7091)
static struct intc_vect vectors_dma4[] __initdata = {
- INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660),
- INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0),
- INTC_VECT(DMAC_DMAE, 0x6c0),
-};
-
-static struct intc_group groups_dma4[] __initdata = {
- INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2,
- DMAC_DMTE3, DMAC_DMAE),
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x6c0),
};
static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
- vectors_dma4, groups_dma4,
+ vectors_dma4, NULL,
NULL, prio_registers, NULL);
#endif
/* SH7750R and SH7751R both have 8-channel DMA controllers */
#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct intc_vect vectors_dma8[] __initdata = {
- INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660),
- INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0),
- INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0),
- INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0),
- INTC_VECT(DMAC_DMAE, 0x6c0),
-};
-
-static struct intc_group groups_dma8[] __initdata = {
- INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2,
- DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5,
- DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE),
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
+ INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
+ INTC_VECT(DMAC, 0x6c0),
};
static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
- vectors_dma8, groups_dma8,
+ vectors_dma8, NULL,
NULL, prio_registers, NULL);
#endif
@@ -285,7 +357,7 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
- ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irlm);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
index 254c5c55ab9..973b736b3b9 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -10,17 +10,17 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
#include <linux/serial_sci.h>
+#include <linux/io.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRL0, IRL1, IRL2, IRL3,
- HUDI, GPIOI,
- DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3,
- DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7,
- DMAC_DMAE,
+ HUDI, GPIOI, DMAC,
IRQ4, IRQ5, IRQ6, IRQ7,
HCAN20, HCAN21,
SSI0, SSI1,
@@ -35,21 +35,20 @@ enum {
HSPI,
MMCIF0, MMCIF1, MMCIF2, MMCIF3,
MFI, ADC, CMT,
- TMU0, TMU1, TMU2_TUNI, TMU2_TICPI,
- WDT,
- REF_RCMI, REF_ROVI,
+ TMU0, TMU1, TMU2,
+ WDT, REF,
/* interrupt groups */
- DMAC, DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF, TMU2, REF,
+ DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
- INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660),
- INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0),
- INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0),
- INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0),
- INTC_VECT(DMAC_DMAE, 0x6c0),
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
+ INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
+ INTC_VECT(DMAC, 0x6c0),
INTC_VECT(IRQ4, 0x800), INTC_VECT(IRQ5, 0x820),
INTC_VECT(IRQ6, 0x840), INTC_VECT(IRQ6, 0x860),
INTC_VECT(HCAN20, 0x900), INTC_VECT(HCAN21, 0x920),
@@ -73,23 +72,18 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(MFI, 0xe80), /* 0xf80 according to data sheet */
INTC_VECT(ADC, 0xf80), INTC_VECT(CMT, 0xfa0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
- INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(WDT, 0x560),
- INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0),
+ INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
};
static struct intc_group groups[] __initdata = {
- INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2,
- DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5,
- DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE),
INTC_GROUP(DMABRG, DMABRG0, DMABRG1, DMABRG2),
INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
INTC_GROUP(MMCIF, MMCIF0, MMCIF1, MMCIF2, MMCIF3),
- INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI),
- INTC_GROUP(REF, REF_RCMI, REF_ROVI),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -133,42 +127,133 @@ static struct intc_vect vectors_irq[] __initdata = {
static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
mask_registers, prio_registers, NULL);
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xfe600000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
- }, {
- .mapbase = 0xfe610000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 72, 73, 75, 74 },
- }, {
- .mapbase = 0xfe620000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 77, 79, 78 },
- }, {
- .mapbase = 0xfe480000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCI,
- .irqs = { 80, 81, 82, 0 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xfe600000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+ DEFINE_RES_IRQ(evt2irq(0x8a0)),
+ DEFINE_RES_IRQ(evt2irq(0x8e0)),
+ DEFINE_RES_IRQ(evt2irq(0x8c0)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xfe610000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xb00)),
+ DEFINE_RES_IRQ(evt2irq(0xb20)),
+ DEFINE_RES_IRQ(evt2irq(0xb60)),
+ DEFINE_RES_IRQ(evt2irq(0xb40)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xfe620000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xb80)),
+ DEFINE_RES_IRQ(evt2irq(0xba0)),
+ DEFINE_RES_IRQ(evt2irq(0xbe0)),
+ DEFINE_RES_IRQ(evt2irq(0xbc0)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCI,
+ .regshift = 2,
};
-static struct platform_device sci_device = {
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xfe480000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
+ DEFINE_RES_IRQ(evt2irq(0xc20)),
+ DEFINE_RES_IRQ(evt2irq(0xc40)),
+};
+
+static struct platform_device scif3_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
},
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
};
+
static struct platform_device *sh7760_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &tmu0_device,
};
static int __init sh7760_devices_setup(void)
@@ -176,12 +261,30 @@ static int __init sh7760_devices_setup(void)
return platform_add_devices(sh7760_devices,
ARRAY_SIZE(sh7760_devices));
}
-__initcall(sh7760_devices_setup);
+arch_initcall(sh7760_devices_setup);
+
+static struct platform_device *sh7760_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7760_early_devices,
+ ARRAY_SIZE(sh7760_early_devices));
+}
+
+#define INTC_ICR 0xffd00000UL
+#define INTC_ICR_IRLM (1 << 7)
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
+ __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irq);
break;
default:
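Note on the IRQ conversion above: the per-port SCIF and TMU resources now use evt2irq() on INTEVT vector codes instead of hard-coded IRQ numbers. A minimal sketch of that mapping, assuming the stock definition from arch/sh/include/asm/irq.h when CONFIG_CPU_HAS_INTEVT is set:

/*
 * evt2irq()/irq2evt() relate a 0x20-aligned INTEVT vector code to a Linux
 * IRQ number; the offset of 16 skips the low exception vectors.
 */
#define evt2irq(evt)	(((evt) >> 5) - 16)
#define irq2evt(irq)	(((irq) + 16) << 5)

/* e.g. the SCIF0 ERI vector: evt2irq(0x880) == (0x880 >> 5) - 16 == 52 */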
diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c
index 7b2d337ee41..42edf2e54e8 100644
--- a/arch/sh/kernel/cpu/sh4/softfloat.c
+++ b/arch/sh/kernel/cpu/sh4/softfloat.c
@@ -36,7 +36,8 @@
* and Kamel Khelifi <kamel.khelifi@st.com>
*/
#include <linux/kernel.h>
-#include <asm/cpu/fpu.h>
+#include <cpu/fpu.h>
+#include <asm/div64.h>
#define LIT64( a ) a##LL
@@ -67,16 +68,16 @@ typedef unsigned long long float64;
extern void float_raise(unsigned int flags); /* in fpu.c */
extern int float_rounding_mode(void); /* in fpu.c */
-inline bits64 extractFloat64Frac(float64 a);
-inline flag extractFloat64Sign(float64 a);
-inline int16 extractFloat64Exp(float64 a);
-inline int16 extractFloat32Exp(float32 a);
-inline flag extractFloat32Sign(float32 a);
-inline bits32 extractFloat32Frac(float32 a);
-inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
-inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
-inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
-inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
+bits64 extractFloat64Frac(float64 a);
+flag extractFloat64Sign(float64 a);
+int16 extractFloat64Exp(float64 a);
+int16 extractFloat32Exp(float32 a);
+flag extractFloat32Sign(float32 a);
+bits32 extractFloat32Frac(float32 a);
+float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
+void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
+float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
+void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
float64 float64_sub(float64 a, float64 b);
float32 float32_sub(float32 a, float32 b);
float32 float32_add(float32 a, float32 b);
@@ -85,11 +86,12 @@ float64 float64_div(float64 a, float64 b);
float32 float32_div(float32 a, float32 b);
float32 float32_mul(float32 a, float32 b);
float64 float64_mul(float64 a, float64 b);
-inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+float32 float64_to_float32(float64 a);
+void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr);
-inline void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr);
-inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
+void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
static int8 countLeadingZeros32(bits32 a);
static int8 countLeadingZeros64(bits64 a);
@@ -109,42 +111,42 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
bits32 * zSigPtr);
-inline bits64 extractFloat64Frac(float64 a)
+bits64 extractFloat64Frac(float64 a)
{
return a & LIT64(0x000FFFFFFFFFFFFF);
}
-inline flag extractFloat64Sign(float64 a)
+flag extractFloat64Sign(float64 a)
{
return a >> 63;
}
-inline int16 extractFloat64Exp(float64 a)
+int16 extractFloat64Exp(float64 a)
{
return (a >> 52) & 0x7FF;
}
-inline int16 extractFloat32Exp(float32 a)
+int16 extractFloat32Exp(float32 a)
{
return (a >> 23) & 0xFF;
}
-inline flag extractFloat32Sign(float32 a)
+flag extractFloat32Sign(float32 a)
{
return a >> 31;
}
-inline bits32 extractFloat32Frac(float32 a)
+bits32 extractFloat32Frac(float32 a)
{
return a & 0x007FFFFF;
}
-inline float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
+float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
{
return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
}
-inline void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
+void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
{
bits64 z;
@@ -337,12 +339,12 @@ static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
}
-inline float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
+float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
{
return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
}
-inline void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
+void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
{
bits32 z;
if (count == 0) {
@@ -633,7 +635,7 @@ normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
*zExpPtr = 1 - shiftCount;
}
-inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
+void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr)
{
bits64 z1;
@@ -643,7 +645,7 @@ inline void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
*z0Ptr = a0 + b0 + (z1 < a1);
}
-inline void
+void
sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr)
{
@@ -655,11 +657,14 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
{
bits64 b0, b1;
bits64 rem0, rem1, term0, term1;
- bits64 z;
+ bits64 z, tmp;
if (b <= a0)
return LIT64(0xFFFFFFFFFFFFFFFF);
b0 = b >> 32;
- z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : (a0 / b0) << 32;
+ tmp = a0;
+ do_div(tmp, b0);
+
+ z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32;
mul64To128(b, z, &term0, &term1);
sub128(a0, a1, term0, term1, &rem0, &rem1);
while (((sbits64) rem0) < 0) {
@@ -668,11 +673,13 @@ static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
add128(rem0, rem1, b0, b1, &rem0, &rem1);
}
rem0 = (rem0 << 32) | (rem1 >> 32);
- z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : rem0 / b0;
+ tmp = rem0;
+ do_div(tmp, b0);
+ z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : tmp;
return z;
}
-inline void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
+void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
{
bits32 aHigh, aLow, bHigh, bLow;
bits64 z0, zMiddleA, zMiddleB, z1;
@@ -768,7 +775,8 @@ float32 float32_div(float32 a, float32 b)
{
flag aSign, bSign, zSign;
int16 aExp, bExp, zExp;
- bits32 aSig, bSig, zSig;
+ bits32 aSig, bSig;
+ uint64_t zSig;
aSig = extractFloat32Frac(a);
aExp = extractFloat32Exp(a);
@@ -803,11 +811,13 @@ float32 float32_div(float32 a, float32 b)
aSig >>= 1;
++zExp;
}
- zSig = (((bits64) aSig) << 32) / bSig;
+ zSig = (((bits64) aSig) << 32);
+ do_div(zSig, bSig);
+
if ((zSig & 0x3F) == 0) {
zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
}
- return roundAndPackFloat32(zSign, zExp, zSig);
+ return roundAndPackFloat32(zSign, zExp, (bits32)zSig);
}
@@ -890,3 +900,31 @@ float64 float64_mul(float64 a, float64 b)
}
return roundAndPackFloat64(zSign, zExp, zSig0);
}
+
+/*
+ * -------------------------------------------------------------------------------
+ * Returns the result of converting the double-precision floating-point value
+ * `a' to the single-precision floating-point format. The conversion is
+ * performed according to the IEC/IEEE Standard for Binary Floating-point
+ * Arithmetic.
+ * -------------------------------------------------------------------------------
+ */
+float32 float64_to_float32(float64 a)
+{
+ flag aSign;
+ int16 aExp;
+ bits64 aSig;
+ bits32 zSig;
+
+ aSig = extractFloat64Frac( a );
+ aExp = extractFloat64Exp( a );
+ aSign = extractFloat64Sign( a );
+
+ shift64RightJamming( aSig, 22, &aSig );
+ zSig = aSig;
+ if ( aExp || zSig ) {
+ zSig |= 0x40000000;
+ aExp -= 0x381;
+ }
+ return roundAndPackFloat32(aSign, aExp, zSig);
+}
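The estimateDiv128To64() and float32_div() hunks replace open-coded 64-by-32 divisions with do_div(), hence the new <asm/div64.h> include; a plain u64 division would pull in libgcc helpers the kernel does not link against. A minimal sketch of the do_div() calling convention, using a hypothetical helper purely for illustration:

#include <linux/types.h>
#include <asm/div64.h>

/* Hypothetical round-up helper; only the do_div() usage pattern matters. */
static inline u64 div_round_up64(u64 n, u32 base)
{
	u32 rem;

	rem = do_div(n, base);	/* n is divided in place, the remainder is returned */
	return n + (rem != 0);
}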
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 9561b02ade0..0a47bd3e7be 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -13,16 +13,17 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
+#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
-#include <asm/cpu/sq.h>
+#include <cpu/sq.h>
struct sq_mapping;
@@ -43,9 +44,9 @@ static unsigned long *sq_bitmap;
#define store_queue_barrier() \
do { \
- (void)ctrl_inl(P4SEG_STORE_QUE); \
- ctrl_outl(0, P4SEG_STORE_QUE + 0); \
- ctrl_outl(0, P4SEG_STORE_QUE + 8); \
+ (void)__raw_readl(P4SEG_STORE_QUE); \
+ __raw_writel(0, P4SEG_STORE_QUE + 0); \
+ __raw_writel(0, P4SEG_STORE_QUE + 8); \
} while (0);
/**
@@ -100,7 +101,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
spin_unlock_irq(&sq_mapping_lock);
}
-static int __sq_remap(struct sq_mapping *map, unsigned long flags)
+static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
struct vm_struct *vma;
@@ -113,7 +114,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
if (ioremap_page_range((unsigned long)vma->addr,
(unsigned long)vma->addr + map->size,
- vma->phys_addr, __pgprot(flags))) {
+ vma->phys_addr, prot)) {
vunmap(vma->addr);
return -EAGAIN;
}
@@ -123,8 +124,8 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
* straightforward, as we can just load up each queue's QACR with
* the physical address appropriately masked.
*/
- ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
- ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
+ __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
+ __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif
return 0;
@@ -135,14 +136,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
- * @flags: Protection flags.
+ * @prot: Protection bits.
*
* Remaps the physical address @phys through the next available store queue
* address of @size length. @name is logged at boot time as well as through
* the sysfs interface.
*/
unsigned long sq_remap(unsigned long phys, unsigned int size,
- const char *name, unsigned long flags)
+ const char *name, pgprot_t prot)
{
struct sq_mapping *map;
unsigned long end;
@@ -177,7 +178,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
- ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+ ret = __sq_remap(map, prot);
if (unlikely(ret != 0))
goto out;
@@ -199,7 +200,7 @@ EXPORT_SYMBOL(sq_remap);
/**
* sq_unmap - Unmap a Store Queue allocation
- * @map: Pre-allocated Store Queue mapping.
+ * @vaddr: Pre-allocated Store Queue mapping.
*
* Unmaps the store queue allocation @map that was previously created by
* sq_remap(). Also frees up the pte that was previously inserted into
@@ -309,8 +310,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
return -EIO;
if (likely(len)) {
- int ret = sq_remap(base, len, "Userspace",
- pgprot_val(PAGE_SHARED));
+ int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
if (ret < 0)
return ret;
} else
@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
NULL,
};
-static struct sysfs_ops sq_sysfs_ops = {
+static const struct sysfs_ops sq_sysfs_ops = {
.show = sq_sysfs_show,
.store = sq_sysfs_store,
};
@@ -337,9 +337,9 @@ static struct kobj_type ktype_percpu_entry = {
.default_attrs = sq_sysfs_attrs,
};
-static int __devinit sq_sysdev_add(struct sys_device *sysdev)
+static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sysdev->id;
+ unsigned int cpu = dev->id;
struct kobject *kobj;
int error;
@@ -348,25 +348,27 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev)
return -ENOMEM;
kobj = sq_kobject[cpu];
- error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
+ error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
"%s", "sq");
if (!error)
kobject_uevent(kobj, KOBJ_ADD);
return error;
}
-static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+static int sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sysdev->id;
+ unsigned int cpu = dev->id;
struct kobject *kobj = sq_kobject[cpu];
kobject_put(kobj);
return 0;
}
-static struct sysdev_driver sq_sysdev_driver = {
- .add = sq_sysdev_add,
- .remove = __devexit_p(sq_sysdev_remove),
+static struct subsys_interface sq_interface = {
+ .name = "sq",
+ .subsys = &cpu_subsys,
+ .add_dev = sq_dev_add,
+ .remove_dev = sq_dev_remove,
};
static int __init sq_api_init(void)
@@ -386,7 +388,7 @@ static int __init sq_api_init(void)
if (unlikely(!sq_bitmap))
goto out;
- ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
+ ret = subsys_interface_register(&sq_interface);
if (unlikely(ret != 0))
goto out;
@@ -401,7 +403,7 @@ out:
static void __exit sq_api_exit(void)
{
- sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
+ subsys_interface_unregister(&sq_interface);
kfree(sq_bitmap);
kmem_cache_destroy(sq_cache);
}
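API note on the sq.c change: sq_remap() now takes a pgprot_t instead of raw flag bits, and __sq_remap() no longer ORs in pgprot_val(PAGE_KERNEL_NOCACHE) behind the caller's back, so callers pick the caching attributes themselves. A sketch of an updated call site; the name, address and length are placeholders, not taken from this patch:

#include <asm/pgtable.h>
#include <cpu/sq.h>

static unsigned long map_example_region(unsigned long phys, unsigned int len)
{
	/* old prototype: sq_remap(phys, len, "example", pgprot_val(PAGE_SHARED)); */
	return sq_remap(phys, len, "example", PAGE_SHARED);
}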
diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile
index 5d890ac8e79..0705df77520 100644
--- a/arch/sh/kernel/cpu/sh4a/Makefile
+++ b/arch/sh/kernel/cpu/sh4a/Makefile
@@ -3,27 +3,50 @@
#
# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7757) += setup-sh7757.o
obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o
obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o
obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
-obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o serial-sh7722.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7734) += setup-sh7734.o
obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o
-obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o
+obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o intc-shx3.o
# SMP setup
-smp-$(CONFIG_CPU_SUBTYPE_SHX3) := smp-shx3.o
+smp-$(CONFIG_CPU_SHX3) := smp-shx3.o
# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SUBTYPE_SH7757) := clock-sh7757.o
clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o
clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o
clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o
clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o
clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o
clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o
-clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7722.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7734) := clock-sh7734.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o
clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o
-obj-y += $(clock-y)
-obj-$(CONFIG_SMP) += $(smp-y)
+# Pinmux setup
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7734) := pinmux-sh7734.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o
+pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o
+
+obj-y += $(clock-y)
+obj-$(CONFIG_SMP) += $(smp-y)
+obj-$(CONFIG_GPIOLIB) += $(pinmux-y)
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
index 7adc4f16e95..9edc06c02dc 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c
@@ -1,99 +1,289 @@
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7343.c
*
- * SH7343/SH7722 support for the clock framework
+ * SH7343 clock framework support
*
- * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2009 Magnus Damm
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/clock.h>
-#include <asm/freq.h>
+
+/* SH7343 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
/*
- * SH7343/SH7722 uses a common set of multipliers and divisors, so this
- * is quite simple..
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
*/
-static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
-static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
-
-#define pll_calc() (((ctrl_inl(FRQCR) >> 24) & 0x1f) + 1)
+struct clk extal_clk = {
+ .rate = 33333333,
+};
-static void master_clk_init(struct clk *clk)
+/* The dll block multiplies the 32khz r_clk, may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
{
- clk->parent = clk_get(NULL, "cpu_clk");
-}
+ unsigned long mult;
-static void master_clk_recalc(struct clk *clk)
-{
- int idx = (ctrl_inl(FRQCR) & 0x000f);
- clk->rate *= clk->parent->rate * multipliers[idx] / divisors[idx];
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
}
-static struct clk_ops sh7343_master_clk_ops = {
- .init = master_clk_init,
- .recalc = master_clk_recalc,
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
};
-static void module_clk_init(struct clk *clk)
+static unsigned long pll_recalc(struct clk *clk)
{
- clk->parent = NULL;
- clk->rate = CONFIG_SH_PCLK_FREQ;
+ unsigned long mult = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+
+ return clk->parent->rate * mult;
}
-static struct clk_ops sh7343_module_clk_ops = {
- .init = module_clk_init,
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static void bus_clk_init(struct clk *clk)
-{
- clk->parent = clk_get(NULL, "cpu_clk");
-}
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
-static void bus_clk_recalc(struct clk *clk)
-{
- int idx = (ctrl_inl(FRQCR) >> 8) & 0x000f;
- clk->rate = clk->parent->rate * multipliers[idx] / divisors[idx];
-}
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
-static struct clk_ops sh7343_bus_clk_ops = {
- .init = bus_clk_init,
- .recalc = bus_clk_recalc,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
};
-static void cpu_clk_init(struct clk *clk)
-{
- clk->parent = clk_get(NULL, "module_clk");
- clk->flags |= CLK_RATE_PROPAGATES;
- clk_set_rate(clk, clk_get_rate(clk));
-}
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
-static void cpu_clk_recalc(struct clk *clk)
-{
- int idx = (ctrl_inl(FRQCR) >> 20) & 0x000f;
- clk->rate = clk->parent->rate * pll_calc() *
- multipliers[idx] / divisors[idx];
-}
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
+ DIV4_SIUA, DIV4_SIUB, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
+};
+
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
+ MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001,
+ MSTP109, MSTP108, MSTP100,
+ MSTP225, MSTP224, MSTP218, MSTP217, MSTP216,
+ MSTP214, MSTP213, MSTP212, MSTP211, MSTP208,
+ MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
+ [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
+ [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
-static struct clk_ops sh7343_cpu_clk_ops = {
- .init = cpu_clk_init,
- .recalc = cpu_clk_recalc,
+ [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
+
+ [MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0),
+ [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
+ [MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0),
+ [MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0),
+ [MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0),
+ [MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0),
+ [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
+ [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
+ [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
+ [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
-static struct clk_ops *sh7343_clk_ops[] = {
- &sh7343_master_clk_ops,
- &sh7343_module_clk_ops,
- &sh7343_bus_clk_ops,
- &sh7343_cpu_clk_ops,
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
+ CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
+ CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
+ CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP004]),
+
+ CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
+ CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
+ CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]),
+ CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]),
+ CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
+ CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+int __init arch_clk_init(void)
{
- if (idx < ARRAY_SIZE(sh7343_clk_ops))
- *ops = sh7343_clk_ops[idx];
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
}
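As the comment above extal_clk says, the 33.33 MHz default is meant to be overridden from board code with clk_set_rate(). A sketch of what that looks like; the board function name and the 27 MHz crystal value are assumptions for illustration only:

#include <linux/clk.h>
#include <linux/err.h>

static void __init example_board_clk_setup(void)
{
	struct clk *clk = clk_get(NULL, "extal");	/* matches CLKDEV_CON_ID("extal", ...) */

	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 27000000);
		clk_put(clk);
	}
}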
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
new file mode 100644
index 00000000000..955b9add781
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c
@@ -0,0 +1,282 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7366.c
+ *
+ * SH7366 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+
+/* SH7366 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The dll block multiplies the 32khz r_clk, may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
+{
+ unsigned long mult;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+ else
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
+ DIV4_SIUA, DIV4_SIUB, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
+};
+
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+#define MSTP(_parent, _reg, _bit, _flags) \
+ SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
+
+enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
+ MSTP007, MSTP006, MSTP005, MSTP002, MSTP001,
+ MSTP109, MSTP100,
+ MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217,
+ MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */
+ [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
+ [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
+ [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+
+ [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+
+ [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
+ [MSTP226] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 26, 0),
+ [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0),
+ [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0),
+ [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
+ [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT),
+ [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
+ [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
+ [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP32 clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
+ CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
+ CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
+ CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
+
+ CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]),
+ CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]),
+ CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]),
+ CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
+ CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]),
+ CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
+ CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
+ CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
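For reference, the clkdev table above is consumed through clk_get(): the dev_id string is matched against dev_name() of the requesting device and the con_id against the first argument of CLKDEV_ICK_ID()/CLKDEV_CON_ID(). A sketch of a consumer; the probe function and variable names are illustrative only:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* for pdev "sh-sci.0" this resolves CLKDEV_ICK_ID("sci_fck", "sh-sci.0", ...) */
	struct clk *fck = clk_get(&pdev->dev, "sci_fck");

	if (IS_ERR(fck))
		return PTR_ERR(fck);

	clk_enable(fck);	/* clears the MSTP007 module-stop bit */
	return 0;
}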
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
index 299138ebe16..8f07a1a3869 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c
@@ -1,633 +1,265 @@
/*
* arch/sh/kernel/cpu/sh4a/clock-sh7722.c
*
- * SH7722 & SH7366 support for the clock framework
+ * SH7722 clock framework support
*
- * Copyright (c) 2006-2007 Nomad Global Solutions Inc
- * Based on code for sh7343 by Paul Mundt
+ * Copyright (C) 2009 Magnus Damm
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
-#include <linux/errno.h>
+#include <linux/clkdev.h>
+#include <linux/sh_clk.h>
#include <asm/clock.h>
-#include <asm/freq.h>
-
-#define N (-1)
-#define NM (-2)
-#define ROUND_NEAREST 0
-#define ROUND_DOWN -1
-#define ROUND_UP +1
-
-static int adjust_algos[][3] = {
- {}, /* NO_CHANGE */
- { NM, N, 1 }, /* N:1, N:1 */
- { 3, 2, 2 }, /* 3:2:2 */
- { 5, 2, 2 }, /* 5:2:2 */
- { N, 1, 1 }, /* N:1:1 */
-
- { N, 1 }, /* N:1 */
-
- { N, 1 }, /* N:1 */
- { 3, 2 },
- { 4, 3 },
- { 5, 4 },
-
- { N, 1 }
+#include <cpu/sh7722.h>
+
+/* SH7722 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define IRDACLKCR 0xa4150018
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
};
-static unsigned long adjust_pair_of_clocks(unsigned long r1, unsigned long r2,
- int m1, int m2, int round_flag)
-{
- unsigned long rem, div;
- int the_one = 0;
-
- pr_debug( "Actual values: r1 = %ld\n", r1);
- pr_debug( "...............r2 = %ld\n", r2);
-
- if (m1 == m2) {
- r2 = r1;
- pr_debug( "setting equal rates: r2 now %ld\n", r2);
- } else if ((m2 == N && m1 == 1) ||
- (m2 == NM && m1 == N)) { /* N:1 or NM:N */
- pr_debug( "Setting rates as 1:N (N:N*M)\n");
- rem = r2 % r1;
- pr_debug( "...remainder = %ld\n", rem);
- if (rem) {
- div = r2 / r1;
- pr_debug( "...div = %ld\n", div);
- switch (round_flag) {
- case ROUND_NEAREST:
- the_one = rem >= r1/2 ? 1 : 0; break;
- case ROUND_UP:
- the_one = 1; break;
- case ROUND_DOWN:
- the_one = 0; break;
- }
-
- r2 = r1 * (div + the_one);
- pr_debug( "...setting r2 to %ld\n", r2);
- }
- } else if ((m2 == 1 && m1 == N) ||
- (m2 == N && m1 == NM)) { /* 1:N or N:NM */
- pr_debug( "Setting rates as N:1 (N*M:N)\n");
- rem = r1 % r2;
- pr_debug( "...remainder = %ld\n", rem);
- if (rem) {
- div = r1 / r2;
- pr_debug( "...div = %ld\n", div);
- switch (round_flag) {
- case ROUND_NEAREST:
- the_one = rem > r2/2 ? 1 : 0; break;
- case ROUND_UP:
- the_one = 0; break;
- case ROUND_DOWN:
- the_one = 1; break;
- }
-
- r2 = r1 / (div + the_one);
- pr_debug( "...setting r2 to %ld\n", r2);
- }
- } else { /* value:value */
- pr_debug( "Setting rates as %d:%d\n", m1, m2);
- div = r1 / m1;
- r2 = div * m2;
- pr_debug( "...div = %ld\n", div);
- pr_debug( "...setting r2 to %ld\n", r2);
- }
-
- return r2;
-}
-
-static void adjust_clocks(int originate, int *l, unsigned long v[],
- int n_in_line)
-{
- int x;
-
- pr_debug( "Go down from %d...\n", originate);
- /* go up recalculation clocks */
- for (x = originate; x>0; x -- )
- v[x-1] = adjust_pair_of_clocks(v[x], v[x-1],
- l[x], l[x-1],
- ROUND_UP);
-
- pr_debug( "Go up from %d...\n", originate);
- /* go down recalculation clocks */
- for (x = originate; x<n_in_line - 1; x ++ )
- v[x+1] = adjust_pair_of_clocks(v[x], v[x+1],
- l[x], l[x+1],
- ROUND_UP);
-}
-
-
/*
- * SH7722 uses a common set of multipliers and divisors, so this
- * is quite simple..
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
*/
+struct clk extal_clk = {
+ .rate = 33333333,
+};
-/*
- * Instead of having two separate multipliers/divisors set, like this:
- *
- * static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
- * static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
- *
- * I created the divisors2 array, which is used to calculate rate like
- * rate = parent * 2 / divisors2[ divisor ];
-*/
-static int divisors2[] = { 2, 3, 4, 5, 6, 8, 10, 12, 16, 20, 24, 32, 40 };
-
-static void master_clk_recalc(struct clk *clk)
+/* The dll block multiplies the 32khz r_clk, may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
{
- unsigned frqcr = ctrl_inl(FRQCR);
+ unsigned long mult;
- clk->rate = CONFIG_SH_PCLK_FREQ * (((frqcr >> 24) & 0x1f) + 1);
-}
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
-static void master_clk_init(struct clk *clk)
-{
- clk->parent = NULL;
- clk->flags |= CLK_RATE_PROPAGATES;
- clk->rate = CONFIG_SH_PCLK_FREQ;
- master_clk_recalc(clk);
+ return clk->parent->rate * mult;
}
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
-static void module_clk_recalc(struct clk *clk)
-{
- unsigned long frqcr = ctrl_inl(FRQCR);
-
- clk->rate = clk->parent->rate / (((frqcr >> 24) & 0x1f) + 1);
-}
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
-static int master_clk_setrate(struct clk *clk, unsigned long rate, int id)
+static unsigned long pll_recalc(struct clk *clk)
{
- int div = rate / clk->rate;
- int master_divs[] = { 2, 3, 4, 6, 8, 16 };
- int index;
- unsigned long frqcr;
+ unsigned long mult = 1;
+ unsigned long div = 1;
- for (index = 1; index < ARRAY_SIZE(master_divs); index++)
- if (div >= master_divs[index - 1] && div < master_divs[index])
- break;
-
- if (index >= ARRAY_SIZE(master_divs))
- index = ARRAY_SIZE(master_divs);
- div = master_divs[index - 1];
-
- frqcr = ctrl_inl(FRQCR);
- frqcr &= ~(0xF << 24);
- frqcr |= ( (div-1) << 24);
- ctrl_outl(frqcr, FRQCR);
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+ else
+ div = 2;
- return 0;
+ return (clk->parent->rate * mult) / div;
}
-static struct clk_ops sh7722_master_clk_ops = {
- .init = master_clk_init,
- .recalc = master_clk_recalc,
- .set_rate = master_clk_setrate,
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static struct clk_ops sh7722_module_clk_ops = {
- .recalc = module_clk_recalc,
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
};
-struct frqcr_context {
- unsigned mask;
- unsigned shift;
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
};
-struct frqcr_context sh7722_get_clk_context(const char *name)
-{
- struct frqcr_context ctx = { 0, };
-
- if (!strcmp(name, "peripheral_clk")) {
- ctx.shift = 0;
- ctx.mask = 0xF;
- } else if (!strcmp(name, "sdram_clk")) {
- ctx.shift = 4;
- ctx.mask = 0xF;
- } else if (!strcmp(name, "bus_clk")) {
- ctx.shift = 8;
- ctx.mask = 0xF;
- } else if (!strcmp(name, "sh_clk")) {
- ctx.shift = 12;
- ctx.mask = 0xF;
- } else if (!strcmp(name, "umem_clk")) {
- ctx.shift = 16;
- ctx.mask = 0xF;
- } else if (!strcmp(name, "cpu_clk")) {
- ctx.shift = 20;
- ctx.mask = 7;
- }
- return ctx;
-}
-
-/**
- * sh7722_find_divisors - find divisor for setting rate
- *
- * All sh7722 clocks use the same set of multipliers/divisors. This function
- * chooses correct divisor to set the rate of clock with parent clock that
- * generates frequency of 'parent_rate'
- *
- * @parent_rate: rate of parent clock
- * @rate: requested rate to be set
- */
-static int sh7722_find_divisors(unsigned long parent_rate, unsigned rate)
-{
- unsigned div2 = parent_rate * 2 / rate;
- int index;
-
- if (rate > parent_rate)
- return -EINVAL;
-
- for (index = 1; index < ARRAY_SIZE(divisors2); index++) {
- if (div2 > divisors2[index] && div2 <= divisors2[index])
- break;
- }
- if (index >= ARRAY_SIZE(divisors2))
- index = ARRAY_SIZE(divisors2) - 1;
- return divisors2[index];
-}
-
-static void sh7722_frqcr_recalc(struct clk *clk)
-{
- struct frqcr_context ctx = sh7722_get_clk_context(clk->name);
- unsigned long frqcr = ctrl_inl(FRQCR);
- int index;
-
- index = (frqcr >> ctx.shift) & ctx.mask;
- clk->rate = clk->parent->rate * 2 / divisors2[index];
-}
-
-static int sh7722_frqcr_set_rate(struct clk *clk, unsigned long rate,
- int algo_id)
-{
- struct frqcr_context ctx = sh7722_get_clk_context(clk->name);
- unsigned long parent_rate = clk->parent->rate;
- int div;
- unsigned long frqcr;
- int err = 0;
-
- /* pretty invalid */
- if (parent_rate < rate)
- return -EINVAL;
-
- /* look for multiplier/divisor pair */
- div = sh7722_find_divisors(parent_rate, rate);
- if (div<0)
- return div;
-
- /* calculate new value of clock rate */
- clk->rate = parent_rate * 2 / div;
- frqcr = ctrl_inl(FRQCR);
-
- /* FIXME: adjust as algo_id specifies */
- if (algo_id != NO_CHANGE) {
- int originator;
- char *algo_group_1[] = { "cpu_clk", "umem_clk", "sh_clk" };
- char *algo_group_2[] = { "sh_clk", "bus_clk" };
- char *algo_group_3[] = { "sh_clk", "sdram_clk" };
- char *algo_group_4[] = { "bus_clk", "peripheral_clk" };
- char *algo_group_5[] = { "cpu_clk", "peripheral_clk" };
- char **algo_current = NULL;
- /* 3 is the maximum number of clocks in relation */
- struct clk *ck[3];
- unsigned long values[3]; /* the same comment as above */
- int part_length = -1;
- int i;
-
- /*
- * all the steps below only required if adjustion was
- * requested
- */
- if (algo_id == IUS_N1_N1 ||
- algo_id == IUS_322 ||
- algo_id == IUS_522 ||
- algo_id == IUS_N11) {
- algo_current = algo_group_1;
- part_length = 3;
- }
- if (algo_id == SB_N1) {
- algo_current = algo_group_2;
- part_length = 2;
- }
- if (algo_id == SB3_N1 ||
- algo_id == SB3_32 ||
- algo_id == SB3_43 ||
- algo_id == SB3_54) {
- algo_current = algo_group_3;
- part_length = 2;
- }
- if (algo_id == BP_N1) {
- algo_current = algo_group_4;
- part_length = 2;
- }
- if (algo_id == IP_N1) {
- algo_current = algo_group_5;
- part_length = 2;
- }
- if (!algo_current)
- goto incorrect_algo_id;
-
- originator = -1;
- for (i = 0; i < part_length; i ++ ) {
- if (originator >= 0 && !strcmp(clk->name,
- algo_current[i]))
- originator = i;
- ck[i] = clk_get(NULL, algo_current[i]);
- values[i] = clk_get_rate(ck[i]);
- }
-
- if (originator >= 0)
- adjust_clocks(originator, adjust_algos[algo_id],
- values, part_length);
-
- for (i = 0; i < part_length; i ++ ) {
- struct frqcr_context part_ctx;
- int part_div;
-
- if (likely(!err)) {
- part_div = sh7722_find_divisors(parent_rate,
- rate);
- if (part_div > 0) {
- part_ctx = sh7722_get_clk_context(
- ck[i]->name);
- frqcr &= ~(part_ctx.mask <<
- part_ctx.shift);
- frqcr |= part_div << part_ctx.shift;
- } else
- err = part_div;
- }
-
- ck[i]->ops->recalc(ck[i]);
- clk_put(ck[i]);
- }
- }
-
- /* was there any error during recalculation ? If so, bail out.. */
- if (unlikely(err!=0))
- goto out_err;
-
- /* clear FRQCR bits */
- frqcr &= ~(ctx.mask << ctx.shift);
- frqcr |= div << ctx.shift;
-
- /* ...and perform actual change */
- ctrl_outl(frqcr, FRQCR);
- return 0;
-
-incorrect_algo_id:
- return -EINVAL;
-out_err:
- return err;
-}
-
-static long sh7722_frqcr_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk->parent->rate;
- int div;
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
- /* look for multiplier/divisor pair */
- div = sh7722_find_divisors(parent_rate, rate);
- if (div < 0)
- return clk->rate;
-
- /* calculate new value of clock rate */
- return parent_rate * 2 / div;
-}
-
-static struct clk_ops sh7722_frqcr_clk_ops = {
- .recalc = sh7722_frqcr_recalc,
- .set_rate = sh7722_frqcr_set_rate,
- .round_rate = sh7722_frqcr_round_rate,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
};
-/*
- * clock ops methods for SIU A/B and IrDA clock
- *
- */
-static int sh7722_siu_which(struct clk *clk)
-{
- if (!strcmp(clk->name, "siu_a_clk"))
- return 0;
- if (!strcmp(clk->name, "siu_b_clk"))
- return 1;
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
- if (!strcmp(clk->name, "irda_clk"))
- return 2;
-#endif
- return -EINVAL;
-}
-
-static unsigned long sh7722_siu_regs[] = {
- [0] = SCLKACR,
- [1] = SCLKBCR,
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
- [2] = IrDACLKCR,
-#endif
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
};
-static int sh7722_siu_start_stop(struct clk *clk, int enable)
-{
- int siu = sh7722_siu_which(clk);
- unsigned long r;
-
- if (siu < 0)
- return siu;
- BUG_ON(siu > 2);
- r = ctrl_inl(sh7722_siu_regs[siu]);
- if (enable)
- ctrl_outl(r & ~(1 << 8), sh7722_siu_regs[siu]);
- else
- ctrl_outl(r | (1 << 8), sh7722_siu_regs[siu]);
- return 0;
-}
-
-static void sh7722_siu_enable(struct clk *clk)
-{
- sh7722_siu_start_stop(clk, 1);
-}
-
-static void sh7722_siu_disable(struct clk *clk)
-{
- sh7722_siu_start_stop(clk, 0);
-}
-
-static void sh7722_video_enable(struct clk *clk)
-{
- unsigned long r;
-
- r = ctrl_inl(VCLKCR);
- ctrl_outl( r & ~(1<<8), VCLKCR);
-}
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
-static void sh7722_video_disable(struct clk *clk)
-{
- unsigned long r;
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
- r = ctrl_inl(VCLKCR);
- ctrl_outl( r | (1<<8), VCLKCR);
-}
-
-static int sh7722_video_set_rate(struct clk *clk, unsigned long rate,
- int algo_id)
-{
- unsigned long r;
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
+};
- r = ctrl_inl(VCLKCR);
- r &= ~0x3F;
- r |= ((clk->parent->rate / rate - 1) & 0x3F);
- ctrl_outl(r, VCLKCR);
- return 0;
-}
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
-static void sh7722_video_recalc(struct clk *clk)
-{
- unsigned long r;
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
+ [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0),
+};
- r = ctrl_inl(VCLKCR);
- clk->rate = clk->parent->rate / ((r & 0x3F) + 1);
-}
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
-static int sh7722_siu_set_rate(struct clk *clk, unsigned long rate, int algo_id)
-{
- int siu = sh7722_siu_which(clk);
- unsigned long r;
- int div;
-
- if (siu < 0)
- return siu;
- BUG_ON(siu > 2);
- r = ctrl_inl(sh7722_siu_regs[siu]);
- div = sh7722_find_divisors(clk->parent->rate, rate);
- if (div < 0)
- return div;
- r = (r & ~0xF) | div;
- ctrl_outl(r, sh7722_siu_regs[siu]);
- return 0;
-}
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
+};
-static void sh7722_siu_recalc(struct clk *clk)
-{
- int siu = sh7722_siu_which(clk);
- unsigned long r;
-
- if (siu < 0)
- return /* siu */ ;
- BUG_ON(siu > 2);
- r = ctrl_inl(sh7722_siu_regs[siu]);
- clk->rate = clk->parent->rate * 2 / divisors2[r & 0xF];
-}
+enum { DIV6_V, DIV6_NR };
-static struct clk_ops sh7722_siu_clk_ops = {
- .recalc = sh7722_siu_recalc,
- .set_rate = sh7722_siu_set_rate,
- .enable = sh7722_siu_enable,
- .disable = sh7722_siu_disable,
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};
-static struct clk_ops sh7722_video_clk_ops = {
- .recalc = sh7722_video_recalc,
- .set_rate = sh7722_video_set_rate,
- .enable = sh7722_video_enable,
- .disable = sh7722_video_disable,
-};
-/*
- * and at last, clock definitions themselves
- */
-static struct clk sh7722_umem_clock = {
- .name = "umem_clk",
- .ops = &sh7722_frqcr_clk_ops,
+static struct clk mstp_clks[HWBLK_NR] = {
+ [HWBLK_URAM] = SH_CLK_MSTP32(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [HWBLK_XYMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [HWBLK_TMU] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
+ [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
+ [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
+ [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+
+ [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0),
+
+ [HWBLK_SDHI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
+ [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0),
+ [HWBLK_USBF] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
+ [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0),
+ [HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
+ [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [HWBLK_VEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
+ [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
+ [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 0, 0),
};
-static struct clk sh7722_sh_clock = {
- .name = "sh_clk",
- .ops = &sh7722_frqcr_clk_ops,
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
+ CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]),
+ CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU]),
+
+ CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+
+ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI]),
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
};
-static struct clk sh7722_peripheral_clock = {
- .name = "peripheral_clk",
- .ops = &sh7722_frqcr_clk_ops,
-};
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
-static struct clk sh7722_sdram_clock = {
- .name = "sdram_clk",
- .ops = &sh7722_frqcr_clk_ops,
-};
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
-/*
- * these three clocks - SIU A, SIU B, IrDA - share the same clk_ops
- * methods of clk_ops determine which register they should access by
- * examining clk->name field
- */
-static struct clk sh7722_siu_a_clock = {
- .name = "siu_a_clk",
- .ops = &sh7722_siu_clk_ops,
-};
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
-static struct clk sh7722_siu_b_clock = {
- .name = "siu_b_clk",
- .ops = &sh7722_siu_clk_ops,
-};
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
-static struct clk sh7722_irda_clock = {
- .name = "irda_clk",
- .ops = &sh7722_siu_clk_ops,
-};
-#endif
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
-static struct clk sh7722_video_clock = {
- .name = "video_clk",
- .ops = &sh7722_video_clk_ops,
-};
+ if (!ret)
+ ret = sh_clk_div4_enable_register(div4_enable_clks,
+ DIV4_ENABLE_NR, &div4_table);
-static struct clk *sh7722_clocks[] = {
- &sh7722_umem_clock,
- &sh7722_sh_clock,
- &sh7722_peripheral_clock,
- &sh7722_sdram_clock,
- &sh7722_siu_a_clock,
- &sh7722_siu_b_clock,
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
- &sh7722_irda_clock,
-#endif
- &sh7722_video_clock,
-};
+ if (!ret)
+ ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+ DIV4_REPARENT_NR, &div4_table);
-/*
- * init in order: master, module, bus, cpu
- */
-struct clk_ops *onchip_ops[] = {
- &sh7722_master_clk_ops,
- &sh7722_module_clk_ops,
- &sh7722_frqcr_clk_ops,
- &sh7722_frqcr_clk_ops,
-};
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
-void __init
-arch_init_clk_ops(struct clk_ops **ops, int type)
-{
- BUG_ON(type < 0 || type > ARRAY_SIZE(onchip_ops));
- *ops = onchip_ops[type];
-}
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
-int __init arch_clk_init(void)
-{
- struct clk *master;
- int i;
-
- master = clk_get(NULL, "master_clk");
- for (i = 0; i < ARRAY_SIZE(sh7722_clocks); i++) {
- pr_debug( "Registering clock '%s'\n", sh7722_clocks[i]->name);
- sh7722_clocks[i]->parent = master;
- clk_register(sh7722_clocks[i]);
- }
- clk_put(master);
- return 0;
+ return ret;
}
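
The sh7722 conversion above replaces the hand-written clk_ops with table-driven DIV4/DIV6/MSTP clocks that consumers resolve through the clkdev lookup table. As a rough illustration, here is a minimal sketch of a hypothetical platform driver picking up its gate clock through one of the CLKDEV_DEV_ID entries (the probe function is invented for illustration and is not part of this patch):

/* Hypothetical consumer sketch; illustrative only, not part of this patch. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* Matched against CLKDEV_DEV_ID("sh_mobile_sdhi.0", ...) by device name */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* clears the block's MSTP stop bit */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
	return 0;
}

CLKDEV_CON_ID entries resolve the same way via clk_get(NULL, "<con_id>"), and CLKDEV_ICK_ID entries via clk_get(dev, "fck") with a matching device name.
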
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
new file mode 100644
index 00000000000..ccbcab550df
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c
@@ -0,0 +1,313 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7723.c
+ *
+ * SH7723 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/sh_clk.h>
+#include <asm/clock.h>
+#include <cpu/sh7723.h>
+
+/* SH7723 registers */
+#define FRQCR 0xa4150000
+#define VCLKCR 0xa4150004
+#define SCLKACR 0xa4150008
+#define SCLKBCR 0xa415000c
+#define IRDACLKCR 0xa4150018
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define DLLFRQ 0xa4150050
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The DLL multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long dll_recalc(struct clk *clk)
+{
+ unsigned long mult;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(DLLFRQ);
+ else
+ mult = 0;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops dll_clk_ops = {
+ .recalc = dll_recalc,
+};
+
+static struct clk dll_clk = {
+ .ops = &dll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);
+ else
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &dll_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT),
+ [DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0),
+};
+
+enum { DIV4_IRDA, DIV4_ENABLE_NR };
+
+struct clk div4_enable_clks[DIV4_ENABLE_NR] = {
+ [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0),
+};
+
+enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR };
+
+struct clk div4_reparent_clks[DIV4_REPARENT_NR] = {
+ [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0),
+ [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0),
+};
+enum { DIV6_V, DIV6_NR };
+
+struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
+};
+
+static struct clk mstp_clks[] = {
+ /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */
+ [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT),
+ [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT),
+ [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 22, CLK_ENABLE_ON_INIT),
+ [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0),
+ [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT),
+ [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0),
+ [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
+ [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
+ [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0),
+ [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0),
+ [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0),
+ [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0),
+ [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0),
+ [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0),
+ [HWBLK_MERAM] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 0, 0),
+
+ [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0),
+
+ [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 28, 0),
+ [HWBLK_ADC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 27, 0),
+ [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0),
+ [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0),
+ [HWBLK_ICB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, CLK_ENABLE_ON_INIT),
+ [HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0),
+ [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0),
+ [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0),
+ [HWBLK_USB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 11, 0),
+ [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 10, 0),
+ [HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
+ [HWBLK_VEU2H1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
+ [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [HWBLK_VEU2H0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
+ [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
+ [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("dll_clk", &dll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]),
+ CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]),
+ CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+ CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
+ CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]),
+ CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[HWBLK_MERAM]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]),
+ CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),
+
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
+
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or dll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &dll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret |= clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_enable_register(div4_enable_clks,
+ DIV4_ENABLE_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div4_reparent_register(div4_reparent_clks,
+ DIV4_REPARENT_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
+
+ return ret;
+}
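
The comment above extal_clk states that the default 33.33 MHz rate is meant to be overridden with clk_set_rate() from platform code. A minimal board-setup sketch of that, assuming a hypothetical board with a 25 MHz crystal (both the function and the rate are invented; not part of this patch):

/* Hypothetical board setup sketch; illustrative only, not part of this patch. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static void __init example_board_setup_clocks(void)
{
	/* "extal" resolves through CLKDEV_CON_ID("extal", &extal_clk) */
	struct clk *clk = clk_get(NULL, "extal");

	if (IS_ERR(clk))
		return;

	clk_set_rate(clk, 25000000);	/* hypothetical 25 MHz crystal */
	clk_put(clk);
}
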
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
new file mode 100644
index 00000000000..f579dd52819
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -0,0 +1,377 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+ *
+ * SH7724 clock framework support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/sh_clk.h>
+#include <asm/clock.h>
+#include <cpu/sh7724.h>
+
+/* SH7724 registers */
+#define FRQCRA 0xa4150000
+#define FRQCRB 0xa4150004
+#define VCLKCR 0xa4150048
+#define FCLKACR 0xa4150008
+#define FCLKBCR 0xa415000c
+#define IRDACLKCR 0xa4150018
+#define PLLCR 0xa4150024
+#define MSTPCR0 0xa4150030
+#define MSTPCR1 0xa4150034
+#define MSTPCR2 0xa4150038
+#define SPUCLKCR 0xa415003c
+#define FLLFRQ 0xa4150050
+#define LSTATS 0xa4150060
+
+/* Fixed 32 KHz root clock for RTC and Power Management purposes */
+static struct clk r_clk = {
+ .rate = 32768,
+};
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+/* The FLL multiplies the 32 kHz r_clk and may be used instead of extal */
+static unsigned long fll_recalc(struct clk *clk)
+{
+ unsigned long mult = 0;
+ unsigned long div = 1;
+
+ if (__raw_readl(PLLCR) & 0x1000)
+ mult = __raw_readl(FLLFRQ) & 0x3ff;
+
+ if (__raw_readl(FLLFRQ) & 0x4000)
+ div = 2;
+
+ return (clk->parent->rate * mult) / div;
+}
+
+static struct sh_clk_ops fll_clk_ops = {
+ .recalc = fll_recalc,
+};
+
+static struct clk fll_clk = {
+ .ops = &fll_clk_ops,
+ .parent = &r_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ unsigned long mult = 1;
+
+ if (__raw_readl(PLLCR) & 0x4000)
+ mult = (((__raw_readl(FRQCRA) >> 24) & 0x3f) + 1) * 2;
+
+ return clk->parent->rate * mult;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+/* A fixed divide-by-3 block used by the div6 clocks */
+static unsigned long div3_recalc(struct clk *clk)
+{
+ return clk->parent->rate / 3;
+}
+
+static struct sh_clk_ops div3_clk_ops = {
+ .recalc = div3_recalc,
+};
+
+static struct clk div3_clk = {
+ .ops = &div3_clk_ops,
+ .parent = &pll_clk,
+};
+
+/* External input clocks (pin names: FSIMCKA/FSIMCKB/DV_CLKI) */
+struct clk sh7724_fsimcka_clk = {
+};
+
+struct clk sh7724_fsimckb_clk = {
+};
+
+struct clk sh7724_dv_clki = {
+};
+
+static struct clk *main_clks[] = {
+ &r_clk,
+ &extal_clk,
+ &fll_clk,
+ &pll_clk,
+ &div3_clk,
+ &sh7724_fsimcka_clk,
+ &sh7724_fsimckb_clk,
+ &sh7724_dv_clki,
+};
+
+static void div4_kick(struct clk *clk)
+{
+ unsigned long value;
+
+ /* set KICK bit in FRQCRA to update hardware setting */
+ value = __raw_readl(FRQCRA);
+ value |= (1 << 31);
+ __raw_writel(value, FRQCRA);
+}
+
+static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+ .kick = div4_kick,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0),
+ [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
+};
+
+enum { DIV6_V, DIV6_I, DIV6_S, DIV6_FA, DIV6_FB, DIV6_NR };
+
+/* Indices are important - they are the actual values written to the src select field */
+static struct clk *common_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+};
+
+static struct clk *vclkcr_parent[8] = {
+ [0] = &div3_clk,
+ [2] = &sh7724_dv_clki,
+ [4] = &extal_clk,
+};
+
+static struct clk *fclkacr_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+ [2] = &sh7724_fsimcka_clk,
+ [3] = NULL,
+};
+
+static struct clk *fclkbcr_parent[] = {
+ [0] = &div3_clk,
+ [1] = NULL,
+ [2] = &sh7724_fsimckb_clk,
+ [3] = NULL,
+};
+
+static struct clk div6_clks[DIV6_NR] = {
+ [DIV6_V] = SH_CLK_DIV6_EXT(VCLKCR, 0,
+ vclkcr_parent, ARRAY_SIZE(vclkcr_parent), 12, 3),
+ [DIV6_I] = SH_CLK_DIV6_EXT(IRDACLKCR, 0,
+ common_parent, ARRAY_SIZE(common_parent), 6, 1),
+ [DIV6_S] = SH_CLK_DIV6_EXT(SPUCLKCR, CLK_ENABLE_ON_INIT,
+ common_parent, ARRAY_SIZE(common_parent), 6, 1),
+ [DIV6_FA] = SH_CLK_DIV6_EXT(FCLKACR, 0,
+ fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
+ [DIV6_FB] = SH_CLK_DIV6_EXT(FCLKBCR, 0,
+ fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
+};
+
+static struct clk mstp_clks[HWBLK_NR] = {
+ [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
+ [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
+ [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
+ [HWBLK_RSMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
+ [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT),
+ [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
+ [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT),
+ [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, CLK_ENABLE_ON_INIT),
+ [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0),
+ [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT),
+ [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0),
+ [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0),
+ [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0),
+ [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0),
+ [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+ [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0),
+ [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0),
+ [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0),
+ [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0),
+ [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0),
+
+ [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 12, 0),
+ [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 11, 0),
+ [HWBLK_IIC0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [HWBLK_IIC1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
+
+ [HWBLK_MMC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 29, 0),
+ [HWBLK_ETHER] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 28, 0),
+ [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 26, 0),
+ [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0),
+ [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
+ [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0),
+ [HWBLK_USB1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, 0),
+ [HWBLK_USB0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 20, 0),
+ [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 19, 0),
+ [HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0),
+ [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0),
+ [HWBLK_VEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 15, 0),
+ [HWBLK_CEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 13, 0),
+ [HWBLK_BEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 12, 0),
+ [HWBLK_2DDMAC] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 10, 0),
+ [HWBLK_SPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0),
+ [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0),
+ [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
+ [HWBLK_BEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
+ [HWBLK_CEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
+ [HWBLK_VEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0),
+ [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0),
+ [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("rclk", &r_clk),
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("fll_clk", &fll_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+ CLKDEV_CON_ID("div3_clk", &div3_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]),
+
+ /* DIV6 clocks */
+ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
+ CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]),
+ CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]),
+ CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
+ CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
+
+ /* MSTP clocks */
+ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]),
+ CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]),
+ CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]),
+ CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]),
+ CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]),
+ CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]),
+ CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]),
+ CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]),
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]),
+ CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]),
+ CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]),
+
+ CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[HWBLK_CMT]),
+ CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]),
+ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]),
+
+ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]),
+ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]),
+ CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]),
+ CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]),
+ CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]),
+ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]),
+
+ CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]),
+ CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]),
+ CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]),
+ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]),
+ CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]),
+ CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]),
+ CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]),
+ CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
+ CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
+ CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
+ CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]),
+ CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]),
+ CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
+ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
+ CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[HWBLK_CEU1]),
+ CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]),
+ CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]),
+ CLKDEV_DEV_ID("sh_fsi.0", &mstp_clks[HWBLK_SPU]),
+ CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]),
+ CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]),
+ CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[HWBLK_CEU0]),
+ CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]),
+ CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]),
+ CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]),
+};
+
+int __init arch_clk_init(void)
+{
+ int k, ret = 0;
+
+ /* autodetect extal or fll configuration */
+ if (__raw_readl(PLLCR) & 0x1000)
+ pll_clk.parent = &fll_clk;
+ else
+ pll_clk.parent = &extal_clk;
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
+ ret = clk_register(main_clks[k]);
+
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
+
+ if (!ret)
+ ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);
+
+ return ret;
+}
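
The vclkcr_parent[] comment above notes that array indices double as the source-select values, and arch_clk_init() registers the DIV6 clocks with sh_clk_div6_reparent_register(), so board or driver code can move the video clock onto the external DV_CLKI pin with clk_set_parent(). A minimal sketch, assuming sh7724_dv_clki is declared in <cpu/sh7724.h> as the include at the top of this file suggests (illustrative only, not part of this patch):

/* Hypothetical consumer sketch; illustrative only, not part of this patch. */
#include <linux/clk.h>
#include <linux/err.h>
#include <cpu/sh7724.h>		/* assumed to declare sh7724_dv_clki */

static int example_video_clk_from_dv_clki(void)
{
	struct clk *vclk = clk_get(NULL, "video_clk");
	int ret;

	if (IS_ERR(vclk))
		return PTR_ERR(vclk);

	/*
	 * DV_CLKI sits at index 2 of vclkcr_parent[], so per the DIV6_V
	 * entry above this reparent writes 2 into the 3-bit source-select
	 * field at bit 12 of VCLKCR.
	 */
	ret = clk_set_parent(vclk, &sh7724_dv_clki);
	clk_put(vclk);
	return ret;
}
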
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
new file mode 100644
index 00000000000..1fdf1ee672d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c
@@ -0,0 +1,260 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7734.c
+ *
+ * Clock framework for SH7734
+ *
+ * Copyright (C) 2011, 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ * Copyright (C) 2011, 2012 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+static struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+#define MODEMR (0xFFCC0020)
+#define MODEMR_MASK (0x6)
+#define MODEMR_533MHZ (0x2)
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ int mode = 12;
+ u32 r = __raw_readl(MODEMR);
+
+ if ((r & MODEMR_MASK) & MODEMR_533MHZ)
+ mode = 16;
+
+ return clk->parent->rate * mode;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *main_clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static int multipliers[] = { 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+static int divisors[] = { 1, 3, 2, 3, 4, 6, 8, 9, 12, 16, 18, 24 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = divisors,
+ .nr_divisors = ARRAY_SIZE(divisors),
+ .multipliers = multipliers,
+ .nr_multipliers = ARRAY_SIZE(multipliers),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_S, DIV4_B, DIV4_M, DIV4_S1, DIV4_P, DIV4_NR };
+
+#define DIV4(_reg, _bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_I] = DIV4(FRQMR1, 28, 0x0003, CLK_ENABLE_ON_INIT),
+ [DIV4_S] = DIV4(FRQMR1, 20, 0x000C, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(FRQMR1, 16, 0x0140, CLK_ENABLE_ON_INIT),
+ [DIV4_M] = DIV4(FRQMR1, 12, 0x0004, CLK_ENABLE_ON_INIT),
+ [DIV4_S1] = DIV4(FRQMR1, 4, 0x0030, CLK_ENABLE_ON_INIT),
+ [DIV4_P] = DIV4(FRQMR1, 0, 0x0140, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xFFC80030
+#define MSTPCR1 0xFFC80034
+#define MSTPCR3 0xFFC8003C
+
+enum {
+ MSTP030, MSTP029, /* IIC */
+ MSTP026, MSTP025, MSTP024, /* SCIF */
+ MSTP023,
+ MSTP022, MSTP021,
+ MSTP019, /* HSCIF */
+ MSTP016, MSTP015, MSTP014, /* TMU / TIMER */
+ MSTP012, MSTP011, MSTP010, MSTP009, MSTP008, /* SSI */
+ MSTP007, /* HSPI */
+ MSTP115, /* ADMAC */
+ MSTP114, /* GETHER */
+ MSTP111, /* DMAC */
+ MSTP109, /* VIDEOIN1 */
+ MSTP108, /* VIDEOIN0 */
+ MSTP107, /* RGPVBG */
+ MSTP106, /* 2DG */
+ MSTP103, /* VIEW */
+ MSTP100, /* USB */
+ MSTP331, /* MMC */
+ MSTP330, /* MIMLB */
+ MSTP323, /* SDHI0 */
+ MSTP322, /* SDHI1 */
+ MSTP321, /* SDHI2 */
+ MSTP320, /* RQSPI */
+ MSTP319, /* SRC0 */
+ MSTP318, /* SRC1 */
+ MSTP317, /* RSPI */
+ MSTP316, /* RCAN0 */
+ MSTP315, /* RCAN1 */
+ MSTP314, /* FLTCL */
+ MSTP313, /* ADC */
+ MSTP312, /* MTU */
+ MSTP304, /* IE-BUS */
+ MSTP303, /* RTC */
+ MSTP302, /* HIF */
+ MSTP301, /* STIF0 */
+ MSTP300, /* STIF1 */
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP030] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 30, 0),
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP019] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
+ [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
+ [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP007] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
+
+ /* MSTPCR1 */
+ [MSTP115] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 15, 0),
+ [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
+ [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
+ [MSTP109] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
+ [MSTP108] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
+ [MSTP107] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 7, 0),
+ [MSTP106] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 6, 0),
+ [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
+ [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 0, 0),
+
+ /* MSTPCR3 */
+ [MSTP331] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 31, 0),
+ [MSTP330] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 30, 0),
+ [MSTP323] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 23, 0),
+ [MSTP322] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 22, 0),
+ [MSTP321] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 21, 0),
+ [MSTP320] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 20, 0),
+ [MSTP319] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 19, 0),
+ [MSTP318] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 18, 0),
+ [MSTP317] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 17, 0),
+ [MSTP316] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 16, 0),
+ [MSTP315] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 15, 0),
+ [MSTP314] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 14, 0),
+ [MSTP313] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 13, 0),
+ [MSTP312] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 12, 0),
+ [MSTP304] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 4, 0),
+ [MSTP303] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 3, 0),
+ [MSTP302] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 2, 0),
+ [MSTP301] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 1, 0),
+ [MSTP300] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 0, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* clocks */
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_S]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_M]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk1", &div4_clks[DIV4_S1]),
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+
+ /* MSTP32 clocks */
+ CLKDEV_DEV_ID("i2c-sh7734.0", &mstp_clks[MSTP030]),
+ CLKDEV_DEV_ID("i2c-sh7734.1", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP024]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP023]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP022]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("hscif", &mstp_clks[MSTP019]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP014]),
+ CLKDEV_CON_ID("ssi0", &mstp_clks[MSTP012]),
+ CLKDEV_CON_ID("ssi1", &mstp_clks[MSTP011]),
+ CLKDEV_CON_ID("ssi2", &mstp_clks[MSTP010]),
+ CLKDEV_CON_ID("ssi3", &mstp_clks[MSTP009]),
+ CLKDEV_CON_ID("sss", &mstp_clks[MSTP008]),
+ CLKDEV_CON_ID("hspi", &mstp_clks[MSTP007]),
+ CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP100]),
+ CLKDEV_CON_ID("videoin0", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("videoin1", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("rgpvg", &mstp_clks[MSTP107]),
+ CLKDEV_CON_ID("2dg", &mstp_clks[MSTP106]),
+ CLKDEV_CON_ID("view", &mstp_clks[MSTP103]),
+
+ CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP331]),
+ CLKDEV_CON_ID("mimlb0", &mstp_clks[MSTP330]),
+ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP323]),
+ CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP322]),
+ CLKDEV_CON_ID("sdhi2", &mstp_clks[MSTP321]),
+ CLKDEV_CON_ID("rqspi0", &mstp_clks[MSTP320]),
+ CLKDEV_CON_ID("src0", &mstp_clks[MSTP319]),
+ CLKDEV_CON_ID("src1", &mstp_clks[MSTP318]),
+ CLKDEV_CON_ID("rsp0", &mstp_clks[MSTP317]),
+ CLKDEV_CON_ID("rcan0", &mstp_clks[MSTP316]),
+ CLKDEV_CON_ID("rcan1", &mstp_clks[MSTP315]),
+ CLKDEV_CON_ID("fltcl0", &mstp_clks[MSTP314]),
+ CLKDEV_CON_ID("adc0", &mstp_clks[MSTP313]),
+ CLKDEV_CON_ID("mtu0", &mstp_clks[MSTP312]),
+ CLKDEV_CON_ID("iebus0", &mstp_clks[MSTP304]),
+ CLKDEV_DEV_ID("sh7734-gether.0", &mstp_clks[MSTP114]),
+ CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP303]),
+ CLKDEV_CON_ID("hif0", &mstp_clks[MSTP302]),
+ CLKDEV_CON_ID("stif0", &mstp_clks[MSTP301]),
+ CLKDEV_CON_ID("stif1", &mstp_clks[MSTP300]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(main_clks); i++)
+ ret |= clk_register(main_clks[i]);
+
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
new file mode 100644
index 00000000000..9a28fdb3638
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -0,0 +1,155 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+ *
+ * SH7757 support for the clock framework
+ *
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 48000000,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ int multiplier;
+
+ multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16;
+
+ return clk->parent->rate * multiplier;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6,
+ 1, 1, 1, 16, 1, 24, 1, 1 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ /*
+ * The P clock is always enabled because some P clock modules are
+ * used by the Host PC.
+ */
+ [DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xffc80030
+#define MSTPCR1 0xffc80034
+#define MSTPCR2 0xffc10028
+
+enum { MSTP004, MSTP000, MSTP127, MSTP114, MSTP113, MSTP112,
+ MSTP111, MSTP110, MSTP103, MSTP102, MSTP220,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
+
+ /* MSTPCR1 */
+ [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 27, 0),
+ [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0),
+ [MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0),
+ [MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0),
+ [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0),
+ [MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
+ [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
+ [MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),
+
+ /* MSTPCR2 */
+ [MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
+ CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic3", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic4", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic5", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic6", &mstp_clks[MSTP000]),
+ CLKDEV_CON_ID("riic7", &mstp_clks[MSTP000]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP113]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP114]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP112]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP111]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP110]),
+
+ CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]),
+ CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]),
+ CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
+ CLKDEV_DEV_ID("rspi.2", &mstp_clks[MSTP127]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
+
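
All eight "riicN" connection IDs in the sh7757 lookup table above point at the same MSTP000 gate, so enables across the RIIC channels are reference-counted by the clock framework. A minimal sketch of a hypothetical consumer (illustrative only, not part of this patch):

/* Hypothetical consumer sketch; illustrative only, not part of this patch. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int example_enable_riic_channel(int channel)
{
	char id[8];
	struct clk *clk;
	int ret;

	/* riic0..riic7 all resolve to the single MSTP000 gate clock */
	snprintf(id, sizeof(id), "riic%d", channel);
	clk = clk_get(NULL, id);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);	/* usecounted; MSTPCR0 bit 0 clears on first enable */
	if (ret)
		clk_put(clk);
	return ret;
}
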
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
index 45889d412c8..7707e35aea4 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c
@@ -12,79 +12,74 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 };
-static int p1fc_divisors[] = { 1, 1, 1, 16, 1, 1, 1, 1 };
static int cfc_divisors[] = { 1, 1, 4, 1, 1, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= p0fc_divisors[(ctrl_inl(FRQCR) >> 4) & 0x07];
+ clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07];
}
-static struct clk_ops sh7763_master_clk_ops = {
+static struct sh_clk_ops sh7763_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 4) & 0x07);
- clk->rate = clk->parent->rate / p0fc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 4) & 0x07);
+ return clk->parent->rate / p0fc_divisors[idx];
}
-static struct clk_ops sh7763_module_clk_ops = {
+static struct sh_clk_ops sh7763_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 16) & 0x07);
- clk->rate = clk->parent->rate / bfc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 16) & 0x07);
+ return clk->parent->rate / bfc_divisors[idx];
}
-static struct clk_ops sh7763_bus_clk_ops = {
+static struct sh_clk_ops sh7763_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
-{
- clk->rate = clk->parent->rate;
-}
-
-static struct clk_ops sh7763_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct sh_clk_ops sh7763_cpu_clk_ops = {
+ .recalc = followparent_recalc,
};
-static struct clk_ops *sh7763_clk_ops[] = {
+static struct sh_clk_ops *sh7763_clk_ops[] = {
&sh7763_master_clk_ops,
&sh7763_module_clk_ops,
&sh7763_bus_clk_ops,
&sh7763_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7763_clk_ops))
*ops = sh7763_clk_ops[idx];
}
-static void shyway_clk_recalc(struct clk *clk)
+static unsigned long shyway_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 20) & 0x07);
- clk->rate = clk->parent->rate / cfc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 20) & 0x07);
+ return clk->parent->rate / cfc_divisors[idx];
}
-static struct clk_ops sh7763_shyway_clk_ops = {
+static struct sh_clk_ops sh7763_shyway_clk_ops = {
.recalc = shyway_clk_recalc,
};
static struct clk sh7763_shyway_clk = {
- .name = "shyway_clk",
- .flags = CLK_ALWAYS_ENABLED,
+ .flags = CLK_ENABLE_ON_INIT,
.ops = &sh7763_shyway_clk_ops,
};
@@ -96,31 +91,29 @@ static struct clk *sh7763_onchip_clocks[] = {
&sh7763_shyway_clk,
};
-static int __init sh7763_clk_init(void)
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk),
+};
+
+int __init arch_clk_init(void)
{
- struct clk *clk = clk_get(NULL, "master_clk");
- int i;
+ struct clk *clk;
+ int i, ret = 0;
+ cpg_clk_init();
+
+ clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) {
struct clk *clkp = sh7763_onchip_clocks[i];
clkp->parent = clk;
- clk_register(clkp);
- clk_enable(clkp);
+ ret |= clk_register(clkp);
}
- /*
- * Now that we have the rest of the clocks registered, we need to
- * force the parent clock to propagate so that these clocks will
- * automatically figure out their rate. We cheat by handing the
- * parent clock its current rate and forcing child propagation.
- */
- clk_set_rate(clk, clk_get_rate(clk));
-
clk_put(clk);
- return 0;
-}
-
-arch_initcall(sh7763_clk_init);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
index 8e236062c72..5d36f334bb0 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c
@@ -21,51 +21,51 @@ static int pfc_divisors[] = { 1, 8, 1,10,12,16, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> 28) & 0x000f];
+ clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f];
}
-static struct clk_ops sh7770_master_clk_ops = {
+static struct sh_clk_ops sh7770_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 28) & 0x000f);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f);
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7770_module_clk_ops = {
+static struct sh_clk_ops sh7770_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) & 0x000f);
- clk->rate = clk->parent->rate / bfc_divisors[idx];
+ int idx = (__raw_readl(FRQCR) & 0x000f);
+ return clk->parent->rate / bfc_divisors[idx];
}
-static struct clk_ops sh7770_bus_clk_ops = {
+static struct sh_clk_ops sh7770_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 24) & 0x000f);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f);
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh7770_cpu_clk_ops = {
+static struct sh_clk_ops sh7770_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7770_clk_ops[] = {
+static struct sh_clk_ops *sh7770_clk_ops[] = {
&sh7770_master_clk_ops,
&sh7770_module_clk_ops,
&sh7770_bus_clk_ops,
&sh7770_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7770_clk_ops))
*ops = sh7770_clk_ops[idx];
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
index 01f3da619d3..793dae42a2f 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c
@@ -11,6 +11,8 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
@@ -22,69 +24,68 @@ static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
- clk->rate *= pfc_divisors[ctrl_inl(FRQCR) & 0x0003];
+ clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003];
}
-static struct clk_ops sh7780_master_clk_ops = {
+static struct sh_clk_ops sh7780_master_clk_ops = {
.init = master_clk_init,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long module_clk_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQCR) & 0x0003);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ int idx = (__raw_readl(FRQCR) & 0x0003);
+ return clk->parent->rate / pfc_divisors[idx];
}
-static struct clk_ops sh7780_module_clk_ops = {
+static struct sh_clk_ops sh7780_module_clk_ops = {
.recalc = module_clk_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
+static unsigned long bus_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 16) & 0x0007);
- clk->rate = clk->parent->rate / bfc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007);
+ return clk->parent->rate / bfc_divisors[idx];
}
-static struct clk_ops sh7780_bus_clk_ops = {
+static struct sh_clk_ops sh7780_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
+static unsigned long cpu_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 24) & 0x0001);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001);
+ return clk->parent->rate / ifc_divisors[idx];
}
-static struct clk_ops sh7780_cpu_clk_ops = {
+static struct sh_clk_ops sh7780_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
-static struct clk_ops *sh7780_clk_ops[] = {
+static struct sh_clk_ops *sh7780_clk_ops[] = {
&sh7780_master_clk_ops,
&sh7780_module_clk_ops,
&sh7780_bus_clk_ops,
&sh7780_cpu_clk_ops,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7780_clk_ops))
*ops = sh7780_clk_ops[idx];
}
-static void shyway_clk_recalc(struct clk *clk)
+static unsigned long shyway_clk_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> 20) & 0x0007);
- clk->rate = clk->parent->rate / cfc_divisors[idx];
+ int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007);
+ return clk->parent->rate / cfc_divisors[idx];
}
-static struct clk_ops sh7780_shyway_clk_ops = {
+static struct sh_clk_ops sh7780_shyway_clk_ops = {
.recalc = shyway_clk_recalc,
};
static struct clk sh7780_shyway_clk = {
- .name = "shyway_clk",
- .flags = CLK_ALWAYS_ENABLED,
+ .flags = CLK_ENABLE_ON_INIT,
.ops = &sh7780_shyway_clk_ops,
};
@@ -96,31 +97,29 @@ static struct clk *sh7780_onchip_clocks[] = {
&sh7780_shyway_clk,
};
-static int __init sh7780_clk_init(void)
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk),
+};
+
+int __init arch_clk_init(void)
{
- struct clk *clk = clk_get(NULL, "master_clk");
- int i;
+ struct clk *clk;
+ int i, ret = 0;
+ cpg_clk_init();
+
+ clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
struct clk *clkp = sh7780_onchip_clocks[i];
clkp->parent = clk;
- clk_register(clkp);
- clk_enable(clkp);
+ ret |= clk_register(clkp);
}
- /*
- * Now that we have the rest of the clocks registered, we need to
- * force the parent clock to propagate so that these clocks will
- * automatically figure out their rate. We cheat by handing the
- * parent clock its current rate and forcing child propagation.
- */
- clk_set_rate(clk, clk_get_rate(clk));
-
clk_put(clk);
- return 0;
-}
-
-arch_initcall(sh7780_clk_init);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+ return ret;
+}
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
index 805535aa505..17d0ea55a5a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c
@@ -3,7 +3,7 @@
*
* SH7785 support for the clock framework
*
- * Copyright (C) 2007 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,152 +11,167 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
-
-static int ifc_divisors[] = { 1, 2, 4, 6 };
-static int ufc_divisors[] = { 1, 1, 4, 6 };
-static int sfc_divisors[] = { 1, 1, 4, 6 };
-static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 18,
- 24, 32, 36, 48, 1, 1, 1, 1 };
-static int mfc_divisors[] = { 1, 1, 4, 6 };
-static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 18,
- 24, 32, 36, 48, 1, 1, 1, 1 };
-
-static void master_clk_init(struct clk *clk)
-{
- clk->rate *= 36;
-}
+#include <cpu/sh7785.h>
-static struct clk_ops sh7785_master_clk_ops = {
- .init = master_clk_init,
+/*
+ * Default rate for the root input clock; reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 33333333,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long pll_recalc(struct clk *clk)
{
- int idx = (ctrl_inl(FRQMR1) & 0x000f);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
-}
+ int multiplier;
-static struct clk_ops sh7785_module_clk_ops = {
- .recalc = module_clk_recalc,
-};
+ multiplier = test_mode_pin(MODE_PIN4) ? 36 : 72;
-static void bus_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 16) & 0x000f);
- clk->rate = clk->parent->rate / bfc_divisors[idx];
+ return clk->parent->rate * multiplier;
}
-static struct clk_ops sh7785_bus_clk_ops = {
- .recalc = bus_clk_recalc,
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static void cpu_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 28) & 0x0003);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
-}
-
-static struct clk_ops sh7785_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
};
-static struct clk_ops *sh7785_clk_ops[] = {
- &sh7785_master_clk_ops,
- &sh7785_module_clk_ops,
- &sh7785_bus_clk_ops,
- &sh7785_cpu_clk_ops,
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-{
- if (idx < ARRAY_SIZE(sh7785_clk_ops))
- *ops = sh7785_clk_ops[idx];
-}
-
-static void shyway_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 20) & 0x0003);
- clk->rate = clk->parent->rate / sfc_divisors[idx];
-}
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
-static struct clk_ops sh7785_shyway_clk_ops = {
- .recalc = shyway_clk_recalc,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
};
-static struct clk sh7785_shyway_clk = {
- .name = "shyway_clk",
- .flags = CLK_ALWAYS_ENABLED,
- .ops = &sh7785_shyway_clk_ops,
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
};
-static void ddr_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 12) & 0x0003);
- clk->rate = clk->parent->rate / mfc_divisors[idx];
-}
-
-static struct clk_ops sh7785_ddr_clk_ops = {
- .recalc = ddr_clk_recalc,
-};
-
-static struct clk sh7785_ddr_clk = {
- .name = "ddr_clk",
- .flags = CLK_ALWAYS_ENABLED,
- .ops = &sh7785_ddr_clk_ops,
-};
-
-static void ram_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQMR1) >> 24) & 0x0003);
- clk->rate = clk->parent->rate / ufc_divisors[idx];
-}
-
-static struct clk_ops sh7785_ram_clk_ops = {
- .recalc = ram_clk_recalc,
+enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA,
+ DIV4_DU, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0f80, 0),
+ [DIV4_DU] = DIV4(4, 0x0ff0, 0),
+ [DIV4_GA] = DIV4(8, 0x0030, 0),
+ [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_U] = DIV4(24, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
-static struct clk sh7785_ram_clk = {
- .name = "ram_clk",
- .flags = CLK_ALWAYS_ENABLED,
- .ops = &sh7785_ram_clk_ops,
+#define MSTPCR0 0xffc80030
+#define MSTPCR1 0xffc80034
+
+enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP021, MSTP020, MSTP017, MSTP016,
+ MSTP013, MSTP012, MSTP009, MSTP008, MSTP003, MSTP002,
+ MSTP119, MSTP117, MSTP105, MSTP104, MSTP100,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP013] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 13, 0),
+ [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+
+ /* MSTPCR1 */
+ [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
+ [MSTP117] = SH_CLK_MSTP32(NULL, MSTPCR1, 17, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+ [MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0),
};
-/*
- * Additional SH7785-specific on-chip clocks that aren't already part of the
- * clock framework
- */
-static struct clk *sh7785_onchip_clocks[] = {
- &sh7785_shyway_clk,
- &sh7785_ddr_clk,
- &sh7785_ram_clk,
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
+ CLKDEV_CON_ID("ga_clk", &div4_clks[DIV4_GA]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
+ CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
+ CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]),
+ CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
+
+ CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
+ CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP117]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+ CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]),
};
-static int __init sh7785_clk_init(void)
+int __init arch_clk_init(void)
{
- struct clk *clk = clk_get(NULL, "master_clk");
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sh7785_onchip_clocks); i++) {
- struct clk *clkp = sh7785_onchip_clocks[i];
-
- clkp->parent = clk;
- clk_register(clkp);
- clk_enable(clkp);
- }
+ int i, ret = 0;
- /*
- * Now that we have the rest of the clocks registered, we need to
- * force the parent clock to propagate so that these clocks will
- * automatically figure out their rate. We cheat by handing the
- * parent clock its current rate and forcing child propagation.
- */
- clk_set_rate(clk, clk_get_rate(clk));
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
- clk_put(clk);
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
- return 0;
+ return ret;
}
-arch_initcall(sh7785_clk_init);
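
The new extal_clk above carries a 33.333333 MHz default, and its comment asks
platform code to reset the rate with clk_set_rate(). A hedged sketch of what that
board-side hook might look like; the function name and the 50 MHz value are
assumptions for illustration only:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static void __init example_board_clk_setup(void)	/* hypothetical board hook */
{
	/* matches CLKDEV_CON_ID("extal", &extal_clk) registered above */
	struct clk *clk = clk_get(NULL, "extal");

	if (!IS_ERR(clk)) {
		clk_set_rate(clk, 50000000);	/* assumed board crystal */
		clk_put(clk);
	}
}
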
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
new file mode 100644
index 00000000000..bec2a83f1ba
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c
@@ -0,0 +1,192 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/clock-sh7786.c
+ *
+ * SH7786 support for the clock framework
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 33333333,
+};
+
+static unsigned long pll_recalc(struct clk *clk)
+{
+ int multiplier;
+
+ /*
+ * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1,
+ * while modes 3, 4, and 5 use an x32.
+ */
+ multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 64 : 32;
+
+ return clk->parent->rate * multiplier;
+}
+
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
+};
+
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
+
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
+};
+
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
+
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
+};
+
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
+};
+
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR };
+
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
+
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0b40, 0),
+ [DIV4_DU] = DIV4(4, 0x0010, 0),
+ [DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT),
+};
+
+#define MSTPCR0 0xffc40030
+#define MSTPCR1 0xffc40034
+
+enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016,
+ MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008,
+ MSTP005, MSTP004, MSTP002,
+ MSTP112, MSTP110, MSTP109, MSTP108,
+ MSTP105, MSTP104, MSTP103, MSTP102,
+ MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0),
+ [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0),
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
+ [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
+ [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
+ [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
+ [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
+ [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0),
+ [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
+ [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0),
+ [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
+ [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
+ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+
+ /* MSTPCR1 */
+ [MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0),
+ [MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0),
+ [MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0),
+ [MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
+ [MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0),
+ [MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0),
+};
+
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.5", &mstp_clks[MSTP029]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.4", &mstp_clks[MSTP028]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
+ CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]),
+ CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]),
+ CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]),
+ CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]),
+ CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]),
+ CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]),
+ CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]),
+ CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP010]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.3", &mstp_clks[MSTP011]),
+
+ CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]),
+ CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]),
+ CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]),
+ CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]),
+ CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]),
+ CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
+ CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]),
+ CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]),
+};
+
+int __init arch_clk_init(void)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
+
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ return ret;
+}
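
pll_recalc() in the new clock-sh7786.c keys the PLL1 multiplier off the mode pins
(x64 for clock modes 0-2, x32 for modes 3-5), and the DIV4 table then divides the
PLL output down into the CPU, bus, and peripheral domains. Purely illustrative
arithmetic under the file's 33.333333 MHz default and an assumed divisor choice,
not a statement of real board settings:

/* Not part of the patch; a worked example of the recalc chain only. */
static unsigned long example_sh7786_cpu_rate(void)
{
	unsigned long extal = 33333333;		/* default extal_clk.rate */
	unsigned long pll   = extal * 32;	/* clock modes 3-5 */
	unsigned long cpu   = pll / 2;		/* one possible DIV4_I divisor */

	return cpu;				/* ~533 MHz with these assumed settings */
}
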
diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
index c630b29e06a..9a49a44f6f9 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006-2007 Renesas Technology Corp.
* Copyright (C) 2006-2007 Renesas Solutions Corp.
- * Copyright (C) 2006-2007 Paul Mundt
+ * Copyright (C) 2006-2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -13,123 +13,139 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
-#include <asm/io.h>
-
-static int ifc_divisors[] = { 1, 2, 4 ,6 };
-static int bfc_divisors[] = { 1, 1, 1, 1, 1, 12, 16, 18, 24, 32, 36, 48 };
-static int pfc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 18, 24, 32, 36, 48 };
-static int cfc_divisors[] = { 1, 1, 4, 6 };
-
-#define IFC_POS 28
-#define IFC_MSK 0x0003
-#define BFC_MSK 0x000f
-#define PFC_MSK 0x000f
-#define CFC_MSK 0x0003
-#define BFC_POS 16
-#define PFC_POS 0
-#define CFC_POS 20
-
-static void master_clk_init(struct clk *clk)
-{
- clk->rate *= pfc_divisors[(ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK];
-}
-static struct clk_ops shx3_master_clk_ops = {
- .init = master_clk_init,
+/*
+ * Default rate for the root input clock, reset this with clk_set_rate()
+ * from the platform code.
+ */
+static struct clk extal_clk = {
+ .rate = 16666666,
};
-static void module_clk_recalc(struct clk *clk)
+static unsigned long pll_recalc(struct clk *clk)
{
- int idx = ((ctrl_inl(FRQCR) >> PFC_POS) & PFC_MSK);
- clk->rate = clk->parent->rate / pfc_divisors[idx];
+ /* PLL1 has a fixed x72 multiplier. */
+ return clk->parent->rate * 72;
}
-static struct clk_ops shx3_module_clk_ops = {
- .recalc = module_clk_recalc,
+static struct sh_clk_ops pll_clk_ops = {
+ .recalc = pll_recalc,
};
-static void bus_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQCR) >> BFC_POS) & BFC_MSK);
- clk->rate = clk->parent->rate / bfc_divisors[idx];
-}
+static struct clk pll_clk = {
+ .ops = &pll_clk_ops,
+ .parent = &extal_clk,
+ .flags = CLK_ENABLE_ON_INIT,
+};
-static struct clk_ops shx3_bus_clk_ops = {
- .recalc = bus_clk_recalc,
+static struct clk *clks[] = {
+ &extal_clk,
+ &pll_clk,
};
-static void cpu_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQCR) >> IFC_POS) & IFC_MSK);
- clk->rate = clk->parent->rate / ifc_divisors[idx];
-}
+static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18,
+ 24, 32, 36, 48 };
-static struct clk_ops shx3_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
+static struct clk_div_mult_table div4_div_mult_table = {
+ .divisors = div2,
+ .nr_divisors = ARRAY_SIZE(div2),
};
-static struct clk_ops *shx3_clk_ops[] = {
- &shx3_master_clk_ops,
- &shx3_module_clk_ops,
- &shx3_bus_clk_ops,
- &shx3_cpu_clk_ops,
+static struct clk_div4_table div4_table = {
+ .div_mult_table = &div4_div_mult_table,
};
-void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
-{
- if (idx < ARRAY_SIZE(shx3_clk_ops))
- *ops = shx3_clk_ops[idx];
-}
+enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR };
-static void shyway_clk_recalc(struct clk *clk)
-{
- int idx = ((ctrl_inl(FRQCR) >> CFC_POS) & CFC_MSK);
- clk->rate = clk->parent->rate / cfc_divisors[idx];
-}
+#define DIV4(_bit, _mask, _flags) \
+ SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags)
-static struct clk_ops shx3_shyway_clk_ops = {
- .recalc = shyway_clk_recalc,
+struct clk div4_clks[DIV4_NR] = {
+ [DIV4_P] = DIV4(0, 0x0f80, 0),
+ [DIV4_SHA] = DIV4(4, 0x0ff0, 0),
+ [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT),
+ [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT),
+ [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT),
};
-static struct clk shx3_shyway_clk = {
- .name = "shyway_clk",
- .flags = CLK_ALWAYS_ENABLED,
- .ops = &shx3_shyway_clk_ops,
+#define MSTPCR0 0xffc00030
+#define MSTPCR1 0xffc00034
+
+enum { MSTP027, MSTP026, MSTP025, MSTP024,
+ MSTP009, MSTP008, MSTP003, MSTP002,
+ MSTP001, MSTP000, MSTP119, MSTP105,
+ MSTP104, MSTP_NR };
+
+static struct clk mstp_clks[MSTP_NR] = {
+ /* MSTPCR0 */
+ [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0),
+ [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0),
+ [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0),
+ [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0),
+ [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0),
+ [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0),
+ [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
+ [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
+ [MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
+ [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0),
+
+ /* MSTPCR1 */
+ [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0),
+ [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0),
+ [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0),
};
-/*
- * Additional SHx3-specific on-chip clocks that aren't already part of the
- * clock framework
- */
-static struct clk *shx3_onchip_clocks[] = {
- &shx3_shyway_clk,
+static struct clk_lookup lookups[] = {
+ /* main clocks */
+ CLKDEV_CON_ID("extal", &extal_clk),
+ CLKDEV_CON_ID("pll_clk", &pll_clk),
+
+ /* DIV4 clocks */
+ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
+ CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]),
+ CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]),
+ CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
+ CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
+ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
+
+ /* MSTP32 clocks */
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP027]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP026]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP025]),
+ CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP024]),
+
+ CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]),
+ CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]),
+ CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]),
+ CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]),
+
+ CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]),
+ CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]),
+
+ CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]),
+ CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]),
+ CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]),
};
-static int __init shx3_clk_init(void)
+int __init arch_clk_init(void)
{
- struct clk *clk = clk_get(NULL, "master_clk");
- int i;
-
- for (i = 0; i < ARRAY_SIZE(shx3_onchip_clocks); i++) {
- struct clk *clkp = shx3_onchip_clocks[i];
-
- clkp->parent = clk;
- clk_register(clkp);
- clk_enable(clkp);
- }
+ int i, ret = 0;
- /*
- * Now that we have the rest of the clocks registered, we need to
- * force the parent clock to propagate so that these clocks will
- * automatically figure out their rate. We cheat by handing the
- * parent clock its current rate and forcing child propagation.
- */
- clk_set_rate(clk, clk_get_rate(clk));
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ ret |= clk_register(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(lookups); i++)
+ clkdev_add(&lookups[i]);
- clk_put(clk);
+ if (!ret)
+ ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
+ &div4_table);
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
- return 0;
+ return ret;
}
-arch_initcall(shx3_clk_init);
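
Across the three converted files the CLKDEV_ICK_ID() entries pair a connection ID
with a device name, so a peripheral driver resolves its clock against its own
platform device instead of a global name. A minimal sketch, assuming a driver
probing against the "sh-sci.0" device registered elsewhere (function name is
hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_sci_clk_get(struct platform_device *pdev)	/* hypothetical */
{
	/* Matches CLKDEV_ICK_ID("sci_fck", "sh-sci.0", ...) when
	 * dev_name(&pdev->dev) is "sh-sci.0". */
	struct clk *clk = clk_get(&pdev->dev, "sci_fck");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	/* ... use the peripheral ... */
	clk_disable(clk);
	clk_put(clk);

	return 0;
}
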
diff --git a/arch/sh/kernel/cpu/sh4a/intc-shx3.c b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
new file mode 100644
index 00000000000..78c971486b4
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/intc-shx3.c
@@ -0,0 +1,34 @@
+/*
+ * Shared support for SH-X3 interrupt controllers.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/init.h>
+
+#define INTACK 0xfe4100b8
+#define INTACKCLR 0xfe4100bc
+#define INTC_USERIMASK 0xfe411000
+
+#ifdef CONFIG_INTC_BALANCING
+unsigned int irq_lookup(unsigned int irq)
+{
+ return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE;
+}
+
+void irq_finish(unsigned int irq)
+{
+ __raw_writel(irq2evt(irq), INTACKCLR);
+}
+#endif
+
+static int __init shx3_irq_setup(void)
+{
+ return register_intc_userimask(INTC_USERIMASK);
+}
+arch_initcall(shx3_irq_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c
new file mode 100644
index 00000000000..84a2c396cee
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/perf_event.c
@@ -0,0 +1,302 @@
+/*
+ * Performance events support for SH-4A performance counters
+ *
+ * Copyright (C) 2009, 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <asm/processor.h>
+
+#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx))
+#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx))
+
+#define CCBR_CIT_MASK (0x7ff << 6)
+#define CCBR_DUC (1 << 3)
+#define CCBR_CMDS (1 << 1)
+#define CCBR_PPCE (1 << 0)
+
+#ifdef CONFIG_CPU_SHX3
+/*
+ * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
+ * and PMCTR locations remain tentatively constant. This change is
+ * wholly undocumented, and was simply found through trial and error.
+ *
+ * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
+ * it's unclear when this ceased to be the case. For now we always use
+ * the new location (if future parts keep up with this trend, scanning
+ * for them at runtime also remains a viable option).
+ *
+ * The gap in the register space also suggests that there are other
+ * undocumented counters, so this will need to be revisited at a later
+ * point in time.
+ */
+#define PPC_PMCAT 0xfc100240
+#else
+#define PPC_PMCAT 0xfc100080
+#endif
+
+#define PMCAT_OVF3 (1 << 27)
+#define PMCAT_CNN3 (1 << 26)
+#define PMCAT_CLR3 (1 << 25)
+#define PMCAT_OVF2 (1 << 19)
+#define PMCAT_CLR2 (1 << 17)
+#define PMCAT_OVF1 (1 << 11)
+#define PMCAT_CNN1 (1 << 10)
+#define PMCAT_CLR1 (1 << 9)
+#define PMCAT_OVF0 (1 << 3)
+#define PMCAT_CLR0 (1 << 1)
+
+static struct sh_pmu sh4a_pmu;
+
+/*
+ * Supported raw event codes:
+ *
+ * Event Code Description
+ * ---------- -----------
+ *
+ * 0x0000 number of elapsed cycles
+ * 0x0200 number of elapsed cycles in privileged mode
+ * 0x0280 number of elapsed cycles while SR.BL is asserted
+ * 0x0202 instruction execution
+ * 0x0203 instruction execution in parallel
+ * 0x0204 number of unconditional branches
+ * 0x0208 number of exceptions
+ * 0x0209 number of interrupts
+ * 0x0220 UTLB miss caused by instruction fetch
+ * 0x0222 UTLB miss caused by operand access
+ * 0x02a0 number of ITLB misses
+ * 0x0028 number of accesses to instruction memories
+ * 0x0029 number of accesses to instruction cache
+ * 0x002a instruction cache miss
+ * 0x022e number of accesses to instruction X/Y memory
+ * 0x0030 number of reads to operand memories
+ * 0x0038 number of writes to operand memories
+ * 0x0031 number of operand cache read accesses
+ * 0x0039 number of operand cache write accesses
+ * 0x0032 operand cache read miss
+ * 0x003a operand cache write miss
+ * 0x0236 number of reads to operand X/Y memory
+ * 0x023e number of writes to operand X/Y memory
+ * 0x0237 number of reads to operand U memory
+ * 0x023f number of writes to operand U memory
+ * 0x0337 number of U memory read buffer misses
+ * 0x02b4 number of wait cycles due to operand read access
+ * 0x02bc number of wait cycles due to operand write access
+ * 0x0033 number of wait cycles due to operand cache read miss
+ * 0x003b number of wait cycles due to operand cache write miss
+ */
+
+/*
+ * Special reserved bits used by hardware emulators, read values will
+ * vary, but writes must always be 0.
+ */
+#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
+
+static const int sh4a_general_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,
+ [PERF_COUNT_HW_BRANCH_MISSES] = -1,
+ [PERF_COUNT_HW_BUS_CYCLES] = -1,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static const int sh4a_cache_events
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0031,
+ [ C(RESULT_MISS) ] = 0x0032,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0039,
+ [ C(RESULT_MISS) ] = 0x003a,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0029,
+ [ C(RESULT_MISS) ] = 0x002a,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0030,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0038,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0222,
+ [ C(RESULT_MISS) ] = 0x0220,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x02a0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+static int sh4a_event_map(int event)
+{
+ return sh4a_general_events[event];
+}
+
+static u64 sh4a_pmu_read(int idx)
+{
+ return __raw_readl(PPC_PMCTR(idx));
+}
+
+static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(PPC_CCBR(idx));
+ tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
+ __raw_writel(tmp, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(PPC_PMCAT);
+ tmp &= ~PMCAT_EMU_CLR_MASK;
+ tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
+ __raw_writel(tmp, PPC_PMCAT);
+
+ tmp = __raw_readl(PPC_CCBR(idx));
+ tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
+ __raw_writel(tmp, PPC_CCBR(idx));
+
+ __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
+}
+
+static void sh4a_pmu_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_pmu.num_events; i++)
+ __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
+}
+
+static void sh4a_pmu_enable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_pmu.num_events; i++)
+ __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
+}
+
+static struct sh_pmu sh4a_pmu = {
+ .name = "sh4a",
+ .num_events = 2,
+ .event_map = sh4a_event_map,
+ .max_events = ARRAY_SIZE(sh4a_general_events),
+ .raw_event_mask = 0x3ff,
+ .cache_events = &sh4a_cache_events,
+ .read = sh4a_pmu_read,
+ .disable = sh4a_pmu_disable,
+ .enable = sh4a_pmu_enable,
+ .disable_all = sh4a_pmu_disable_all,
+ .enable_all = sh4a_pmu_enable_all,
+};
+
+static int __init sh4a_pmu_init(void)
+{
+ /*
+ * Make sure this CPU actually has perf counters.
+ */
+ if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
+ pr_notice("HW perf events unsupported, software events only.\n");
+ return -ENODEV;
+ }
+
+ return register_sh_pmu(&sh4a_pmu);
+}
+early_initcall(sh4a_pmu_init);
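
The raw event code table in the new perf_event.c maps directly onto PERF_TYPE_RAW
from userspace, and the 0x3ff raw_event_mask covers every code listed. A hedged
userspace sketch that counts operand cache read misses (raw code 0x0032); the
workload placeholder and minimal error handling are illustrative, not part of the
patch:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x0032;		/* operand cache read miss, per the table above */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under test ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) > 0)
		printf("operand cache read misses: %lld\n", count);

	close(fd);
	return 0;
}
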
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
new file mode 100644
index 00000000000..271bbc86492
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c
@@ -0,0 +1,20 @@
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7722_pfc_resources[] = {
+ [0] = {
+ .start = 0xa4050100,
+ .end = 0xa405018f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7722", sh7722_pfc_resources,
+ ARRAY_SIZE(sh7722_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
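
This and the following pinmux-*.c files all follow the same shape: a memory
resource window for the PFC block plus an sh_pfc_register() call at arch_initcall
time. Board code then selects pin functions through the GPIO API. A board-side
sketch, assuming the CPU header provides a function GPIO enum such as
GPIO_FN_SCIF0_TXD (both the header and the enum name here are illustrative):

#include <linux/gpio.h>
#include <linux/init.h>
#include <cpu/sh7722.h>		/* assumed: GPIO_FN_* definitions for this part */

static void __init example_board_pinmux(void)	/* hypothetical board hook */
{
	/* Route the pin to its SCIF function; the pfc registered above
	 * translates the function GPIO into the right mux settings. */
	gpio_request(GPIO_FN_SCIF0_TXD, NULL);	/* error handling elided */
}
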
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c
new file mode 100644
index 00000000000..99c637d5bf7
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c
@@ -0,0 +1,30 @@
+/*
+ * SH7723 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7723_pfc_resources[] = {
+ [0] = {
+ .start = 0xa4050100,
+ .end = 0xa405016f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7723", sh7723_pfc_resources,
+ ARRAY_SIZE(sh7723_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c
new file mode 100644
index 00000000000..63be4749e34
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c
@@ -0,0 +1,35 @@
+/*
+ * SH7724 Pinmux
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7724_pfc_resources[] = {
+ [0] = {
+ .start = 0xa4050100,
+ .end = 0xa405016f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7724", sh7724_pfc_resources,
+ ARRAY_SIZE(sh7724_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c
new file mode 100644
index 00000000000..ea2db632a76
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c
@@ -0,0 +1,35 @@
+/*
+ * SH7734 processor support - PFC hardware block
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7734_pfc_resources[] = {
+ [0] = { /* PFC */
+ .start = 0xFFFC0000,
+ .end = 0xFFFC011C,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = { /* GPIO */
+ .start = 0xFFC40000,
+ .end = 0xFFC4502B,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7734", sh7734_pfc_resources,
+ ARRAY_SIZE(sh7734_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
new file mode 100644
index 00000000000..567745d4422
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c
@@ -0,0 +1,35 @@
+/*
+ * SH7757 (B0 step) Pinmux
+ *
+ * Copyright (C) 2009-2010 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ *
+ * Based on SH7723 Pinmux
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7757_pfc_resources[] = {
+ [0] = {
+ .start = 0xffec0000,
+ .end = 0xffec008f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7757", sh7757_pfc_resources,
+ ARRAY_SIZE(sh7757_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c
new file mode 100644
index 00000000000..e336ab8b512
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c
@@ -0,0 +1,30 @@
+/*
+ * SH7785 Pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7785_pfc_resources[] = {
+ [0] = {
+ .start = 0xffe70000,
+ .end = 0xffe7008f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7785", sh7785_pfc_resources,
+ ARRAY_SIZE(sh7785_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c
new file mode 100644
index 00000000000..9a459556a2f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c
@@ -0,0 +1,35 @@
+/*
+ * SH7786 Pinmux
+ *
+ * Copyright (C) 2008, 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7785 pinmux
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource sh7786_pfc_resources[] = {
+ [0] = {
+ .start = 0xffcc0000,
+ .end = 0xffcc008f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-sh7786", sh7786_pfc_resources,
+ ARRAY_SIZE(sh7786_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
new file mode 100644
index 00000000000..444bf25c60f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c
@@ -0,0 +1,29 @@
+/*
+ * SH-X3 prototype CPU pinmux
+ *
+ * Copyright (C) 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <cpu/pfc.h>
+
+static struct resource shx3_pfc_resources[] = {
+ [0] = {
+ .start = 0xffc70000,
+ .end = 0xffc7001f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static int __init plat_pinmux_setup(void)
+{
+ return sh_pfc_register("pfc-shx3", shx3_pfc_resources,
+ ARRAY_SIZE(shx3_pfc_resources));
+}
+arch_initcall(plat_pinmux_setup);
diff --git a/arch/sh/kernel/cpu/sh4a/serial-sh7722.c b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c
new file mode 100644
index 00000000000..59bc3a72702
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c
@@ -0,0 +1,23 @@
+#include <linux/serial_sci.h>
+#include <linux/serial_core.h>
+#include <linux/io.h>
+
+#define PSCR 0xA405011E
+
+static void sh7722_sci_init_pins(struct uart_port *port, unsigned int cflag)
+{
+ unsigned short data;
+
+ if (port->mapbase == 0xffe00000) {
+ data = __raw_readw(PSCR);
+ data &= ~0x03cf;
+ if (!(cflag & CRTSCTS))
+ data |= 0x0340;
+
+ __raw_writew(data, PSCR);
+ }
+}
+
+struct plat_sci_port_ops sh7722_sci_port_ops = {
+ .init_pins = sh7722_sci_init_pins,
+};
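
The new serial-sh7722.c only defines the init_pins hook; it takes effect once the
SoC's SCIF platform data points at it. A sketch of that hookup under the assumption
that plat_sci_port carried an ops pointer in this era of the sh-sci driver (the
field name and the placement in setup-sh7722.c are assumptions, not shown in this
hunk):

#include <linux/serial_sci.h>
#include <linux/serial_core.h>

extern struct plat_sci_port_ops sh7722_sci_port_ops;

static struct plat_sci_port scif0_platform_data = {
	.flags	= UPF_BOOT_AUTOCONF,
	.type	= PORT_SCIF,
	.ops	= &sh7722_sci_port_ops,	/* assumed: sh-sci calls .init_pins() on termios changes */
};
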
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 6d4f50cd4aa..ceb3dedad98 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -11,37 +11,440 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <asm/clock.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 81, 83, 82 },
- }, {
- .flags = 0,
- }
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .type = PORT_SCIF,
};
-static struct platform_device sci_device = {
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe10000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc20)),
+};
+
+static struct platform_device scif1_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif1_platform_data,
},
};
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffe20000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc40)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xffe30000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc60)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct resource iic0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xe00),
+ .end = evt2irq(0xe60),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic0_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic0_resources),
+ .resource = iic0_resources,
+};
+
+static struct resource iic1_resources[] = {
+ [0] = {
+ .name = "IIC1",
+ .start = 0x04750000,
+ .end = 0x04750017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x780),
+ .end = evt2irq(0x7e0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic1_device = {
+ .name = "i2c-sh_mobile",
+ .id = 1, /* "i2c1" clock */
+ .num_resources = ARRAY_SIZE(iic1_resources),
+ .resource = iic1_resources,
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU4",
+ .version = "0",
+ .irq = evt2irq(0x980),
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe9022eb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = evt2irq(0x8c0),
+};
+
+static struct resource veu_resources[] = {
+ [0] = {
+ .name = "VEU",
+ .start = 0xfe920000,
+ .end = 0xfe9200b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu_platform_data,
+ },
+ .resource = veu_resources,
+ .num_resources = ARRAY_SIZE(veu_resources),
+};
+
+static struct uio_info jpu_platform_data = {
+ .name = "JPU",
+ .version = "0",
+ .irq = evt2irq(0x560),
+};
+
+static struct resource jpu_resources[] = {
+ [0] = {
+ .name = "JPU",
+ .start = 0xfea00000,
+ .end = 0xfea102d3,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device jpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &jpu_platform_data,
+ },
+ .resource = jpu_resources,
+ .num_resources = ARRAY_SIZE(jpu_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 0x20,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0x044a0000, 0x70),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-32",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
static struct platform_device *sh7343_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &tmu0_device,
+ &iic0_device,
+ &iic1_device,
+ &vpu_device,
+ &veu_device,
+ &jpu_device,
};
static int __init sh7343_devices_setup(void)
{
+ platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
+ platform_resource_setup_memory(&veu_device, "veu", 2 << 20);
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+
return platform_add_devices(sh7343_devices,
ARRAY_SIZE(sh7343_devices));
}
-__initcall(sh7343_devices_setup);
+arch_initcall(sh7343_devices_setup);
+
+static struct platform_device *sh7343_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &cmt_device,
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7343_early_devices,
+ ARRAY_SIZE(sh7343_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ DMAC0, DMAC1, DMAC2, DMAC3,
+ VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU,
+ MFI, VPU, TPU, Z3D4, USBI0, USBI1,
+ MMC_ERR, MMC_TRAN, MMC_FSTAT, MMC_FRDY,
+ DMAC4, DMAC5, DMAC_DADERR,
+ KEYSC,
+ SCIF, SCIF1, SCIF2, SCIF3,
+ SIOF0, SIOF1, SIO,
+ FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
+ I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
+ I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
+ SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
+ IRDA, SDHI, CMT, TSIF, SIU,
+ TMU0, TMU1, TMU2,
+ JPU, LCDC,
+
+ /* interrupt groups */
+
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+ INTC_VECT(I2C1_ALI, 0x780), INTC_VECT(I2C1_TACKI, 0x7a0),
+ INTC_VECT(I2C1_WAITI, 0x7c0), INTC_VECT(I2C1_DTEI, 0x7e0),
+ INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820),
+ INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860),
+ INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0),
+ INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0),
+ INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980),
+ INTC_VECT(TPU, 0x9a0), INTC_VECT(Z3D4, 0x9e0),
+ INTC_VECT(USBI0, 0xa20), INTC_VECT(USBI1, 0xa40),
+ INTC_VECT(MMC_ERR, 0xb00), INTC_VECT(MMC_TRAN, 0xb20),
+ INTC_VECT(MMC_FSTAT, 0xb40), INTC_VECT(MMC_FRDY, 0xb60),
+ INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0),
+ INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0),
+ INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIF1, 0xc20),
+ INTC_VECT(SCIF2, 0xc40), INTC_VECT(SCIF3, 0xc60),
+ INTC_VECT(SIOF0, 0xc80), INTC_VECT(SIOF1, 0xca0),
+ INTC_VECT(SIO, 0xd00),
+ INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0),
+ INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
+ INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
+ INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
+ INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
+ INTC_VECT(SIU, 0xf80),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440),
+ INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3),
+ INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU),
+ INTC_GROUP(MMC, MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR),
+ INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR),
+ INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
+ FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
+ INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
+ INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
+ INTC_GROUP(USB, USBI0, USBI1),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, 0, 0, 0, MFI } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC, DMAC_DADERR, DMAC5, DMAC4, SCIF3, SCIF2, SCIF1, SCIF } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, 0, SIO, Z3D4, 0, SIOF1, SIOF0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USBI1, USBI0 } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI, TPU, 0, 0, TSIF } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIF1, SCIF2, SCIF3 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C0 } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, I2C1 } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { Z3D4, 0, SIU } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { 0, 0, TPU } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7343",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
+ register_intc_controller(&intc_desc);
}
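
The setup-sh7343.c rework turns every peripheral into an ordinary platform device
described by DEFINE_RES_MEM/DEFINE_RES_IRQ resources, with
platform_resource_setup_memory() filling the UIO placeholder entries with
contiguous memory. Drivers pull those resources back out with the standard helpers;
a minimal probe-side sketch (the driver itself is hypothetical):

#include <linux/platform_device.h>
#include <linux/io.h>

static int example_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	struct resource *res;
	void __iomem *base;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* DEFINE_RES_MEM(...) */
	irq = platform_get_irq(pdev, 0);			/* DEFINE_RES_IRQ(evt2irq(...)) */
	if (!res || irq < 0)
		return -ENODEV;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* ... request_irq(irq, ...), register with the subsystem ... */
	return 0;
}
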
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index f26b5cdad0d..f75f6734313 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -13,39 +13,246 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/usb/r8a66597.h>
+#include <asm/clock.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .port_reg = 0xa405013e,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
};
-static struct platform_device sci_device = {
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
+};
+
+static struct platform_device scif0_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct resource iic_resources[] = {
+ [0] = {
+ .name = "IIC",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xe00),
+ .end = evt2irq(0xe60),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic_resources),
+ .resource = iic_resources,
+};
+
+static struct r8a66597_platdata r8a66597_data = {
+ .on_chip = 1,
+};
+
+static struct resource usb_host_resources[] = {
+ [0] = {
+ .start = 0xa4d80000,
+ .end = 0xa4d800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xa20),
+ .end = evt2irq(0xa20),
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device usb_host_device = {
+ .name = "r8a66597_hcd",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &r8a66597_data,
+ },
+ .num_resources = ARRAY_SIZE(usb_host_resources),
+ .resource = usb_host_resources,
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU5",
+ .version = "0",
+ .irq = evt2irq(0x980),
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe902807,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu0_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = evt2irq(0x8c0),
+};
+
+static struct resource veu0_resources[] = {
+ [0] = {
+ .name = "VEU(1)",
+ .start = 0xfe920000,
+ .end = 0xfe9200b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu0_platform_data,
},
+ .resource = veu0_resources,
+ .num_resources = ARRAY_SIZE(veu0_resources),
+};
+
+static struct uio_info veu1_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = evt2irq(0x560),
+};
+
+static struct resource veu1_resources[] = {
+ [0] = {
+ .name = "VEU(2)",
+ .start = 0xfe924000,
+ .end = 0xfe9240b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &veu1_platform_data,
+ },
+ .resource = veu1_resources,
+ .num_resources = ARRAY_SIZE(veu1_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 0x20,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0x044a0000, 0x70),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-32",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7366_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &cmt_device,
+ &tmu0_device,
+ &iic_device,
+ &usb_host_device,
+ &vpu_device,
+ &veu0_device,
+ &veu1_device,
};
static int __init sh7366_devices_setup(void)
{
+ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
+ platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
+ platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
+
return platform_add_devices(sh7366_devices,
ARRAY_SIZE(sh7366_devices));
}
-__initcall(sh7366_devices_setup);
+arch_initcall(sh7366_devices_setup);
+
+static struct platform_device *sh7366_early_devices[] __initdata = {
+ &scif0_device,
+ &cmt_device,
+ &tmu0_device,
+};
+
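+/*
+ * The serial port and the timers are also registered as early platform
+ * devices, so the early console and early timer code can probe them
+ * before the regular driver model comes up.
+ */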
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7366_early_devices,
+ ARRAY_SIZE(sh7366_early_devices));
+}
enum {
UNUSED=0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -59,14 +266,13 @@ enum {
DENC, MSIOF,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
- SDHI0, SDHI1, SDHI2, SDHI3,
- CMT, TSIF, SIU,
+ SDHI, CMT, TSIF, SIU,
TMU0, TMU1, TMU2,
VEU2, LCDC,
/* interrupt groups */
- DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
};
static struct intc_vect vectors[] __initdata = {
@@ -91,13 +297,13 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
- INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
- INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
- INTC_VECT(VEU2, 0x580), INTC_VECT(LCDC, 0x580),
+ INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580),
};
static struct intc_group groups[] __initdata = {
@@ -108,7 +314,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
- INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -130,7 +335,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -163,8 +368,18 @@ static struct intc_sense_reg sense_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7366", vectors, groups,
- mask_registers, prio_registers, sense_registers);
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
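+/*
+ * The four SDHI event vectors are folded into a single SDHI source and
+ * their mask bits replaced by ENABLED/DISABLED markers in IMR8 above:
+ * force_enable keeps the ENABLED bits permanently unmasked and
+ * force_disable keeps the DISABLED bit permanently masked, leaving the
+ * fine-grained sub-interrupt masking to the SDHI block itself.
+ */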
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7366",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index b98b4bc93ec..57f83a92a50 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -1,91 +1,531 @@
/*
* SH7722 Setup
*
- * Copyright (C) 2006 - 2007 Paul Mundt
+ * Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#include <linux/platform_device.h>
#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
-#include <linux/mm.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/uio_driver.h>
+#include <linux/usb/m66592.h>
+
+#include <asm/clock.h>
#include <asm/mmzone.h>
+#include <asm/siu.h>
+
+#include <cpu/dma-register.h>
+#include <cpu/sh7722.h>
+#include <cpu/serial.h>
+
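+/*
+ * DMA slave descriptions: 'addr' is the peripheral's fixed data register,
+ * 'chcr' pre-sets the transfer direction and unit size for the channel,
+ * and 'mid_rid' is the request source value written to DMARS to route the
+ * peripheral's DMA request to the channel.
+ */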
+static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF0_TX,
+ .addr = 0xffe0000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF0_RX,
+ .addr = 0xffe00014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_TX,
+ .addr = 0xffe1000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x25,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_RX,
+ .addr = 0xffe10014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x26,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0xffe2000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0xffe20014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUA_TX,
+ .addr = 0xa454c098,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUA_RX,
+ .addr = 0xa454c090,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb2,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUB_TX,
+ .addr = 0xa454c09c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb5,
+ }, {
+ .slave_id = SHDMA_SLAVE_SIUB_RX,
+ .addr = 0xa454c094,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xb6,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0x04ce0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0x04ce0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ },
+};
+
+static const struct sh_dmae_channel sh7722_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma_platform_data = {
+ .slave = sh7722_dmae_slaves,
+ .slave_num = ARRAY_SIZE(sh7722_dmae_slaves),
+ .channel = sh7722_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7722_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct resource sh7722_dmae_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0xbc0),
+ .end = evt2irq(0xbc0),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-3 */
+ .start = evt2irq(0x800),
+ .end = evt2irq(0x860),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 4-5 */
+ .start = evt2irq(0xb80),
+ .end = evt2irq(0xba0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device dma_device = {
+ .name = "sh-dma-engine",
+ .id = -1,
+ .resource = sh7722_dmae_resources,
+ .num_resources = ARRAY_SIZE(sh7722_dmae_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe10000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc20)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .ops = &sh7722_sci_port_ops,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffe20000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc40)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
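+/*
+ * The RTC register window is declared as an IO resource rather than a
+ * memory resource, matching what the sh-rtc driver looks up.
+ */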
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa465fec0,
+ .end = 0xa465fec0 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = evt2irq(0x7a0),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = evt2irq(0x7c0),
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = evt2irq(0x780),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct m66592_platdata usbf_platdata = {
+ .on_chip = 1,
+};
static struct resource usbf_resources[] = {
[0] = {
- .name = "m66592_udc",
- .start = 0xA4480000,
- .end = 0xA44800FF,
+ .name = "USBF",
+ .start = 0x04480000,
+ .end = 0x044800FF,
.flags = IORESOURCE_MEM,
},
[1] = {
- .name = "m66592_udc",
- .start = 65,
- .end = 65,
+ .start = evt2irq(0xa20),
+ .end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbf_device = {
.name = "m66592_udc",
- .id = -1,
+ .id = 0, /* "usbf0" clock */
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbf_platdata,
},
.num_resources = ARRAY_SIZE(usbf_resources),
.resource = usbf_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 80, 80, 80, 80 },
+static struct resource iic_resources[] = {
+ [0] = {
+ .name = "IIC",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
},
- {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 81, 81, 81, 81 },
+ [1] = {
+ .start = evt2irq(0xe00),
+ .end = evt2irq(0xe60),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic_resources),
+ .resource = iic_resources,
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU4",
+ .version = "0",
+ .irq = evt2irq(0x980),
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe9022eb,
+ .flags = IORESOURCE_MEM,
},
- {
- .mapbase = 0xffe20000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 82, 82, 82, 82 },
+ [1] = {
+ /* place holder for contiguous memory */
},
- {
- .flags = 0,
- }
};
-static struct platform_device sci_device = {
- .name = "sh-sci",
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu_platform_data = {
+ .name = "VEU",
+ .version = "0",
+ .irq = evt2irq(0x8c0),
+};
+
+static struct resource veu_resources[] = {
+ [0] = {
+ .name = "VEU",
+ .start = 0xfe920000,
+ .end = 0xfe9200b7,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu_platform_data,
+ },
+ .resource = veu_resources,
+ .num_resources = ARRAY_SIZE(veu_resources),
+};
+
+static struct uio_info jpu_platform_data = {
+ .name = "JPU",
+ .version = "0",
+ .irq = evt2irq(0x560),
+};
+
+static struct resource jpu_resources[] = {
+ [0] = {
+ .name = "JPU",
+ .start = 0xfea00000,
+ .end = 0xfea102d3,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device jpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &jpu_platform_data,
+ },
+ .resource = jpu_resources,
+ .num_resources = ARRAY_SIZE(jpu_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 0x20,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0x044a0000, 0x70),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-32",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct siu_platform siu_platform_data = {
+ .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
+ .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
+ .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
+ .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
+};
+
+static struct resource siu_resources[] = {
+ [0] = {
+ .start = 0xa4540000,
+ .end = 0xa454c10f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xf80),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device siu_device = {
+ .name = "siu-pcm-audio",
.id = -1,
- .dev = {
- .platform_data = sci_platform_data,
+ .dev = {
+ .platform_data = &siu_platform_data,
},
+ .resource = siu_resources,
+ .num_resources = ARRAY_SIZE(siu_resources),
};
static struct platform_device *sh7722_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &cmt_device,
+ &tmu0_device,
+ &rtc_device,
&usbf_device,
- &sci_device,
+ &iic_device,
+ &vpu_device,
+ &veu_device,
+ &jpu_device,
+ &siu_device,
+ &dma_device,
};
static int __init sh7722_devices_setup(void)
{
+ platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20);
+ platform_resource_setup_memory(&veu_device, "veu", 2 << 20);
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+
return platform_add_devices(sh7722_devices,
ARRAY_SIZE(sh7722_devices));
}
-__initcall(sh7722_devices_setup);
+arch_initcall(sh7722_devices_setup);
+
+static struct platform_device *sh7722_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &cmt_device,
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7722_early_devices,
+ ARRAY_SIZE(sh7722_early_devices));
+}
enum {
UNUSED=0,
+ ENABLED,
+ DISABLED,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -101,13 +541,11 @@ enum {
SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO,
FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
- SDHI0, SDHI1, SDHI2, SDHI3,
CMT, TSIF, SIU, TWODG,
TMU0, TMU1, TMU2,
IRDA, JPU, LCDC,
/* interrupt groups */
-
SIM, RTC, DMAC0123, VIOVOU, USB, DMAC45, FLCTL, I2C, SDHI,
};
@@ -135,8 +573,8 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
- INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
- INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+ INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+ INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -154,7 +592,6 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
- INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -176,7 +613,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
- { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, TWODG, SIU } },
+ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
{ 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -209,8 +646,18 @@ static struct intc_sense_reg sense_registers[] __initdata = {
{ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7722", vectors, groups,
- mask_registers, prio_registers, sense_registers);
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7722",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
void __init plat_irq_setup(void)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
new file mode 100644
index 00000000000..3533b56dd46
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -0,0 +1,661 @@
+/*
+ * SH7723 Setup
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/mm.h>
+#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/usb/r8a66597.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/io.h>
+#include <asm/clock.h>
+#include <asm/mmzone.h>
+#include <cpu/sh7723.h>
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .port_reg = 0xa4050160,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe10000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc20)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffe20000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc40)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .sampling_rate = 8,
+ .type = PORT_SCIFA,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xa4e30000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .sampling_rate = 8,
+ .type = PORT_SCIFA,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xa4e40000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xd00)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .sampling_rate = 8,
+ .type = PORT_SCIFA,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xa4e50000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xfa0)),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct uio_info vpu_platform_data = {
+ .name = "VPU5",
+ .version = "0",
+ .irq = evt2irq(0x980),
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe902807,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+static struct uio_info veu0_platform_data = {
+ .name = "VEU2H",
+ .version = "0",
+ .irq = evt2irq(0x8c0),
+};
+
+static struct resource veu0_resources[] = {
+ [0] = {
+ .name = "VEU2H0",
+ .start = 0xfe920000,
+ .end = 0xfe92027b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu0_platform_data,
+ },
+ .resource = veu0_resources,
+ .num_resources = ARRAY_SIZE(veu0_resources),
+};
+
+static struct uio_info veu1_platform_data = {
+ .name = "VEU2H",
+ .version = "0",
+ .irq = evt2irq(0x560),
+};
+
+static struct resource veu1_resources[] = {
+ [0] = {
+ .name = "VEU2H1",
+ .start = 0xfe924000,
+ .end = 0xfe92427b,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &veu1_platform_data,
+ },
+ .resource = veu1_resources,
+ .num_resources = ARRAY_SIZE(veu1_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 0x20,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0x044a0000, 0x70),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-32",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffd90000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x920)),
+ DEFINE_RES_IRQ(evt2irq(0x940)),
+ DEFINE_RES_IRQ(evt2irq(0x960)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa465fec0,
+ .end = 0xa465fec0 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = evt2irq(0xaa0),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = evt2irq(0xac0),
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = evt2irq(0xa80),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct r8a66597_platdata r8a66597_data = {
+ .on_chip = 1,
+};
+
+static struct resource sh7723_usb_host_resources[] = {
+ [0] = {
+ .start = 0xa4d80000,
+ .end = 0xa4d800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xa20),
+ .end = evt2irq(0xa20),
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
+ },
+};
+
+static struct platform_device sh7723_usb_host_device = {
+ .name = "r8a66597_hcd",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL, /* DMA not used */
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &r8a66597_data,
+ },
+ .num_resources = ARRAY_SIZE(sh7723_usb_host_resources),
+ .resource = sh7723_usb_host_resources,
+};
+
+static struct resource iic_resources[] = {
+ [0] = {
+ .name = "IIC",
+ .start = 0x04470000,
+ .end = 0x04470017,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xe00),
+ .end = evt2irq(0xe60),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic_resources),
+ .resource = iic_resources,
+};
+
+static struct platform_device *sh7723_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &rtc_device,
+ &iic_device,
+ &sh7723_usb_host_device,
+ &vpu_device,
+ &veu0_device,
+ &veu1_device,
+};
+
+static int __init sh7723_devices_setup(void)
+{
+ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
+ platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
+ platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
+
+ return platform_add_devices(sh7723_devices,
+ ARRAY_SIZE(sh7723_devices));
+}
+arch_initcall(sh7723_devices_setup);
+
+static struct platform_device *sh7723_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7723_early_devices,
+ ARRAY_SIZE(sh7723_early_devices));
+}
+
+#define RAMCR_CACHE_L2FC 0x0002
+#define RAMCR_CACHE_L2E 0x0001
+#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
+
+void l2_cache_init(void)
+{
+ /* Enable L2 cache */
+ __raw_writel(L2_CACHE_ENABLE, RAMCR);
+}
+
+enum {
+ UNUSED=0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ HUDI,
+ DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3,
+ _2DG_TRI,_2DG_INI,_2DG_CEI,
+ DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3,
+ VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI,
+ SCIFA_SCIFA0,
+ VPU_VPUI,
+ TPU_TPUI,
+ ADC_ADI,
+ USB_USI0,
+ RTC_ATI,RTC_PRI,RTC_CUI,
+ DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR,
+ DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR,
+ KEYSC_KEYI,
+ SCIF_SCIF0,SCIF_SCIF1,SCIF_SCIF2,
+ MSIOF_MSIOFI0,MSIOF_MSIOFI1,
+ SCIFA_SCIFA1,
+ FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I,
+ I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI,
+ CMT_CMTI,
+ TSIF_TSIFI,
+ SIU_SIUI,
+ SCIFA_SCIFA2,
+ TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
+ IRDA_IRDAI,
+ ATAPI_ATAPII,
+ VEU2H1_VEU2HI,
+ LCDC_LCDCI,
+ TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2,
+
+ /* interrupt groups */
+ DMAC1A, DMAC0A, VIO, DMAC0B, FLCTL, I2C, _2DG,
+ SDHI1, RTC, DMAC1B, SDHI0,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+
+ INTC_VECT(DMAC1A_DEI0,0x700),
+ INTC_VECT(DMAC1A_DEI1,0x720),
+ INTC_VECT(DMAC1A_DEI2,0x740),
+ INTC_VECT(DMAC1A_DEI3,0x760),
+
+ INTC_VECT(_2DG_TRI, 0x780),
+ INTC_VECT(_2DG_INI, 0x7A0),
+ INTC_VECT(_2DG_CEI, 0x7C0),
+
+ INTC_VECT(DMAC0A_DEI0,0x800),
+ INTC_VECT(DMAC0A_DEI1,0x820),
+ INTC_VECT(DMAC0A_DEI2,0x840),
+ INTC_VECT(DMAC0A_DEI3,0x860),
+
+ INTC_VECT(VIO_CEUI,0x880),
+ INTC_VECT(VIO_BEUI,0x8A0),
+ INTC_VECT(VIO_VEU2HI,0x8C0),
+ INTC_VECT(VIO_VOUI,0x8E0),
+
+ INTC_VECT(SCIFA_SCIFA0,0x900),
+ INTC_VECT(VPU_VPUI,0x980),
+ INTC_VECT(TPU_TPUI,0x9A0),
+ INTC_VECT(ADC_ADI,0x9E0),
+ INTC_VECT(USB_USI0,0xA20),
+
+ INTC_VECT(RTC_ATI,0xA80),
+ INTC_VECT(RTC_PRI,0xAA0),
+ INTC_VECT(RTC_CUI,0xAC0),
+
+ INTC_VECT(DMAC1B_DEI4,0xB00),
+ INTC_VECT(DMAC1B_DEI5,0xB20),
+ INTC_VECT(DMAC1B_DADERR,0xB40),
+
+ INTC_VECT(DMAC0B_DEI4,0xB80),
+ INTC_VECT(DMAC0B_DEI5,0xBA0),
+ INTC_VECT(DMAC0B_DADERR,0xBC0),
+
+ INTC_VECT(KEYSC_KEYI,0xBE0),
+ INTC_VECT(SCIF_SCIF0,0xC00),
+ INTC_VECT(SCIF_SCIF1,0xC20),
+ INTC_VECT(SCIF_SCIF2,0xC40),
+ INTC_VECT(MSIOF_MSIOFI0,0xC80),
+ INTC_VECT(MSIOF_MSIOFI1,0xCA0),
+ INTC_VECT(SCIFA_SCIFA1,0xD00),
+
+ INTC_VECT(FLCTL_FLSTEI,0xD80),
+ INTC_VECT(FLCTL_FLTENDI,0xDA0),
+ INTC_VECT(FLCTL_FLTREQ0I,0xDC0),
+ INTC_VECT(FLCTL_FLTREQ1I,0xDE0),
+
+ INTC_VECT(I2C_ALI,0xE00),
+ INTC_VECT(I2C_TACKI,0xE20),
+ INTC_VECT(I2C_WAITI,0xE40),
+ INTC_VECT(I2C_DTEI,0xE60),
+
+ INTC_VECT(SDHI0, 0xE80),
+ INTC_VECT(SDHI0, 0xEA0),
+ INTC_VECT(SDHI0, 0xEC0),
+
+ INTC_VECT(CMT_CMTI,0xF00),
+ INTC_VECT(TSIF_TSIFI,0xF20),
+ INTC_VECT(SIU_SIUI,0xF80),
+ INTC_VECT(SCIFA_SCIFA2,0xFA0),
+
+ INTC_VECT(TMU0_TUNI0,0x400),
+ INTC_VECT(TMU0_TUNI1,0x420),
+ INTC_VECT(TMU0_TUNI2,0x440),
+
+ INTC_VECT(IRDA_IRDAI,0x480),
+ INTC_VECT(ATAPI_ATAPII,0x4A0),
+
+ INTC_VECT(SDHI1, 0x4E0),
+ INTC_VECT(SDHI1, 0x500),
+ INTC_VECT(SDHI1, 0x520),
+
+ INTC_VECT(VEU2H1_VEU2HI,0x560),
+ INTC_VECT(LCDC_LCDCI,0x580),
+
+ INTC_VECT(TMU1_TUNI0,0x920),
+ INTC_VECT(TMU1_TUNI1,0x940),
+ INTC_VECT(TMU1_TUNI2,0x960),
+
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC1A,DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3),
+ INTC_GROUP(DMAC0A,DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3),
+ INTC_GROUP(VIO, VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI),
+ INTC_GROUP(DMAC0B, DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR),
+ INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I),
+ INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI),
+ INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI),
+ INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI),
+ INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
+ 0, ENABLED, ENABLED, ENABLED } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU_VPUI,0,0,0,SCIFA_SCIFA0 } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { DMAC1A_DEI3,DMAC1A_DEI2,DMAC1A_DEI1,DMAC1A_DEI0,0,0,0,IRDA_IRDAI } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0,TMU0_TUNI2,TMU0_TUNI1,TMU0_TUNI0,VEU2H1_VEU2HI,0,0,LCDC_LCDCI } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC_KEYI,DMAC0B_DADERR,DMAC0B_DEI5,DMAC0B_DEI4,0,SCIF_SCIF2,SCIF_SCIF1,SCIF_SCIF0 } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0,0,0,SCIFA_SCIFA1,ADC_ADI,0,MSIOF_MSIOFI1,MSIOF_MSIOFI0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
+ FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { 0, ENABLED, ENABLED, ENABLED,
+ 0, 0, SCIFA_SCIFA2, SIU_SIUI } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { 0, DMAC1B_DADERR,DMAC1B_DEI5,DMAC1B_DEI4,0,RTC_ATI,RTC_PRI,RTC_CUI } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0,_2DG_CEI,_2DG_INI,_2DG_TRI,0,TPU_TPUI,0,TSIF_TSIFI } },
+ { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
+ { 0,0,0,0,0,0,0,ATAPI_ATAPII } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA_IRDAI } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2H1_VEU2HI, LCDC_LCDCI, DMAC1A, 0} },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, 0} },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA_SCIFA0, VPU_VPUI } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC_KEYI, DMAC0B, USB_USI0, CMT_CMTI } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,0 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0,MSIOF_MSIOFI1, FLCTL, I2C } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA_SCIFA1,0,TSIF_TSIFI,_2DG } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { ADC_ADI,0,SIU_SIUI,SDHI1 } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC,DMAC1B,0,SDHI0 } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA_SCIFA2,0,TPU_TPUI,ATAPI_ATAPII } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7723",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
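+/*
+ * Called from the generic SH IRQ setup code; this simply hands the
+ * controller description above to the shared INTC driver.
+ */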
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
new file mode 100644
index 00000000000..b9e84b1d3aa
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -0,0 +1,1305 @@
+/*
+ * SH7724 Setup
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on SH7723 Setup
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/mm.h>
+#include <linux/serial_sci.h>
+#include <linux/uio_driver.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/io.h>
+#include <linux/notifier.h>
+
+#include <asm/suspend.h>
+#include <asm/clock.h>
+#include <asm/mmzone.h>
+
+#include <cpu/dma-register.h>
+#include <cpu/sh7724.h>
+
+/* DMA */
+static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF0_TX,
+ .addr = 0xffe0000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF0_RX,
+ .addr = 0xffe00014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_TX,
+ .addr = 0xffe1000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x25,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF1_RX,
+ .addr = 0xffe10014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x26,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0xffe2000c,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0xffe20014,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF3_TX,
+ .addr = 0xa4e30020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2d,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF3_RX,
+ .addr = 0xa4e30024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2e,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF4_TX,
+ .addr = 0xa4e40020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x31,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF4_RX,
+ .addr = 0xa4e40024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x32,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF5_TX,
+ .addr = 0xa4e50020,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x35,
+ }, {
+ .slave_id = SHDMA_SLAVE_SCIF5_RX,
+ .addr = 0xa4e50024,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x36,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D0_TX,
+ .addr = 0xA4D80100,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x73,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D0_RX,
+ .addr = 0xA4D80100,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x73,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D1_TX,
+ .addr = 0xA4D80120,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x77,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0D1_RX,
+ .addr = 0xA4D80120,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0x77,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D0_TX,
+ .addr = 0xA4D90100,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xab,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D0_RX,
+ .addr = 0xA4D90100,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xab,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D1_TX,
+ .addr = 0xA4D90120,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xaf,
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1D1_RX,
+ .addr = 0xA4D90120,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xaf,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0x04ce0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0x04ce0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_TX,
+ .addr = 0x04cf0030,
+ .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc9,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_RX,
+ .addr = 0x04cf0030,
+ .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xca,
+ },
+};
+
+static const struct sh_dmae_channel sh7724_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma_platform_data = {
+ .slave = sh7724_dmae_slaves,
+ .slave_num = ARRAY_SIZE(sh7724_dmae_slaves),
+ .channel = sh7724_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7724_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* Resource order important! */
+static struct resource sh7724_dmae0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0xbc0),
+ .end = evt2irq(0xbc0),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-3 */
+ .start = evt2irq(0x800),
+ .end = evt2irq(0x860),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 4-5 */
+ .start = evt2irq(0xb80),
+ .end = evt2irq(0xba0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* Resource order important! */
+static struct resource sh7724_dmae1_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfdc08020,
+ .end = 0xfdc0808f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfdc09000,
+ .end = 0xfdc0900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0xb40),
+ .end = evt2irq(0xb40),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-3 */
+ .start = evt2irq(0x700),
+ .end = evt2irq(0x760),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 4-5 */
+ .start = evt2irq(0xb00),
+ .end = evt2irq(0xb20),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
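+/*
+ * Both DMAC units share the same slave/channel description; only the
+ * register bases and IRQs in the resource lists above differ.
+ */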
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7724_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7724_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+/* Serial */
+static struct plat_sci_port scif0_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc00)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe10000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc20)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffe20000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xc40)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .sampling_rate = 8,
+ .type = PORT_SCIFA,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xa4e30000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .sampling_rate = 8,
+ .type = PORT_SCIFA,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xa4e40000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xd00)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .port_reg = SCIx_NOT_SUPPORTED,
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE,
+ .sampling_rate = 8,
+ .type = PORT_SCIFA,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xa4e50000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xfa0)),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+/* RTC */
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xa465fec0,
+ .end = 0xa465fec0 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = evt2irq(0xaa0),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = evt2irq(0xac0),
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = evt2irq(0xa80),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+/* I2C0 */
+static struct resource iic0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0x04470000,
+ .end = 0x04470018 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xe00),
+ .end = evt2irq(0xe60),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic0_device = {
+ .name = "i2c-sh_mobile",
+ .id = 0, /* "i2c0" clock */
+ .num_resources = ARRAY_SIZE(iic0_resources),
+ .resource = iic0_resources,
+};
+
+/* I2C1 */
+static struct resource iic1_resources[] = {
+ [0] = {
+ .name = "IIC1",
+ .start = 0x04750000,
+ .end = 0x04750018 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xd80),
+ .end = evt2irq(0xde0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device iic1_device = {
+ .name = "i2c-sh_mobile",
+ .id = 1, /* "i2c1" clock */
+ .num_resources = ARRAY_SIZE(iic1_resources),
+ .resource = iic1_resources,
+};
+
+/* VPU */
+static struct uio_info vpu_platform_data = {
+ .name = "VPU5F",
+ .version = "0",
+ .irq = evt2irq(0x980),
+};
+
+static struct resource vpu_resources[] = {
+ [0] = {
+ .name = "VPU",
+ .start = 0xfe900000,
+ .end = 0xfe902807,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device vpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 0,
+ .dev = {
+ .platform_data = &vpu_platform_data,
+ },
+ .resource = vpu_resources,
+ .num_resources = ARRAY_SIZE(vpu_resources),
+};
+
+/* VEU0 */
+static struct uio_info veu0_platform_data = {
+ .name = "VEU3F0",
+ .version = "0",
+ .irq = evt2irq(0xc60),
+};
+
+static struct resource veu0_resources[] = {
+ [0] = {
+ .name = "VEU3F0",
+ .start = 0xfe920000,
+ .end = 0xfe9200cb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 1,
+ .dev = {
+ .platform_data = &veu0_platform_data,
+ },
+ .resource = veu0_resources,
+ .num_resources = ARRAY_SIZE(veu0_resources),
+};
+
+/* VEU1 */
+static struct uio_info veu1_platform_data = {
+ .name = "VEU3F1",
+ .version = "0",
+ .irq = evt2irq(0x8c0),
+};
+
+static struct resource veu1_resources[] = {
+ [0] = {
+ .name = "VEU3F1",
+ .start = 0xfe924000,
+ .end = 0xfe9240cb,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device veu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 2,
+ .dev = {
+ .platform_data = &veu1_platform_data,
+ },
+ .resource = veu1_resources,
+ .num_resources = ARRAY_SIZE(veu1_resources),
+};
+
+/* BEU0 */
+static struct uio_info beu0_platform_data = {
+ .name = "BEU0",
+ .version = "0",
+ .irq = evt2irq(0x8A0),
+};
+
+static struct resource beu0_resources[] = {
+ [0] = {
+ .name = "BEU0",
+ .start = 0xfe930000,
+ .end = 0xfe933400,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device beu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 6,
+ .dev = {
+ .platform_data = &beu0_platform_data,
+ },
+ .resource = beu0_resources,
+ .num_resources = ARRAY_SIZE(beu0_resources),
+};
+
+/* BEU1 */
+static struct uio_info beu1_platform_data = {
+ .name = "BEU1",
+ .version = "0",
+ .irq = evt2irq(0xA00),
+};
+
+static struct resource beu1_resources[] = {
+ [0] = {
+ .name = "BEU1",
+ .start = 0xfe940000,
+ .end = 0xfe943400,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device beu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 7,
+ .dev = {
+ .platform_data = &beu1_platform_data,
+ },
+ .resource = beu1_resources,
+ .num_resources = ARRAY_SIZE(beu1_resources),
+};
+
+static struct sh_timer_config cmt_platform_data = {
+ .channels_mask = 0x20,
+};
+
+static struct resource cmt_resources[] = {
+ DEFINE_RES_MEM(0x044a0000, 0x70),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device cmt_device = {
+ .name = "sh-cmt-32",
+ .id = 0,
+ .dev = {
+ .platform_data = &cmt_platform_data,
+ },
+ .resource = cmt_resources,
+ .num_resources = ARRAY_SIZE(cmt_resources),
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffd90000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x920)),
+ DEFINE_RES_IRQ(evt2irq(0x940)),
+ DEFINE_RES_IRQ(evt2irq(0x960)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+/* JPU */
+static struct uio_info jpu_platform_data = {
+ .name = "JPU",
+ .version = "0",
+ .irq = evt2irq(0x560),
+};
+
+static struct resource jpu_resources[] = {
+ [0] = {
+ .name = "JPU",
+ .start = 0xfe980000,
+ .end = 0xfe9902d3,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device jpu_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 3,
+ .dev = {
+ .platform_data = &jpu_platform_data,
+ },
+ .resource = jpu_resources,
+ .num_resources = ARRAY_SIZE(jpu_resources),
+};
+
+/* SPU2DSP0 */
+static struct uio_info spu0_platform_data = {
+ .name = "SPU2DSP0",
+ .version = "0",
+ .irq = evt2irq(0xcc0),
+};
+
+static struct resource spu0_resources[] = {
+ [0] = {
+ .name = "SPU2DSP0",
+ .start = 0xFE200000,
+ .end = 0xFE2FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device spu0_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 4,
+ .dev = {
+ .platform_data = &spu0_platform_data,
+ },
+ .resource = spu0_resources,
+ .num_resources = ARRAY_SIZE(spu0_resources),
+};
+
+/* SPU2DSP1 */
+static struct uio_info spu1_platform_data = {
+ .name = "SPU2DSP1",
+ .version = "0",
+ .irq = evt2irq(0xce0),
+};
+
+static struct resource spu1_resources[] = {
+ [0] = {
+ .name = "SPU2DSP1",
+ .start = 0xFE300000,
+ .end = 0xFE3FFFFF,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* place holder for contiguous memory */
+ },
+};
+
+static struct platform_device spu1_device = {
+ .name = "uio_pdrv_genirq",
+ .id = 5,
+ .dev = {
+ .platform_data = &spu1_platform_data,
+ },
+ .resource = spu1_resources,
+ .num_resources = ARRAY_SIZE(spu1_resources),
+};
+
+static struct platform_device *sh7724_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+ &dma0_device,
+ &dma1_device,
+ &rtc_device,
+ &iic0_device,
+ &iic1_device,
+ &vpu_device,
+ &veu0_device,
+ &veu1_device,
+ &beu0_device,
+ &beu1_device,
+ &jpu_device,
+ &spu0_device,
+ &spu1_device,
+};
+
+static int __init sh7724_devices_setup(void)
+{
+ platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20);
+ platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20);
+ platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20);
+ platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20);
+ platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20);
+ platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20);
+
+ return platform_add_devices(sh7724_devices,
+ ARRAY_SIZE(sh7724_devices));
+}
+arch_initcall(sh7724_devices_setup);
+
+static struct platform_device *sh7724_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &cmt_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7724_early_devices,
+ ARRAY_SIZE(sh7724_early_devices));
+}
+
+#define RAMCR_CACHE_L2FC 0x0002
+#define RAMCR_CACHE_L2E 0x0001
+#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
+
+void l2_cache_init(void)
+{
+ /* Enable L2 cache */
+ __raw_writel(L2_CACHE_ENABLE, RAMCR);
+}
+
+enum {
+ UNUSED = 0,
+ ENABLED,
+ DISABLED,
+
+ /* interrupt sources */
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ HUDI,
+ DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3,
+ _2DG_TRI, _2DG_INI, _2DG_CEI,
+ DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3,
+ VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU,
+ SCIFA3,
+ VPU,
+ TPU,
+ CEU1,
+ BEU1,
+ USB0, USB1,
+ ATAPI,
+ RTC_ATI, RTC_PRI, RTC_CUI,
+ DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR,
+ DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR,
+ KEYSC,
+ SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,
+ VEU0,
+ MSIOF_MSIOFI0, MSIOF_MSIOFI1,
+ SPU_SPUI0, SPU_SPUI1,
+ SCIFA4,
+ ICB,
+ ETHI,
+ I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
+ I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
+ CMT,
+ TSIF,
+ FSI,
+ SCIFA5,
+ TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2,
+ IRDA,
+ JPU,
+ _2DDMAC,
+ MMC_MMC2I, MMC_MMC3I,
+ LCDC,
+ TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2,
+
+ /* interrupt groups */
+ DMAC1A, _2DG, DMAC0A, VIO, USB, RTC,
+ DMAC1B, DMAC0B, I2C0, I2C1, SDHI0, SDHI1, SPU, MMCIF,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
+ INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
+ INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
+ INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0),
+
+ INTC_VECT(DMAC1A_DEI0, 0x700),
+ INTC_VECT(DMAC1A_DEI1, 0x720),
+ INTC_VECT(DMAC1A_DEI2, 0x740),
+ INTC_VECT(DMAC1A_DEI3, 0x760),
+
+ INTC_VECT(_2DG_TRI, 0x780),
+ INTC_VECT(_2DG_INI, 0x7A0),
+ INTC_VECT(_2DG_CEI, 0x7C0),
+
+ INTC_VECT(DMAC0A_DEI0, 0x800),
+ INTC_VECT(DMAC0A_DEI1, 0x820),
+ INTC_VECT(DMAC0A_DEI2, 0x840),
+ INTC_VECT(DMAC0A_DEI3, 0x860),
+
+ INTC_VECT(VIO_CEU0, 0x880),
+ INTC_VECT(VIO_BEU0, 0x8A0),
+ INTC_VECT(VIO_VEU1, 0x8C0),
+ INTC_VECT(VIO_VOU, 0x8E0),
+
+ INTC_VECT(SCIFA3, 0x900),
+ INTC_VECT(VPU, 0x980),
+ INTC_VECT(TPU, 0x9A0),
+ INTC_VECT(CEU1, 0x9E0),
+ INTC_VECT(BEU1, 0xA00),
+ INTC_VECT(USB0, 0xA20),
+ INTC_VECT(USB1, 0xA40),
+ INTC_VECT(ATAPI, 0xA60),
+
+ INTC_VECT(RTC_ATI, 0xA80),
+ INTC_VECT(RTC_PRI, 0xAA0),
+ INTC_VECT(RTC_CUI, 0xAC0),
+
+ INTC_VECT(DMAC1B_DEI4, 0xB00),
+ INTC_VECT(DMAC1B_DEI5, 0xB20),
+ INTC_VECT(DMAC1B_DADERR, 0xB40),
+
+ INTC_VECT(DMAC0B_DEI4, 0xB80),
+ INTC_VECT(DMAC0B_DEI5, 0xBA0),
+ INTC_VECT(DMAC0B_DADERR, 0xBC0),
+
+ INTC_VECT(KEYSC, 0xBE0),
+ INTC_VECT(SCIF_SCIF0, 0xC00),
+ INTC_VECT(SCIF_SCIF1, 0xC20),
+ INTC_VECT(SCIF_SCIF2, 0xC40),
+ INTC_VECT(VEU0, 0xC60),
+ INTC_VECT(MSIOF_MSIOFI0, 0xC80),
+ INTC_VECT(MSIOF_MSIOFI1, 0xCA0),
+ INTC_VECT(SPU_SPUI0, 0xCC0),
+ INTC_VECT(SPU_SPUI1, 0xCE0),
+ INTC_VECT(SCIFA4, 0xD00),
+
+ INTC_VECT(ICB, 0xD20),
+ INTC_VECT(ETHI, 0xD60),
+
+ INTC_VECT(I2C1_ALI, 0xD80),
+ INTC_VECT(I2C1_TACKI, 0xDA0),
+ INTC_VECT(I2C1_WAITI, 0xDC0),
+ INTC_VECT(I2C1_DTEI, 0xDE0),
+
+ INTC_VECT(I2C0_ALI, 0xE00),
+ INTC_VECT(I2C0_TACKI, 0xE20),
+ INTC_VECT(I2C0_WAITI, 0xE40),
+ INTC_VECT(I2C0_DTEI, 0xE60),
+
+ INTC_VECT(SDHI0, 0xE80),
+ INTC_VECT(SDHI0, 0xEA0),
+ INTC_VECT(SDHI0, 0xEC0),
+ INTC_VECT(SDHI0, 0xEE0),
+
+ INTC_VECT(CMT, 0xF00),
+ INTC_VECT(TSIF, 0xF20),
+ INTC_VECT(FSI, 0xF80),
+ INTC_VECT(SCIFA5, 0xFA0),
+
+ INTC_VECT(TMU0_TUNI0, 0x400),
+ INTC_VECT(TMU0_TUNI1, 0x420),
+ INTC_VECT(TMU0_TUNI2, 0x440),
+
+ INTC_VECT(IRDA, 0x480),
+
+ INTC_VECT(SDHI1, 0x4E0),
+ INTC_VECT(SDHI1, 0x500),
+ INTC_VECT(SDHI1, 0x520),
+
+ INTC_VECT(JPU, 0x560),
+ INTC_VECT(_2DDMAC, 0x4A0),
+
+ INTC_VECT(MMC_MMC2I, 0x5A0),
+ INTC_VECT(MMC_MMC3I, 0x5C0),
+
+ INTC_VECT(LCDC, 0xF40),
+
+ INTC_VECT(TMU1_TUNI0, 0x920),
+ INTC_VECT(TMU1_TUNI1, 0x940),
+ INTC_VECT(TMU1_TUNI2, 0x960),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(DMAC1A, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3),
+ INTC_GROUP(_2DG, _2DG_TRI, _2DG_INI, _2DG_CEI),
+ INTC_GROUP(DMAC0A, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3),
+ INTC_GROUP(VIO, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU),
+ INTC_GROUP(USB, USB0, USB1),
+ INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
+ INTC_GROUP(DMAC1B, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR),
+ INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR),
+ INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
+ INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
+ INTC_GROUP(SPU, SPU_SPUI0, SPU_SPUI1),
+ INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
+ { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
+ 0, ENABLED, ENABLED, ENABLED } },
+ { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
+ { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
+ DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
+ { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
+ { 0, 0, 0, VPU, ATAPI, ETHI, 0, SCIFA3 } },
+ { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */
+ { DMAC1A_DEI3, DMAC1A_DEI2, DMAC1A_DEI1, DMAC1A_DEI0,
+ SPU_SPUI1, SPU_SPUI0, BEU1, IRDA } },
+ { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */
+ { 0, TMU0_TUNI2, TMU0_TUNI1, TMU0_TUNI0,
+ JPU, 0, 0, LCDC } },
+ { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */
+ { KEYSC, DMAC0B_DADERR, DMAC0B_DEI5, DMAC0B_DEI4,
+ VEU0, SCIF_SCIF2, SCIF_SCIF1, SCIF_SCIF0 } },
+ { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */
+ { 0, 0, ICB, SCIFA4,
+ CEU1, 0, MSIOF_MSIOFI1, MSIOF_MSIOFI0 } },
+ { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */
+ { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
+ I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
+ { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
+ { DISABLED, ENABLED, ENABLED, ENABLED,
+ 0, 0, SCIFA5, FSI } },
+ { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
+ { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
+ { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
+ { 0, DMAC1B_DADERR, DMAC1B_DEI5, DMAC1B_DEI4,
+ 0, RTC_CUI, RTC_PRI, RTC_ATI } },
+ { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */
+ { 0, _2DG_CEI, _2DG_INI, _2DG_TRI,
+ 0, TPU, 0, TSIF } },
+ { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */
+ { 0, 0, MMC_MMC3I, MMC_MMC2I, 0, 0, 0, _2DDMAC } },
+ { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1,
+ TMU0_TUNI2, IRDA } },
+ { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, DMAC1A, BEU1 } },
+ { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1,
+ TMU1_TUNI2, SPU } },
+ { 0xa408000c, 0, 16, 4, /* IPRD */ { 0, MMCIF, 0, ATAPI } },
+ { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA3, VPU } },
+ { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC0B, USB, CMT } },
+ { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1,
+ SCIF_SCIF2, VEU0 } },
+ { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0, MSIOF_MSIOFI1,
+ I2C1, I2C0 } },
+ { 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA4, ICB, TSIF, _2DG } },
+ { 0xa4080024, 0, 16, 4, /* IPRJ */ { CEU1, ETHI, FSI, SDHI1 } },
+ { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC, DMAC1B, 0, SDHI0 } },
+ { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA5, 0, TPU, _2DDMAC } },
+ { 0xa4140010, 0, 32, 4, /* INTPRI00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xa414001c, 16, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xa4140024, 0, 8, /* INTREQ00 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_desc intc_desc __initdata = {
+ .name = "sh7724",
+ .force_enable = ENABLED,
+ .force_disable = DISABLED,
+ .hw = INTC_HW_DESC(vectors, groups, mask_registers,
+ prio_registers, sense_registers, ack_registers),
+};
+
+void __init plat_irq_setup(void)
+{
+ register_intc_controller(&intc_desc);
+}
+
+static struct {
+ /* BSC */
+ unsigned long mmselr;
+ unsigned long cs0bcr;
+ unsigned long cs4bcr;
+ unsigned long cs5abcr;
+ unsigned long cs5bbcr;
+ unsigned long cs6abcr;
+ unsigned long cs6bbcr;
+ unsigned long cs4wcr;
+ unsigned long cs5awcr;
+ unsigned long cs5bwcr;
+ unsigned long cs6awcr;
+ unsigned long cs6bwcr;
+ /* INTC */
+ unsigned short ipra;
+ unsigned short iprb;
+ unsigned short iprc;
+ unsigned short iprd;
+ unsigned short ipre;
+ unsigned short iprf;
+ unsigned short iprg;
+ unsigned short iprh;
+ unsigned short ipri;
+ unsigned short iprj;
+ unsigned short iprk;
+ unsigned short iprl;
+ unsigned char imr0;
+ unsigned char imr1;
+ unsigned char imr2;
+ unsigned char imr3;
+ unsigned char imr4;
+ unsigned char imr5;
+ unsigned char imr6;
+ unsigned char imr7;
+ unsigned char imr8;
+ unsigned char imr9;
+ unsigned char imr10;
+ unsigned char imr11;
+ unsigned char imr12;
+ /* RWDT */
+ unsigned short rwtcnt;
+ unsigned short rwtcsr;
+ /* CPG */
+ unsigned long irdaclk;
+ unsigned long spuclk;
+} sh7724_rstandby_state;
+
+static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb,
+ unsigned long flags, void *unused)
+{
+ if (!(flags & SUSP_SH_RSTANDBY))
+ return NOTIFY_DONE;
+
+ /* BCR */
+ sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */
+ sh7724_rstandby_state.mmselr |= 0xa5a50000;
+ sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */
+ sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */
+ sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */
+ sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */
+ sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */
+ sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */
+ sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */
+ sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */
+ sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */
+ sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */
+ sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */
+
+ /* INTC */
+ sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */
+ sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */
+ sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */
+ sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */
+ sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */
+ sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */
+ sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */
+ sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */
+ sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */
+ sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */
+ sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */
+ sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */
+ sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */
+ sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */
+ sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */
+ sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */
+ sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */
+ sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */
+ sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */
+ sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */
+ sh7724_rstandby_state.imr8 = __raw_readb(0xa40800a0); /* IMR8 */
+ sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */
+ sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */
+ sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */
+ sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */
+
+ /* RWDT */
+ sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */
+ sh7724_rstandby_state.rwtcnt |= 0x5a00;
+ sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */
+ sh7724_rstandby_state.rwtcsr |= 0xa500;
+ __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004);
+
+ /* CPG */
+ sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */
+ sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */
+
+ return NOTIFY_DONE;
+}
+
+static int sh7724_post_sleep_notifier_call(struct notifier_block *nb,
+ unsigned long flags, void *unused)
+{
+ if (!(flags & SUSP_SH_RSTANDBY))
+ return NOTIFY_DONE;
+
+ /* BCR */
+ __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */
+ __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */
+ __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */
+ __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */
+ __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */
+ __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */
+ __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */
+ __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */
+ __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */
+ __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */
+ __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */
+ __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */
+
+ /* INTC */
+ __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */
+ __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */
+ __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */
+ __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */
+ __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */
+ __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */
+ __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */
+ __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */
+ __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */
+ __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */
+ __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */
+ __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */
+ __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */
+ __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */
+ __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */
+ __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */
+ __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */
+ __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */
+ __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */
+ __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */
+ __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */
+ __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */
+ __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */
+ __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */
+ __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */
+
+ /* RWDT */
+ __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */
+ __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */
+
+ /* CPG */
+ __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */
+ __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sh7724_pre_sleep_notifier = {
+ .notifier_call = sh7724_pre_sleep_notifier_call,
+ .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU),
+};
+
+static struct notifier_block sh7724_post_sleep_notifier = {
+ .notifier_call = sh7724_post_sleep_notifier_call,
+ .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU),
+};
+
+static int __init sh7724_sleep_setup(void)
+{
+ atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list,
+ &sh7724_pre_sleep_notifier);
+
+ atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list,
+ &sh7724_post_sleep_notifier);
+ return 0;
+}
+arch_initcall(sh7724_sleep_setup);
+
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7734.c b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c
new file mode 100644
index 00000000000..f617bcb734d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c
@@ -0,0 +1,629 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/setup-sh7734.c
+ *
+ * SH7734 Setup
+ *
+ * Copyright (C) 2011,2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ * Copyright (C) 2011,2012 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/io.h>
+#include <asm/clock.h>
+#include <asm/irq.h>
+#include <cpu/sh7734.h>
+
+/* SCIF */
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe40000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x8c0)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe41000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x8e0)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffe42000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x900)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xffe43000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x920)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xffe44000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x940)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_REGTYPE,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xffe45000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x960)),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+/* RTC */
+static struct resource rtc_resources[] = {
+ [0] = {
+ .name = "rtc",
+ .start = 0xFFFC5000,
+ .end = 0xFFFC5000 + 0x26 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .start = evt2irq(0xC00),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+/* I2C 0 */
+static struct resource i2c0_resources[] = {
+ [0] = {
+ .name = "IIC0",
+ .start = 0xFFC70000,
+ .end = 0xFFC7000A - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x860),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device i2c0_device = {
+ .name = "i2c-sh7734",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(i2c0_resources),
+ .resource = i2c0_resources,
+};
+
+/* TMU */
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffd81000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x480)),
+ DEFINE_RES_IRQ(evt2irq(0x4a0)),
+ DEFINE_RES_IRQ(evt2irq(0x4c0)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu2_resources[] = {
+ DEFINE_RES_MEM(0xffd82000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x500)),
+ DEFINE_RES_IRQ(evt2irq(0x520)),
+ DEFINE_RES_IRQ(evt2irq(0x540)),
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh-tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct platform_device *sh7734_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+ &rtc_device,
+};
+
+static struct platform_device *sh7734_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7734_early_devices,
+ ARRAY_SIZE(sh7734_early_devices));
+}
+
+#define GROUP 0
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3,
+ DU,
+ TMU00, TMU10, TMU20, TMU21,
+ TMU30, TMU40, TMU50, TMU51,
+ TMU60, TMU70, TMU80,
+ RESET_WDT,
+ USB,
+ HUDI,
+ SHDMAC,
+ SSI0, SSI1, SSI2, SSI3,
+ VIN0,
+ RGPVG,
+ _2DG,
+ MMC,
+ HSPI,
+ LBSCATA,
+ I2C0,
+ RCAN0,
+ MIMLB,
+ SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5,
+ LBSCDMAC0, LBSCDMAC1, LBSCDMAC2,
+ RCAN1,
+ SDHI0, SDHI1,
+ IEBUS,
+ HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, HPBDMAC19_22, HPBDMAC23_25_27_28,
+ RTC,
+ VIN1,
+ LCDC,
+ SRC0, SRC1,
+ GETHER,
+ SDHI2,
+ GPIO0_3, GPIO4_5,
+ STIF0, STIF1,
+ ADMAC,
+ HIF,
+ FLCTL,
+ ADC,
+ MTU2,
+ RSPI,
+ QSPI,
+ HSCIF,
+ VEU3F_VE3,
+
+ /* Group */
+ /* Mask */
+ STIF_M,
+ GPIO_M,
+ HPBDMAC_M,
+ LBSCDMAC_M,
+ RCAN_M,
+ SRC_M,
+ SCIF_M,
+ LCDC_M,
+ _2DG_M,
+ VIN_M,
+ TMU_3_M,
+ TMU_0_M,
+
+ /* Priority */
+ RCAN_P,
+ LBSCDMAC_P,
+
+ /* Common */
+ SDHI,
+ SSI,
+ SPI,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(DU, 0x3E0),
+ INTC_VECT(TMU00, 0x400),
+ INTC_VECT(TMU10, 0x420),
+ INTC_VECT(TMU20, 0x440),
+ INTC_VECT(TMU30, 0x480),
+ INTC_VECT(TMU40, 0x4A0),
+ INTC_VECT(TMU50, 0x4C0),
+ INTC_VECT(TMU51, 0x4E0),
+ INTC_VECT(TMU60, 0x500),
+ INTC_VECT(TMU70, 0x520),
+ INTC_VECT(TMU80, 0x540),
+ INTC_VECT(RESET_WDT, 0x560),
+ INTC_VECT(USB, 0x580),
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(SHDMAC, 0x620),
+ INTC_VECT(SSI0, 0x6C0),
+ INTC_VECT(SSI1, 0x6E0),
+ INTC_VECT(SSI2, 0x700),
+ INTC_VECT(SSI3, 0x720),
+ INTC_VECT(VIN0, 0x740),
+ INTC_VECT(RGPVG, 0x760),
+ INTC_VECT(_2DG, 0x780),
+ INTC_VECT(MMC, 0x7A0),
+ INTC_VECT(HSPI, 0x7E0),
+ INTC_VECT(LBSCATA, 0x840),
+ INTC_VECT(I2C0, 0x860),
+ INTC_VECT(RCAN0, 0x880),
+ INTC_VECT(SCIF0, 0x8A0),
+ INTC_VECT(SCIF1, 0x8C0),
+ INTC_VECT(SCIF2, 0x900),
+ INTC_VECT(SCIF3, 0x920),
+ INTC_VECT(SCIF4, 0x940),
+ INTC_VECT(SCIF5, 0x960),
+ INTC_VECT(LBSCDMAC0, 0x9E0),
+ INTC_VECT(LBSCDMAC1, 0xA00),
+ INTC_VECT(LBSCDMAC2, 0xA20),
+ INTC_VECT(RCAN1, 0xA60),
+ INTC_VECT(SDHI0, 0xAE0),
+ INTC_VECT(SDHI1, 0xB00),
+ INTC_VECT(IEBUS, 0xB20),
+ INTC_VECT(HPBDMAC0_3, 0xB60),
+ INTC_VECT(HPBDMAC4_10, 0xB80),
+ INTC_VECT(HPBDMAC11_18, 0xBA0),
+ INTC_VECT(HPBDMAC19_22, 0xBC0),
+ INTC_VECT(HPBDMAC23_25_27_28, 0xBE0),
+ INTC_VECT(RTC, 0xC00),
+ INTC_VECT(VIN1, 0xC20),
+ INTC_VECT(LCDC, 0xC40),
+ INTC_VECT(SRC0, 0xC60),
+ INTC_VECT(SRC1, 0xC80),
+ INTC_VECT(GETHER, 0xCA0),
+ INTC_VECT(SDHI2, 0xCC0),
+ INTC_VECT(GPIO0_3, 0xCE0),
+ INTC_VECT(GPIO4_5, 0xD00),
+ INTC_VECT(STIF0, 0xD20),
+ INTC_VECT(STIF1, 0xD40),
+ INTC_VECT(ADMAC, 0xDA0),
+ INTC_VECT(HIF, 0xDC0),
+ INTC_VECT(FLCTL, 0xDE0),
+ INTC_VECT(ADC, 0xE00),
+ INTC_VECT(MTU2, 0xE20),
+ INTC_VECT(RSPI, 0xE40),
+ INTC_VECT(QSPI, 0xE60),
+ INTC_VECT(HSCIF, 0xFC0),
+ INTC_VECT(VEU3F_VE3, 0xF40),
+};
+
+static struct intc_group groups[] __initdata = {
+ /* Common */
+ INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2),
+ INTC_GROUP(SPI, HSPI, RSPI, QSPI),
+ INTC_GROUP(SSI, SSI0, SSI1, SSI2, SSI3),
+
+ /* Mask group */
+ INTC_GROUP(STIF_M, STIF0, STIF1), /* 22 */
+ INTC_GROUP(GPIO_M, GPIO0_3, GPIO4_5), /* 21 */
+ INTC_GROUP(HPBDMAC_M, HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18,
+ HPBDMAC19_22, HPBDMAC23_25_27_28), /* 19 */
+ INTC_GROUP(LBSCDMAC_M, LBSCDMAC0, LBSCDMAC1, LBSCDMAC2), /* 18 */
+ INTC_GROUP(RCAN_M, RCAN0, RCAN1, IEBUS), /* 17 */
+ INTC_GROUP(SRC_M, SRC0, SRC1), /* 16 */
+ INTC_GROUP(SCIF_M, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5,
+ HSCIF), /* 14 */
+ INTC_GROUP(LCDC_M, LCDC, MIMLB), /* 13 */
+ INTC_GROUP(_2DG_M, _2DG, RGPVG), /* 12 */
+ INTC_GROUP(VIN_M, VIN0, VIN1), /* 10 */
+ INTC_GROUP(TMU_3_M, TMU30, TMU40, TMU50, TMU51,
+ TMU60, TMU70, TMU80), /* 2 */
+ INTC_GROUP(TMU_0_M, TMU00, TMU10, TMU20, TMU21), /* 1 */
+
+ /* Priority group */
+ INTC_GROUP(RCAN_P, RCAN0, RCAN1), /* INT2PRI5 */
+ INTC_GROUP(LBSCDMAC_P, LBSCDMAC0, LBSCDMAC1), /* INT2PRI5 */
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xFF804040, 0xFF804044, 32, /* INT2MSKRG / INT2MSKCR */
+ { 0,
+ VEU3F_VE3,
+ SDHI, /* SDHI 0-2 */
+ ADMAC,
+ FLCTL,
+ RESET_WDT,
+ HIF,
+ ADC,
+ MTU2,
+ STIF_M, /* STIF 0,1 */
+ GPIO_M, /* GPIO 0-5 */
+ GETHER,
+ HPBDMAC_M, /* HPBDMAC 0_3 - 23_25_27_28 */
+ LBSCDMAC_M, /* LBSCDMAC 0 - 2 */
+ RCAN_M, /* RCAN, IEBUS */
+ SRC_M, /* SRC 0,1 */
+ LBSCATA,
+ SCIF_M, /* SCIF 0-5, HSCIF */
+ LCDC_M, /* LCDC, MIMLB */
+ _2DG_M, /* 2DG, RGPVG */
+ SPI, /* HSPI, RSPI, QSPI */
+ VIN_M, /* VIN0, 1 */
+ SSI, /* SSI 0-3 */
+ USB,
+ SHDMAC,
+ HUDI,
+ MMC,
+ RTC,
+ I2C0, /* I2C 0, 1 */
+ TMU_3_M, /* TMU30 - TMU80 */
+ TMU_0_M, /* TMU00 - TMU21 */
+ DU } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xFF804000, 0, 32, 8, /* INT2PRI0 */
+ { DU, TMU00, TMU10, TMU20 } },
+ { 0xFF804004, 0, 32, 8, /* INT2PRI1 */
+ { TMU30, TMU60, RTC, SDHI } },
+ { 0xFF804008, 0, 32, 8, /* INT2PRI2 */
+ { HUDI, SHDMAC, USB, SSI } },
+ { 0xFF80400C, 0, 32, 8, /* INT2PRI3 */
+ { VIN0, SPI, _2DG, LBSCATA } },
+ { 0xFF804010, 0, 32, 8, /* INT2PRI4 */
+ { SCIF0, SCIF3, HSCIF, LCDC } },
+ { 0xFF804014, 0, 32, 8, /* INT2PRI5 */
+ { RCAN_P, LBSCDMAC_P, LBSCDMAC2, MMC } },
+ { 0xFF804018, 0, 32, 8, /* INT2PRI6 */
+ { HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, HPBDMAC19_22 } },
+ { 0xFF80401C, 0, 32, 8, /* INT2PRI7 */
+ { HPBDMAC23_25_27_28, I2C0, SRC0, SRC1 } },
+ { 0xFF804020, 0, 32, 8, /* INT2PRI8 */
+ { 0 /* ADIF */, VIN1, RESET_WDT, HIF } },
+ { 0xFF804024, 0, 32, 8, /* INT2PRI9 */
+ { ADMAC, FLCTL, GPIO0_3, GPIO4_5 } },
+ { 0xFF804028, 0, 32, 8, /* INT2PRI10 */
+ { STIF0, STIF1, VEU3F_VE3, GETHER } },
+ { 0xFF80402C, 0, 32, 8, /* INT2PRI11 */
+ { MTU2, RGPVG, MIMLB, IEBUS } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7734", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+
+static struct intc_vect irq3210_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2C0), INTC_VECT(IRQ3, 0x300),
+};
+
+static struct intc_sense_reg irq3210_sense_registers[] __initdata = {
+ { 0xFF80201C, 32, 2, /* ICR1 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, } },
+};
+
+static struct intc_mask_reg irq3210_ack_registers[] __initdata = {
+ { 0xFF802024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, } },
+};
+
+static struct intc_mask_reg irq3210_mask_registers[] __initdata = {
+ { 0xFF802044, 0xFF802064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, } },
+};
+
+static struct intc_prio_reg irq3210_prio_registers[] __initdata = {
+ { 0xFF802010, 0, 32, 4, /* INTPRI */
+ { IRQ0, IRQ1, IRQ2, IRQ3, } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq3210, "sh7734-irq3210",
+ irq3210_vectors, NULL,
+ irq3210_mask_registers, irq3210_prio_registers,
+ irq3210_sense_registers, irq3210_ack_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect vectors_irl3210[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl3210, "sh7734-irl3210",
+ vectors_irl3210, NULL, mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xFF802000
+#define INTC_INTMSK0 0xFF802044
+#define INTC_INTMSK1 0xFF802048
+#define INTC_INTMSKCLR0 0xFF802064
+#define INTC_INTMSKCLR1 0xFF802068
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 */
+ __raw_writel(0xF0000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 */
+ __raw_writel(0x80000000, INTC_INTMSK1);
+
+ /* select IRL mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00800000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode (LVLMODE)" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq3210);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xf0000000, INTC_INTMSKCLR0);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR0);
+ register_intc_controller(&intc_desc_irl3210);
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
new file mode 100644
index 00000000000..7b24ec4b409
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -0,0 +1,1247 @@
+/*
+ * SH7757 Setup
+ *
+ * Copyright (C) 2009, 2011 Renesas Solutions Corp.
+ *
+ * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_intc.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <cpu/dma-register.h>
+#include <cpu/sh7757.h>
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xfe4b0000, 0x100), /* SCIF2 */
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xfe4c0000, 0x100), /* SCIF3 */
+ DEFINE_RES_IRQ(evt2irq(0xb80)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xfe4d0000, 0x100), /* SCIF4 */
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 3,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xfe430000, 0x20),
+ DEFINE_RES_IRQ(evt2irq(0x580)),
+ DEFINE_RES_IRQ(evt2irq(0x5a0)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct resource spi0_resources[] = {
+ [0] = {
+ .start = 0xfe002000,
+ .end = 0xfe0020ff,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
+ },
+ [1] = {
+ .start = evt2irq(0xcc0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* DMA */
+static const struct sh_dmae_slave_config sh7757_dmae0_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SDHI_TX,
+ .addr = 0x1fe50030,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc5,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SDHI_RX,
+ .addr = 0x1fe50030,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc6,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ .addr = 0x1fcb0034,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd3,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ .addr = 0x1fcb0034,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd7,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0x1f4b000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0x1f4b0014,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF3_TX,
+ .addr = 0x1f4c000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF3_RX,
+ .addr = 0x1f4c0014,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF4_TX,
+ .addr = 0x1f4d000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x41,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF4_RX,
+ .addr = 0x1f4d0014,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x42,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RSPI_TX,
+ .addr = 0xfe480004,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RSPI_RX,
+ .addr = 0xfe480004,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_RIIC0_TX,
+ .addr = 0x1e500012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC0_RX,
+ .addr = 0x1e500013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC1_TX,
+ .addr = 0x1e510012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC1_RX,
+ .addr = 0x1e510013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC2_TX,
+ .addr = 0x1e520012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa1,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC2_RX,
+ .addr = 0x1e520013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa2,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC3_TX,
+ .addr = 0x1e530012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa9,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC3_RX,
+ .addr = 0x1e530013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xaf,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC4_TX,
+ .addr = 0x1e540012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xc5,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC4_RX,
+ .addr = 0x1e540013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xc6,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_RIIC5_TX,
+ .addr = 0x1e550012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC5_RX,
+ .addr = 0x1e550013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC6_TX,
+ .addr = 0x1e560012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC6_RX,
+ .addr = 0x1e560013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC7_TX,
+ .addr = 0x1e570012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x41,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC7_RX,
+ .addr = 0x1e570013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x42,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC8_TX,
+ .addr = 0x1e580012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x45,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC8_RX,
+ .addr = 0x1e580013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x46,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC9_TX,
+ .addr = 0x1e590012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x51,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC9_RX,
+ .addr = 0x1e590013,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x52,
+ },
+};
+
+static const struct sh_dmae_channel sh7757_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .slave = sh7757_dmae0_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae0_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .slave = sh7757_dmae1_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae1_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma2_platform_data = {
+ .slave = sh7757_dmae2_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae2_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma3_platform_data = {
+ .slave = sh7757_dmae3_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae3_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* channels 0 to 5 */
+static struct resource sh7757_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff608020,
+ .end = 0xff60808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff609000,
+ .end = 0xff60900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x640),
+ .end = evt2irq(0x640),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+/* channels 6 to 11 */
+static struct resource sh7757_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff618020,
+ .end = 0xff61808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff619000,
+ .end = 0xff61900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x640),
+ .end = evt2irq(0x640),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 4 */
+ .start = evt2irq(0x7c0),
+ .end = evt2irq(0x7c0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 5 */
+ .start = evt2irq(0x7c0),
+ .end = evt2irq(0x7c0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 6 */
+ .start = evt2irq(0xd00),
+ .end = evt2irq(0xd00),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 7 */
+ .start = evt2irq(0xd00),
+ .end = evt2irq(0xd00),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 8 */
+ .start = evt2irq(0xd00),
+ .end = evt2irq(0xd00),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 9 */
+ .start = evt2irq(0xd00),
+ .end = evt2irq(0xd00),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 10 */
+ .start = evt2irq(0xd00),
+ .end = evt2irq(0xd00),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+ /* IRQ for channel 11 */
+ .start = evt2irq(0xd00),
+ .end = evt2irq(0xd00),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+/* channels 12 to 17 */
+static struct resource sh7757_dmae2_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff708020,
+ .end = 0xff70808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff709000,
+ .end = 0xff70900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x2a60),
+ .end = evt2irq(0x2a60),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 12 to 16 */
+ .start = evt2irq(0x2400),
+ .end = evt2irq(0x2480),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channel 17 */
+ .start = evt2irq(0x24e0),
+ .end = evt2irq(0x24e0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* channels 18 to 23 */
+static struct resource sh7757_dmae3_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff718020,
+ .end = 0xff71808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff719000,
+ .end = 0xff71900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x2a80),
+ .end = evt2irq(0x2a80),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 18 to 22 */
+ .start = evt2irq(0x2500),
+ .end = evt2irq(0x2580),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channel 23 */
+ .start = evt2irq(0x2600),
+ .end = evt2irq(0x2600),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7757_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7757_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae1_resources),
+ .dev = {
+ .platform_data = &dma1_platform_data,
+ },
+};
+
+static struct platform_device dma2_device = {
+ .name = "sh-dma-engine",
+ .id = 2,
+ .resource = sh7757_dmae2_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae2_resources),
+ .dev = {
+ .platform_data = &dma2_platform_data,
+ },
+};
+
+static struct platform_device dma3_device = {
+ .name = "sh-dma-engine",
+ .id = 3,
+ .resource = sh7757_dmae3_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae3_resources),
+ .dev = {
+ .platform_data = &dma3_platform_data,
+ },
+};
+
+static struct platform_device spi0_device = {
+ .name = "sh_spi",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(spi0_resources),
+ .resource = spi0_resources,
+};
+
+static struct resource spi1_resources[] = {
+ {
+ .start = 0xffd8ee70,
+ .end = 0xffd8eeff,
+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
+ },
+ {
+ .start = evt2irq(0x8c0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device spi1_device = {
+ .name = "sh_spi",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(spi1_resources),
+ .resource = spi1_resources,
+};
+
+static struct resource rspi_resources[] = {
+ {
+ .start = 0xfe480000,
+ .end = 0xfe4800ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = evt2irq(0x1d80),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rspi_device = {
+ .name = "rspi",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(rspi_resources),
+ .resource = rspi_resources,
+};
+
+static struct resource usb_ehci_resources[] = {
+ [0] = {
+ .start = 0xfe4f1000,
+ .end = 0xfe4f10ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x920),
+ .end = evt2irq(0x920),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_ehci_device = {
+ .name = "sh_ehci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_ehci_resources),
+ .resource = usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = 0xfe4f1800,
+ .end = 0xfe4f18ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x920),
+ .end = evt2irq(0x920),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct usb_ohci_pdata usb_ohci_pdata;
+
+static struct platform_device usb_ohci_device = {
+ .name = "ohci-platform",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &usb_ohci_pdata,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct platform_device *sh7757_devices[] __initdata = {
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &tmu0_device,
+ &dma0_device,
+ &dma1_device,
+ &dma2_device,
+ &dma3_device,
+ &spi0_device,
+ &spi1_device,
+ &rspi_device,
+ &usb_ehci_device,
+ &usb_ohci_device,
+};
+
+static int __init sh7757_devices_setup(void)
+{
+ return platform_add_devices(sh7757_devices,
+ ARRAY_SIZE(sh7757_devices));
+}
+arch_initcall(sh7757_devices_setup);
+
+static struct platform_device *sh7757_early_devices[] __initdata = {
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &tmu0_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7757_early_devices,
+ ARRAY_SIZE(sh7757_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+
+ SDHI, DVC,
+ IRQ8, IRQ9, IRQ11, IRQ10, IRQ12, IRQ13, IRQ14, IRQ15,
+ TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
+ HUDI,
+ ARC4,
+ DMAC0_5, DMAC6_7, DMAC8_11,
+ SCIF0, SCIF1, SCIF2, SCIF3, SCIF4,
+ USB0, USB1,
+ JMC,
+ SPI0, SPI1,
+ TMR01, TMR23, TMR45,
+ FRT,
+ LPC, LPC5, LPC6, LPC7, LPC8,
+ PECI0, PECI1, PECI2, PECI3, PECI4, PECI5,
+ ETHERC,
+ ADC0, ADC1,
+ SIM,
+ IIC0_0, IIC0_1, IIC0_2, IIC0_3,
+ IIC1_0, IIC1_1, IIC1_2, IIC1_3,
+ IIC2_0, IIC2_1, IIC2_2, IIC2_3,
+ IIC3_0, IIC3_1, IIC3_2, IIC3_3,
+ IIC4_0, IIC4_1, IIC4_2, IIC4_3,
+ IIC5_0, IIC5_1, IIC5_2, IIC5_3,
+ IIC6_0, IIC6_1, IIC6_2, IIC6_3,
+ IIC7_0, IIC7_1, IIC7_2, IIC7_3,
+ IIC8_0, IIC8_1, IIC8_2, IIC8_3,
+ IIC9_0, IIC9_1, IIC9_2, IIC9_3,
+ ONFICTL,
+ MMC1, MMC2,
+ ECCU,
+ PCIC,
+ G200,
+ RSPI,
+ SGPIO,
+ DMINT12, DMINT13, DMINT14, DMINT15, DMINT16, DMINT17, DMINT18, DMINT19,
+ DMINT20, DMINT21, DMINT22, DMINT23,
+ DDRECC,
+ TSIP,
+ PCIE_BRIDGE,
+ WDT0B, WDT1B, WDT2B, WDT3B, WDT4B, WDT5B, WDT6B, WDT7B, WDT8B,
+ GETHER0, GETHER1, GETHER2,
+ PBIA, PBIB, PBIC,
+ DMAE2, DMAE3,
+ SERMUX2, SERMUX3,
+
+ /* interrupt groups */
+
+ TMU012, TMU345,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x4a0),
+ INTC_VECT(SDHI, 0x4c0),
+ INTC_VECT(DVC, 0x4e0),
+ INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520),
+ INTC_VECT(IRQ10, 0x540),
+ INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
+ INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
+ INTC_VECT(HUDI, 0x600),
+ INTC_VECT(ARC4, 0x620),
+ INTC_VECT(DMAC0_5, 0x640), INTC_VECT(DMAC0_5, 0x660),
+ INTC_VECT(DMAC0_5, 0x680), INTC_VECT(DMAC0_5, 0x6a0),
+ INTC_VECT(DMAC0_5, 0x6c0),
+ INTC_VECT(IRQ11, 0x6e0),
+ INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720),
+ INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760),
+ INTC_VECT(DMAC0_5, 0x780), INTC_VECT(DMAC0_5, 0x7a0),
+ INTC_VECT(DMAC6_7, 0x7c0), INTC_VECT(DMAC6_7, 0x7e0),
+ INTC_VECT(USB0, 0x840),
+ INTC_VECT(IRQ12, 0x880),
+ INTC_VECT(JMC, 0x8a0),
+ INTC_VECT(SPI1, 0x8c0),
+ INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900),
+ INTC_VECT(USB1, 0x920),
+ INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20),
+ INTC_VECT(TMR45, 0xa40),
+ INTC_VECT(FRT, 0xa80),
+ INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0),
+ INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00),
+ INTC_VECT(LPC, 0xb20),
+ INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60),
+ INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0),
+ INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0),
+ INTC_VECT(PECI0, 0xc00), INTC_VECT(PECI1, 0xc20),
+ INTC_VECT(PECI2, 0xc40),
+ INTC_VECT(IRQ15, 0xc60),
+ INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0),
+ INTC_VECT(SPI0, 0xcc0),
+ INTC_VECT(ADC1, 0xce0),
+ INTC_VECT(DMAC8_11, 0xd00), INTC_VECT(DMAC8_11, 0xd20),
+ INTC_VECT(DMAC8_11, 0xd40), INTC_VECT(DMAC8_11, 0xd60),
+ INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
+ INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
+ INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
+ INTC_VECT(TMU5, 0xe40),
+ INTC_VECT(ADC0, 0xe60),
+ INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20),
+ INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60),
+ INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420),
+ INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460),
+ INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0),
+ INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520),
+ INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560),
+ INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600),
+ INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640),
+ INTC_VECT(IIC3_2, 0x16e0), INTC_VECT(IIC3_3, 0x1700),
+ INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800),
+ INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840),
+ INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880),
+ INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0),
+ INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900),
+ INTC_VECT(IIC6_2, 0x1920),
+ INTC_VECT(ONFICTL, 0x1960),
+ INTC_VECT(IIC6_3, 0x1980),
+ INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00),
+ INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40),
+ INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80),
+ INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40),
+ INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80),
+ INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20),
+ INTC_VECT(MMC1, 0x1c60), INTC_VECT(MMC2, 0x1c80),
+ INTC_VECT(ECCU, 0x1cc0),
+ INTC_VECT(PCIC, 0x1ce0),
+ INTC_VECT(G200, 0x1d00),
+ INTC_VECT(RSPI, 0x1d80), INTC_VECT(RSPI, 0x1da0),
+ INTC_VECT(RSPI, 0x1dc0), INTC_VECT(RSPI, 0x1de0),
+ INTC_VECT(PECI3, 0x1ec0), INTC_VECT(PECI4, 0x1ee0),
+ INTC_VECT(PECI5, 0x1f00),
+ INTC_VECT(SGPIO, 0x1f80), INTC_VECT(SGPIO, 0x1fa0),
+ INTC_VECT(SGPIO, 0x1fc0),
+ INTC_VECT(DMINT12, 0x2400), INTC_VECT(DMINT13, 0x2420),
+ INTC_VECT(DMINT14, 0x2440), INTC_VECT(DMINT15, 0x2460),
+ INTC_VECT(DMINT16, 0x2480), INTC_VECT(DMINT17, 0x24e0),
+ INTC_VECT(DMINT18, 0x2500), INTC_VECT(DMINT19, 0x2520),
+ INTC_VECT(DMINT20, 0x2540), INTC_VECT(DMINT21, 0x2560),
+ INTC_VECT(DMINT22, 0x2580), INTC_VECT(DMINT23, 0x2600),
+ INTC_VECT(DDRECC, 0x2620),
+ INTC_VECT(TSIP, 0x2640),
+ INTC_VECT(PCIE_BRIDGE, 0x27c0),
+ INTC_VECT(WDT0B, 0x2800), INTC_VECT(WDT1B, 0x2820),
+ INTC_VECT(WDT2B, 0x2840), INTC_VECT(WDT3B, 0x2860),
+ INTC_VECT(WDT4B, 0x2880), INTC_VECT(WDT5B, 0x28a0),
+ INTC_VECT(WDT6B, 0x28c0), INTC_VECT(WDT7B, 0x28e0),
+ INTC_VECT(WDT8B, 0x2900),
+ INTC_VECT(GETHER0, 0x2960), INTC_VECT(GETHER1, 0x2980),
+ INTC_VECT(GETHER2, 0x29a0),
+ INTC_VECT(PBIA, 0x2a00), INTC_VECT(PBIB, 0x2a20),
+ INTC_VECT(PBIC, 0x2a40),
+ INTC_VECT(DMAE2, 0x2a60), INTC_VECT(DMAE3, 0x2a80),
+ INTC_VECT(SERMUX2, 0x2aa0), INTC_VECT(SERMUX3, 0x2b40),
+ INTC_VECT(LPC5, 0x2b60), INTC_VECT(LPC6, 0x2b80),
+ INTC_VECT(LPC7, 0x2c00), INTC_VECT(LPC8, 0x2c20),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
+ INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
+
+ { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, DMAC8_11, 0, PECI0, LPC, FRT, 0, TMR45,
+ TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0_5,
+ HUDI, 0, 0, SCIF3, SCIF2, SDHI, TMU345, TMU012
+ } },
+
+ { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */
+ { IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC,
+ IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1,
+ ADC1, 0, DMAC6_7, ADC0, SPI0, SIM, PECI2, PECI1,
+ ARC4, 0, SPI1, JMC, 0, 0, 0, DVC
+ } },
+
+ { 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */
+ { IIC4_1, IIC4_2, IIC5_0, ONFICTL, 0, 0, SGPIO, 0,
+ 0, G200, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3,
+ IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1,
+ IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, 0, IIC2_2
+ } },
+
+ { 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */
+ { MMC1, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, PECI5, MMC2,
+ IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2,
+ PCIC, 0, IIC4_0, 0, ECCU, RSPI, 0, IIC9_3,
+ IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1
+ } },
+
+ { 0xffd20038, 0xffd2003c, 32, /* INT2MSKR4 / INT2MSKCR4 */
+ { WDT0B, WDT1B, WDT3B, GETHER0, 0, 0, 0, 0,
+ 0, 0, 0, LPC7, SERMUX2, DMAE3, DMAE2, PBIC,
+ PBIB, PBIA, GETHER1, DMINT12, DMINT13, DMINT14, DMINT15, TSIP,
+ DMINT23, 0, DMINT21, LPC6, 0, DMINT16, 0, DMINT22
+ } },
+
+ { 0xffd200d0, 0xffd200d4, 32, /* INT2MSKR5 / INT2MSKCR5 */
+ { 0, WDT8B, WDT7B, WDT4B, 0, DMINT20, 0, 0,
+ DMINT19, DMINT18, LPC5, SERMUX3, WDT2B, GETHER2, 0, 0,
+ 0, 0, PCIE_BRIDGE, 0, 0, 0, 0, LPC8,
+ DDRECC, 0, WDT6B, WDT5B, 0, 0, 0, DMINT17
+ } },
+};
+
+#define INTPRI 0xffd00010
+#define INT2PRI0 0xffd40000
+#define INT2PRI1 0xffd40004
+#define INT2PRI2 0xffd40008
+#define INT2PRI3 0xffd4000c
+#define INT2PRI4 0xffd40010
+#define INT2PRI5 0xffd40014
+#define INT2PRI6 0xffd40018
+#define INT2PRI7 0xffd4001c
+#define INT2PRI8 0xffd400a0
+#define INT2PRI9 0xffd400a4
+#define INT2PRI10 0xffd400a8
+#define INT2PRI11 0xffd400ac
+#define INT2PRI12 0xffd400b0
+#define INT2PRI13 0xffd400b4
+#define INT2PRI14 0xffd400b8
+#define INT2PRI15 0xffd400bc
+#define INT2PRI16 0xffd10000
+#define INT2PRI17 0xffd10004
+#define INT2PRI18 0xffd10008
+#define INT2PRI19 0xffd1000c
+#define INT2PRI20 0xffd10010
+#define INT2PRI21 0xffd10014
+#define INT2PRI22 0xffd10018
+#define INT2PRI23 0xffd1001c
+#define INT2PRI24 0xffd100a0
+#define INT2PRI25 0xffd100a4
+#define INT2PRI26 0xffd100a8
+#define INT2PRI27 0xffd100ac
+#define INT2PRI28 0xffd100b0
+#define INT2PRI29 0xffd100b4
+#define INT2PRI30 0xffd100b8
+#define INT2PRI31 0xffd100bc
+#define INT2PRI32 0xffd20000
+#define INT2PRI33 0xffd20004
+#define INT2PRI34 0xffd20008
+#define INT2PRI35 0xffd2000c
+#define INT2PRI36 0xffd20010
+#define INT2PRI37 0xffd20014
+#define INT2PRI38 0xffd20018
+#define INT2PRI39 0xffd2001c
+#define INT2PRI40 0xffd200a0
+#define INT2PRI41 0xffd200a4
+#define INT2PRI42 0xffd200a8
+#define INT2PRI43 0xffd200ac
+#define INT2PRI44 0xffd200b0
+#define INT2PRI45 0xffd200b4
+#define INT2PRI46 0xffd200b8
+#define INT2PRI47 0xffd200bc
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+
+ { INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } },
+ { INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } },
+ { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, 0, IRQ8 } },
+ { INT2PRI3, 0, 32, 8, { HUDI, DMAC0_5, ADC0, IRQ9 } },
+ { INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } },
+ { INT2PRI5, 0, 32, 8, { TMR45, 0, FRT, LPC } },
+ { INT2PRI6, 0, 32, 8, { PECI0, ETHERC, DMAC8_11, 0 } },
+ { INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } },
+ { INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } },
+ { INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } },
+ { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2, PECI1 } },
+ { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC6_7, IRQ14 } },
+ { INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } },
+ { INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } },
+
+ { INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } },
+ { INT2PRI17, 0, 32, 8, { 0, 0, 0, IIC1_0 } },
+ { INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } },
+ { INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } },
+ { INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } },
+ { INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } },
+ { INT2PRI22, 0, 32, 8, { IIC9_2, MMC2, G200, 0 } },
+ { INT2PRI23, 0, 32, 8, { PECI5, SGPIO, IIC3_2, IIC5_1 } },
+ { INT2PRI24, 0, 32, 8, { PECI4, PECI3, 0, IIC1_1 } },
+ { INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } },
+ { INT2PRI26, 0, 32, 8, { ECCU, RSPI, 0, IIC9_3 } },
+ { INT2PRI27, 0, 32, 8, { PCIC, IIC6_0, IIC4_0, IIC6_1 } },
+ { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, MMC1, IIC6_2 } },
+ { INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } },
+ { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, ONFICTL } },
+ { INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } },
+ { INT2PRI32, 0, 32, 8, { DMINT22, 0, 0, 0 } },
+ { INT2PRI33, 0, 32, 8, { 0, 0, 0, DMINT16 } },
+ { INT2PRI34, 0, 32, 8, { 0, LPC6, DMINT21, DMINT18 } },
+ { INT2PRI35, 0, 32, 8, { DMINT23, TSIP, 0, DMINT19 } },
+ { INT2PRI36, 0, 32, 8, { DMINT20, GETHER1, PBIA, PBIB } },
+ { INT2PRI37, 0, 32, 8, { PBIC, DMAE2, DMAE3, SERMUX2 } },
+ { INT2PRI38, 0, 32, 8, { LPC7, 0, 0, 0 } },
+ { INT2PRI39, 0, 32, 8, { 0, 0, 0, WDT4B } },
+ { INT2PRI40, 0, 32, 8, { 0, 0, 0, DMINT17 } },
+ { INT2PRI41, 0, 32, 8, { DDRECC, 0, WDT6B, WDT5B } },
+ { INT2PRI42, 0, 32, 8, { 0, 0, 0, LPC8 } },
+ { INT2PRI43, 0, 32, 8, { 0, WDT7B, PCIE_BRIDGE, WDT8B } },
+ { INT2PRI44, 0, 32, 8, { WDT2B, GETHER2, 0, 0 } },
+ { INT2PRI45, 0, 32, 8, { 0, 0, LPC5, SERMUX3 } },
+ { INT2PRI46, 0, 32, 8, { WDT0B, WDT1B, WDT3B, GETHER0 } },
+ { INT2PRI47, 0, 32, 8, { DMINT12, DMINT13, DMINT14, DMINT15 } },
+};
+
+static struct intc_sense_reg sense_registers_irq8to15[] __initdata = {
+ { 0xffd100f8, 32, 2, /* ICR2 */ { IRQ15, IRQ14, IRQ13, IRQ12,
+ IRQ11, IRQ10, IRQ9, IRQ8 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups,
+ mask_registers, prio_registers,
+ sense_registers_irq8to15);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
+ INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
+};
+
+static struct intc_vect vectors_irq4567[] __initdata = {
+ INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
+ INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
+};
+
+static struct intc_sense_reg sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123",
+ vectors_irq0123, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567",
+ vectors_irq4567, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect vectors_irl0123[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static struct intc_vect vectors_irl4567[] __initdata = {
+ INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220),
+ INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260),
+ INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0),
+ INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0),
+ INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320),
+ INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360),
+ INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0),
+ INTC_VECT(IRL4_HHHL, 0x3c0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123,
+ NULL, mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567,
+ NULL, mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 + IRQ7-4 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ7654:
+ /* select IRQ mode for IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq4567);
+ break;
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq0123);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl4567);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl0123);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
index 07c988dc9de..5a47d670dde 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2007 Yoshihiro Shimoda
+ * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,8 +12,77 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
+#include <linux/usb/ohci_pdriver.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe08000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xb80)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffe10000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xf00)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
static struct resource rtc_resources[] = {
[0] = {
@@ -21,18 +91,8 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- /* Period IRQ */
- .start = 21,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = 20,
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
@@ -44,30 +104,6 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
- }, {
- .mapbase = 0xffe08000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 77, 79, 78 },
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
- .dev = {
- .platform_data = sci_platform_data,
- },
-};
-
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xffec8000,
@@ -75,19 +111,23 @@ static struct resource usb_ohci_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = 83,
- .end = 83,
+ .start = evt2irq(0xc60),
+ .end = evt2irq(0xc60),
.flags = IORESOURCE_IRQ,
},
};
static u64 usb_ohci_dma_mask = 0xffffffffUL;
+
+static struct usb_ohci_pdata usb_ohci_pdata;
+
static struct platform_device usb_ohci_device = {
- .name = "sh_ohci",
+ .name = "ohci-platform",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_dma_mask,
.coherent_dma_mask = 0xffffffff,
+ .platform_data = &usb_ohci_pdata,
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
@@ -100,8 +140,8 @@ static struct resource usbf_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = 84,
- .end = 84,
+ .start = evt2irq(0xc80),
+ .end = evt2irq(0xc80),
.flags = IORESOURCE_IRQ,
},
};
@@ -117,9 +157,55 @@ static struct platform_device usbf_device = {
.resource = usbf_resources,
};
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x580)),
+ DEFINE_RES_IRQ(evt2irq(0x5a0)),
+ DEFINE_RES_IRQ(evt2irq(0x5c0)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffd88000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0xe00)),
+ DEFINE_RES_IRQ(evt2irq(0xe20)),
+ DEFINE_RES_IRQ(evt2irq(0xe40)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
static struct platform_device *sh7763_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &tmu0_device,
+ &tmu1_device,
&rtc_device,
- &sci_device,
&usb_ohci_device,
&usbf_device,
};
@@ -129,7 +215,21 @@ static int __init sh7763_devices_setup(void)
return platform_add_devices(sh7763_devices,
ARRAY_SIZE(sh7763_devices));
}
-__initcall(sh7763_devices_setup);
+arch_initcall(sh7763_devices_setup);
+
+static struct platform_device *sh7763_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7763_early_devices,
+ ARRAY_SIZE(sh7763_early_devices));
+}
enum {
UNUSED = 0,
@@ -142,99 +242,65 @@ enum {
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
- RTC_ATI, RTC_PRI, RTC_CUI,
- WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
- HUDI, LCDC,
- DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
- SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
- DMAC0_DMINT4, DMAC0_DMINT5,
- IIC0, IIC1,
- CMT,
- GEINT0, GEINT1, GEINT2,
- HAC,
- PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
- PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
- STIF0, STIF1,
- SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
- SIOF0, SIOF1, SIOF2,
- USBH, USBFI0, USBFI1,
- TPU, PCC,
- MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
- SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND,
+ RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, LCDC, DMAC, SCIF0, IIC0, IIC1, CMT, GETHER, HAC,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
+ STIF0, STIF1, SCIF1, SIOF0, SIOF1, SIOF2,
+ USBH, USBF, TPU, PCC, MMCIF, SIM,
TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3,
- SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
- GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3,
+ SCIF2, GPIO,
/* interrupt groups */
- TMU012, TMU345, RTC, DMAC, SCIF0, GETHER, PCIC5,
- SCIF1, USBF, MMCIF, SIM, SCIF2, GPIO,
+ TMU012, TMU345,
};
static struct intc_vect vectors[] __initdata = {
- INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
- INTC_VECT(RTC_CUI, 0x4c0),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580),
INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0),
INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600),
INTC_VECT(LCDC, 0x620),
- INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
- INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
- INTC_VECT(DMAC0_DMAE, 0x6c0),
- INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
- INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
- INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
+ INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
+ INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
+ INTC_VECT(DMAC, 0x6c0),
+ INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
+ INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
+ INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0),
- INTC_VECT(CMT, 0x900), INTC_VECT(GEINT0, 0x920),
- INTC_VECT(GEINT1, 0x940), INTC_VECT(GEINT2, 0x960),
+ INTC_VECT(CMT, 0x900), INTC_VECT(GETHER, 0x920),
+ INTC_VECT(GETHER, 0x940), INTC_VECT(GETHER, 0x960),
INTC_VECT(HAC, 0x980),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
- INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
- INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
- INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
+ INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
+ INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60),
- INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
- INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
+ INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
+ INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20),
- INTC_VECT(USBH, 0xc60), INTC_VECT(USBFI0, 0xc80),
- INTC_VECT(USBFI1, 0xca0),
+ INTC_VECT(USBH, 0xc60), INTC_VECT(USBF, 0xc80),
+ INTC_VECT(USBF, 0xca0),
INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0),
- INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
- INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
- INTC_VECT(SIM_ERI, 0xd80), INTC_VECT(SIM_RXI, 0xda0),
- INTC_VECT(SIM_TXI, 0xdc0), INTC_VECT(SIM_TEND, 0xde0),
+ INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
+ INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
+ INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0),
+ INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60),
INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0),
- INTC_VECT(SCIF1_ERI, 0xf00), INTC_VECT(SCIF1_RXI, 0xf20),
- INTC_VECT(SCIF1_BRI, 0xf40), INTC_VECT(SCIF1_TXI, 0xf60),
- INTC_VECT(GPIO_CH0, 0xf80), INTC_VECT(GPIO_CH1, 0xfa0),
- INTC_VECT(GPIO_CH2, 0xfc0), INTC_VECT(GPIO_CH3, 0xfe0),
+ INTC_VECT(SCIF2, 0xf00), INTC_VECT(SCIF2, 0xf20),
+ INTC_VECT(SCIF2, 0xf40), INTC_VECT(SCIF2, 0xf60),
+ INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
+ INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
- INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
- DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
- INTC_GROUP(GETHER, GEINT0, GEINT1, GEINT2),
- INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
- INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(USBF, USBFI0, USBFI1),
- INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
- INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEND),
- INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
- INTC_GROUP(GPIO, GPIO_CH0, GPIO_CH1, GPIO_CH2, GPIO_CH3),
-};
-
-static struct intc_prio priorities[] __initdata = {
- INTC_PRIO(SCIF0, 3),
- INTC_PRIO(SCIF1, 3),
- INTC_PRIO(SCIF2, 3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -270,11 +336,10 @@ static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } },
};
-static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, priorities,
+static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups,
mask_registers, prio_registers, NULL);
/* Support for external interrupt pins in IRQ mode */
-
static struct intc_vect irq_vectors[] __initdata = {
INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
@@ -297,12 +362,17 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_irq_desc, "sh7763-irq", irq_vectors,
- NULL, NULL, irq_mask_registers, irq_prio_registers,
- irq_sense_registers);
+static struct intc_mask_reg irq_ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7763-irq", irq_vectors,
+ NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers, irq_ack_registers);
-/* External interrupt pins in IRL mode */
+/* External interrupt pins in IRL mode */
static struct intc_vect irl_vectors[] __initdata = {
INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
@@ -332,10 +402,10 @@ static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
};
static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors,
- NULL, NULL, irl7654_mask_registers, NULL, NULL);
+ NULL, irl7654_mask_registers, NULL, NULL);
static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
- NULL, NULL, irl3210_mask_registers, NULL, NULL);
+ NULL, irl3210_mask_registers, NULL, NULL);
#define INTC_ICR0 0xffd00000
#define INTC_INTMSK0 0xffd00044
@@ -347,11 +417,11 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors,
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
register_intc_controller(&intc_desc);
}
@@ -361,27 +431,27 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
index b9cec48b180..e9b532a76c3 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c
@@ -1,7 +1,7 @@
/*
* SH7770 Setup
*
- * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -11,38 +11,297 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <linux/io.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xff923000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 61, 61, 61, 61 },
- }, {
- .mapbase = 0xff924000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 62, 62, 62, 62 },
- }, {
- .mapbase = 0xff925000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 63, 63, 63, 63 },
- }, {
- .flags = 0,
- }
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xff923000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x9a0)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xff924000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x9c0)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xff925000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x9e0)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xff926000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xa00)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xff927000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xa20)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xff928000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xa40)),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct plat_sci_port scif6_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif6_resources[] = {
+ DEFINE_RES_MEM(0xff929000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xa60)),
};
-static struct platform_device sci_device = {
+static struct platform_device scif6_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 6,
+ .resource = scif6_resources,
+ .num_resources = ARRAY_SIZE(scif6_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif6_platform_data,
},
};
+static struct plat_sci_port scif7_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif7_resources[] = {
+ DEFINE_RES_MEM(0xff92a000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xa80)),
+};
+
+static struct platform_device scif7_device = {
+ .name = "sh-sci",
+ .id = 7,
+ .resource = scif7_resources,
+ .num_resources = ARRAY_SIZE(scif7_resources),
+ .dev = {
+ .platform_data = &scif7_platform_data,
+ },
+};
+
+static struct plat_sci_port scif8_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif8_resources[] = {
+ DEFINE_RES_MEM(0xff92b000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xaa0)),
+};
+
+static struct platform_device scif8_device = {
+ .name = "sh-sci",
+ .id = 8,
+ .resource = scif8_resources,
+ .num_resources = ARRAY_SIZE(scif8_resources),
+ .dev = {
+ .platform_data = &scif8_platform_data,
+ },
+};
+
+static struct plat_sci_port scif9_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif9_resources[] = {
+ DEFINE_RES_MEM(0xff92c000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xac0)),
+};
+
+static struct platform_device scif9_device = {
+ .name = "sh-sci",
+ .id = 9,
+ .resource = scif9_resources,
+ .num_resources = ARRAY_SIZE(scif9_resources),
+ .dev = {
+ .platform_data = &scif9_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffd81000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x460)),
+ DEFINE_RES_IRQ(evt2irq(0x480)),
+ DEFINE_RES_IRQ(evt2irq(0x4a0)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu2_resources[] = {
+ DEFINE_RES_MEM(0xffd82000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x4c0)),
+ DEFINE_RES_IRQ(evt2irq(0x4e0)),
+ DEFINE_RES_IRQ(evt2irq(0x500)),
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh-tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
static struct platform_device *sh7770_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &scif9_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
};
static int __init sh7770_devices_setup(void)
@@ -50,8 +309,275 @@ static int __init sh7770_devices_setup(void)
return platform_add_devices(sh7770_devices,
ARRAY_SIZE(sh7770_devices));
}
-__initcall(sh7770_devices_setup);
+arch_initcall(sh7770_devices_setup);
+
+static struct platform_device *sh7770_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &scif6_device,
+ &scif7_device,
+ &scif8_device,
+ &scif9_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7770_early_devices,
+ ARRAY_SIZE(sh7770_early_devices));
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
+
+ GPIO,
+ TMU0, TMU1, TMU2, TMU2_TICPI,
+ TMU3, TMU4, TMU5, TMU5_TICPI,
+ TMU6, TMU7, TMU8,
+ HAC, IPI, SPDIF, HUDI, I2C,
+ DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
+ I2S0, I2S1, I2S2, I2S3,
+ SRC_RX, SRC_TX, SRC_SPDIF,
+ DU, VIDEO_IN, REMOTE, YUV, USB, ATAPI, CAN, GPS, GFX2D,
+ GFX3D_MBX, GFX3D_DMAC,
+ EXBUS_ATA,
+ SPI0, SPI1,
+ SCIF089, SCIF1234, SCIF567,
+ ADC,
+ BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14,
+ BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27,
+ BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31,
+
+ /* interrupt groups */
+ TMU, DMAC, I2S, SRC, GFX3D, SPI, SCIF, BBDMAC,
+};
+
+static struct intc_vect vectors[] __initdata = {
+ INTC_VECT(GPIO, 0x3e0),
+ INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
+ INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2_TICPI, 0x460),
+ INTC_VECT(TMU3, 0x480), INTC_VECT(TMU4, 0x4a0),
+ INTC_VECT(TMU5, 0x4c0), INTC_VECT(TMU5_TICPI, 0x4e0),
+ INTC_VECT(TMU6, 0x500), INTC_VECT(TMU7, 0x520),
+ INTC_VECT(TMU8, 0x540),
+ INTC_VECT(HAC, 0x580), INTC_VECT(IPI, 0x5c0),
+ INTC_VECT(SPDIF, 0x5e0),
+ INTC_VECT(HUDI, 0x600), INTC_VECT(I2C, 0x620),
+ INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
+ INTC_VECT(DMAC0_DMINT2, 0x680),
+ INTC_VECT(I2S0, 0x6a0), INTC_VECT(I2S1, 0x6c0),
+ INTC_VECT(I2S2, 0x6e0), INTC_VECT(I2S3, 0x700),
+ INTC_VECT(SRC_RX, 0x720), INTC_VECT(SRC_TX, 0x740),
+ INTC_VECT(SRC_SPDIF, 0x760),
+ INTC_VECT(DU, 0x780), INTC_VECT(VIDEO_IN, 0x7a0),
+ INTC_VECT(REMOTE, 0x7c0), INTC_VECT(YUV, 0x7e0),
+ INTC_VECT(USB, 0x840), INTC_VECT(ATAPI, 0x860),
+ INTC_VECT(CAN, 0x880), INTC_VECT(GPS, 0x8a0),
+ INTC_VECT(GFX2D, 0x8c0),
+ INTC_VECT(GFX3D_MBX, 0x900), INTC_VECT(GFX3D_DMAC, 0x920),
+ INTC_VECT(EXBUS_ATA, 0x940),
+ INTC_VECT(SPI0, 0x960), INTC_VECT(SPI1, 0x980),
+ INTC_VECT(SCIF089, 0x9a0), INTC_VECT(SCIF1234, 0x9c0),
+ INTC_VECT(SCIF1234, 0x9e0), INTC_VECT(SCIF1234, 0xa00),
+ INTC_VECT(SCIF1234, 0xa20), INTC_VECT(SCIF567, 0xa40),
+ INTC_VECT(SCIF567, 0xa60), INTC_VECT(SCIF567, 0xa80),
+ INTC_VECT(SCIF089, 0xaa0), INTC_VECT(SCIF089, 0xac0),
+ INTC_VECT(ADC, 0xb20),
+ INTC_VECT(BBDMAC_0_3, 0xba0), INTC_VECT(BBDMAC_0_3, 0xbc0),
+ INTC_VECT(BBDMAC_0_3, 0xbe0), INTC_VECT(BBDMAC_0_3, 0xc00),
+ INTC_VECT(BBDMAC_4_7, 0xc20), INTC_VECT(BBDMAC_4_7, 0xc40),
+ INTC_VECT(BBDMAC_4_7, 0xc60), INTC_VECT(BBDMAC_4_7, 0xc80),
+ INTC_VECT(BBDMAC_8_10, 0xca0), INTC_VECT(BBDMAC_8_10, 0xcc0),
+ INTC_VECT(BBDMAC_8_10, 0xce0), INTC_VECT(BBDMAC_11_14, 0xd00),
+ INTC_VECT(BBDMAC_11_14, 0xd20), INTC_VECT(BBDMAC_11_14, 0xd40),
+ INTC_VECT(BBDMAC_11_14, 0xd60), INTC_VECT(BBDMAC_15_18, 0xd80),
+ INTC_VECT(BBDMAC_15_18, 0xda0), INTC_VECT(BBDMAC_15_18, 0xdc0),
+ INTC_VECT(BBDMAC_15_18, 0xde0), INTC_VECT(BBDMAC_19_22, 0xe00),
+ INTC_VECT(BBDMAC_19_22, 0xe20), INTC_VECT(BBDMAC_19_22, 0xe40),
+ INTC_VECT(BBDMAC_19_22, 0xe60), INTC_VECT(BBDMAC_23_26, 0xe80),
+ INTC_VECT(BBDMAC_23_26, 0xea0), INTC_VECT(BBDMAC_23_26, 0xec0),
+ INTC_VECT(BBDMAC_23_26, 0xee0), INTC_VECT(BBDMAC_27, 0xf00),
+ INTC_VECT(BBDMAC_28, 0xf20), INTC_VECT(BBDMAC_29, 0xf40),
+ INTC_VECT(BBDMAC_30, 0xf60), INTC_VECT(BBDMAC_31, 0xf80),
+};
+
+static struct intc_group groups[] __initdata = {
+ INTC_GROUP(TMU, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5,
+ TMU5_TICPI, TMU6, TMU7, TMU8),
+ INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2),
+ INTC_GROUP(I2S, I2S0, I2S1, I2S2, I2S3),
+ INTC_GROUP(SRC, SRC_RX, SRC_TX, SRC_SPDIF),
+ INTC_GROUP(GFX3D, GFX3D_MBX, GFX3D_DMAC),
+ INTC_GROUP(SPI, SPI0, SPI1),
+ INTC_GROUP(SCIF, SCIF089, SCIF1234, SCIF567),
+ INTC_GROUP(BBDMAC,
+ BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14,
+ BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27,
+ BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31),
+};
+
+static struct intc_mask_reg mask_registers[] __initdata = {
+ { 0xffe00040, 0xffe00044, 32, /* INT2MSKR / INT2MSKCR */
+ { 0, BBDMAC, ADC, SCIF, SPI, EXBUS_ATA, GFX3D, GFX2D,
+ GPS, CAN, ATAPI, USB, YUV, REMOTE, VIDEO_IN, DU, SRC, I2S,
+ DMAC, I2C, HUDI, SPDIF, IPI, HAC, TMU, GPIO } },
+};
+
+static struct intc_prio_reg prio_registers[] __initdata = {
+ { 0xffe00000, 0, 32, 8, /* INT2PRI0 */ { GPIO, TMU0, 0, HAC } },
+ { 0xffe00004, 0, 32, 8, /* INT2PRI1 */ { IPI, SPDIF, HUDI, I2C } },
+ { 0xffe00008, 0, 32, 8, /* INT2PRI2 */ { DMAC, I2S, SRC, DU } },
+ { 0xffe0000c, 0, 32, 8, /* INT2PRI3 */ { VIDEO_IN, REMOTE, YUV, USB } },
+ { 0xffe00010, 0, 32, 8, /* INT2PRI4 */ { ATAPI, CAN, GPS, GFX2D } },
+ { 0xffe00014, 0, 32, 8, /* INT2PRI5 */ { 0, GFX3D, EXBUS_ATA, SPI } },
+ { 0xffe00018, 0, 32, 8, /* INT2PRI6 */ { SCIF1234, SCIF567, SCIF089 } },
+ { 0xffe0001c, 0, 32, 8, /* INT2PRI7 */ { ADC, 0, 0, BBDMAC_0_3 } },
+ { 0xffe00020, 0, 32, 8, /* INT2PRI8 */
+ { BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18 } },
+ { 0xffe00024, 0, 32, 8, /* INT2PRI9 */
+ { BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28 } },
+ { 0xffe00028, 0, 32, 8, /* INT2PRI10 */
+ { BBDMAC_29, BBDMAC_30, BBDMAC_31 } },
+ { 0xffe0002c, 0, 32, 8, /* INT2PRI11 */
+ { TMU1, TMU2, TMU2_TICPI, TMU3 } },
+ { 0xffe00030, 0, 32, 8, /* INT2PRI12 */
+ { TMU4, TMU5, TMU5_TICPI, TMU6 } },
+ { 0xffe00034, 0, 32, 8, /* INT2PRI13 */
+ { TMU7, TMU8 } },
+};
+
+static DECLARE_INTC_DESC(intc_desc, "sh7770", vectors, groups,
+ mask_registers, prio_registers, NULL);
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect irq_vectors[] __initdata = {
+ INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280),
+ INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300),
+ INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380),
+};
+
+static struct intc_mask_reg irq_mask_registers[] __initdata = {
+ { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } },
+};
+
+static struct intc_prio_reg irq_prio_registers[] __initdata = {
+ { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, } },
+};
+
+static struct intc_sense_reg irq_sense_registers[] __initdata = {
+ { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, } },
+};
+
+static DECLARE_INTC_DESC(intc_irq_desc, "sh7770-irq", irq_vectors,
+ NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers);
+
+/* External interrupt pins in IRL mode */
+static struct intc_vect irl_vectors[] __initdata = {
+ INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+ INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+ INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+ INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+ INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+ INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+ INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+ INTC_VECT(IRL_HHHL, 0x3c0),
+};
+
+static struct intc_mask_reg irl3210_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static struct intc_mask_reg irl7654_mask_registers[] __initdata = {
+ { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+ IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+ IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+ IRL_HHLL, IRL_HHLH, IRL_HHHL, } },
+};
+
+static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors,
+ NULL, irl7654_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
+ NULL, irl3210_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xffd00000
+#define INTC_INTMSK0 0xffd00044
+#define INTC_INTMSK1 0xffd00048
+#define INTC_INTMSK2 0xffd40080
+#define INTC_INTMSKCLR1 0xffd00068
+#define INTC_INTMSKCLR2 0xffd40084
void __init plat_irq_setup(void)
{
+ /* disable IRQ7-0 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ /* disable holding function, i.e. enable "SH-4 Mode" */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+
+ register_intc_controller(&intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ:
+ /* select IRQ mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ register_intc_controller(&intc_irq_desc);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl7654_desc);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_irl3210_desc);
+ break;
+ default:
+ BUG();
+ }
}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index 18dbbe23fea..3ee7dd9b3a6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -12,6 +12,96 @@
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <cpu/dma-register.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffe00000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffe10000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0xb80)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x580)),
+ DEFINE_RES_IRQ(evt2irq(0x5a0)),
+ DEFINE_RES_IRQ(evt2irq(0x5c0)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffdc0000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0xe00)),
+ DEFINE_RES_IRQ(evt2irq(0xe20)),
+ DEFINE_RES_IRQ(evt2irq(0xe40)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
static struct resource rtc_resources[] = {
[0] = {
@@ -20,18 +110,8 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_IO,
},
[1] = {
- /* Period IRQ */
- .start = 21,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = 22,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = 20,
+ /* Shared Period/Carry/Alarm IRQ */
+ .start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
@@ -43,33 +123,150 @@ static struct platform_device rtc_device = {
.resource = rtc_resources,
};
-static struct plat_sci_port sci_platform_data[] = {
+/* DMA */
+static const struct sh_dmae_channel sh7780_dmae0_channels[] = {
{
- .mapbase = 0xffe00000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
}, {
- .mapbase = 0xffe10000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 76, 77, 79, 78 },
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
}, {
- .flags = 0,
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
}
};
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
+static const struct sh_dmae_channel sh7780_dmae1_channels[] = {
+ {
+ .offset = 0,
+ }, {
+ .offset = 0x10,
+ }, {
+ .offset = 0x20,
+ }, {
+ .offset = 0x30,
+ }, {
+ .offset = 0x50,
+ }, {
+ .offset = 0x60,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = sh7780_dmae0_channels,
+ .channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .channel = sh7780_dmae1_channels,
+ .channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct resource sh7780_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfc808020,
+ .end = 0xfc80808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xfc809000,
+ .end = 0xfc80900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /*
+ * Real DMA error vector is 0x6c0, and channel
+ * vectors are 0x640-0x6a0, 0x780-0x7a0
+ */
+ .name = "error_irq",
+ .start = evt2irq(0x640),
+ .end = evt2irq(0x640),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct resource sh7780_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfc818020,
+ .end = 0xfc81808f,
+ .flags = IORESOURCE_MEM,
+ },
+ /* DMAC1 has no DMARS */
+ {
+ /*
+ * Real DMA error vector is 0x6c0, and channel
+ * vectors are 0x7c0-0x7e0, 0xd80-0xde0
+ */
+ .name = "error_irq",
+ .start = evt2irq(0x7c0),
+ .end = evt2irq(0x7c0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7780_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7780_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &dma1_platform_data,
},
};
static struct platform_device *sh7780_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
&rtc_device,
- &sci_device,
+ &dma0_device,
+ &dma1_device,
};
static int __init sh7780_devices_setup(void)
@@ -77,7 +274,25 @@ static int __init sh7780_devices_setup(void)
return platform_add_devices(sh7780_devices,
ARRAY_SIZE(sh7780_devices));
}
-__initcall(sh7780_devices_setup);
+arch_initcall(sh7780_devices_setup);
+
+static struct platform_device *sh7780_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ if (mach_is_sh2007()) {
+ scif0_platform_data.scscr &= ~SCSCR_CKE1;
+ scif1_platform_data.scscr &= ~SCSCR_CKE1;
+ }
+
+ early_platform_add_devices(sh7780_early_devices,
+ ARRAY_SIZE(sh7780_early_devices));
+}
enum {
UNUSED = 0,
@@ -90,82 +305,55 @@ enum {
IRL_HHLL, IRL_HHLH, IRL_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
- RTC_ATI, RTC_PRI, RTC_CUI,
- WDT,
- TMU0, TMU1, TMU2, TMU2_TICPI,
- HUDI,
- DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, DMAC0_DMAE,
- SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
- DMAC0_DMINT4, DMAC0_DMINT5, DMAC1_DMINT6, DMAC1_DMINT7,
- CMT, HAC,
- PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
- PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
- SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
- SIOF, HSPI,
- MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
- DMAC1_DMINT8, DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11,
- TMU3, TMU4, TMU5,
- SSI,
- FLCTL_FLSTE, FLCTL_FLEND, FLCTL_FLTRQ0, FLCTL_FLTRQ1,
- GPIOI0, GPIOI1, GPIOI2, GPIOI3,
+ RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, DMAC0, SCIF0, DMAC1, CMT, HAC,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
+ SCIF1, SIOF, HSPI, MMCIF, TMU3, TMU4, TMU5, SSI, FLCTL, GPIO,
/* interrupt groups */
- RTC, TMU012, DMAC0, SCIF0, DMAC45, DMAC1,
- PCIC5, SCIF1, MMCIF, TMU345, FLCTL, GPIO,
+ TMU012, TMU345,
};
static struct intc_vect vectors[] __initdata = {
- INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
- INTC_VECT(RTC_CUI, 0x4c0),
+ INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
+ INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
- INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660),
- INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(DMAC0_DMINT3, 0x6a0),
- INTC_VECT(DMAC0_DMAE, 0x6c0),
- INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
- INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
- INTC_VECT(DMAC0_DMINT4, 0x780), INTC_VECT(DMAC0_DMINT5, 0x7a0),
- INTC_VECT(DMAC1_DMINT6, 0x7c0), INTC_VECT(DMAC1_DMINT7, 0x7e0),
+ INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660),
+ INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0),
+ INTC_VECT(DMAC0, 0x6c0),
+ INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
+ INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
+ INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0),
+ INTC_VECT(DMAC1, 0x7c0), INTC_VECT(DMAC1, 0x7e0),
INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
- INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
- INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
- INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
- INTC_VECT(SCIF1_ERI, 0xb80), INTC_VECT(SCIF1_RXI, 0xba0),
- INTC_VECT(SCIF1_BRI, 0xbc0), INTC_VECT(SCIF1_TXI, 0xbe0),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
+ INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
+ INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
+ INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0),
+ INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0),
INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80),
- INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
- INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
- INTC_VECT(DMAC1_DMINT8, 0xd80), INTC_VECT(DMAC1_DMINT9, 0xda0),
- INTC_VECT(DMAC1_DMINT10, 0xdc0), INTC_VECT(DMAC1_DMINT11, 0xde0),
+ INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
+ INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
+ INTC_VECT(DMAC1, 0xd80), INTC_VECT(DMAC1, 0xda0),
+ INTC_VECT(DMAC1, 0xdc0), INTC_VECT(DMAC1, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40),
INTC_VECT(SSI, 0xe80),
- INTC_VECT(FLCTL_FLSTE, 0xf00), INTC_VECT(FLCTL_FLEND, 0xf20),
- INTC_VECT(FLCTL_FLTRQ0, 0xf40), INTC_VECT(FLCTL_FLTRQ1, 0xf60),
- INTC_VECT(GPIOI0, 0xf80), INTC_VECT(GPIOI1, 0xfa0),
- INTC_VECT(GPIOI2, 0xfc0), INTC_VECT(GPIOI3, 0xfe0),
+ INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
+ INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
+ INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
+ INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
- INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI),
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
- INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
- DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
- INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
- DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
- INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
- INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
- INTC_GROUP(FLCTL, FLCTL_FLSTE, FLCTL_FLEND,
- FLCTL_FLTRQ0, FLCTL_FLTRQ1),
- INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -217,9 +405,14 @@ static struct intc_sense_reg irq_sense_registers[] __initdata = {
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_irq_desc, "sh7780-irq", irq_vectors,
- NULL, irq_mask_registers, irq_prio_registers,
- irq_sense_registers);
+static struct intc_mask_reg irq_ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7780-irq", irq_vectors,
+ NULL, irq_mask_registers, irq_prio_registers,
+ irq_sense_registers, irq_ack_registers);
/* External interrupt pins in IRL mode */
@@ -267,17 +460,17 @@ static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors,
void __init plat_irq_setup(void)
{
/* disable IRQ7-0 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, i.e. enable "SH-4 Mode" */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -287,27 +480,27 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ:
/* select IRQ mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0);
register_intc_controller(&intc_irq_desc);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl7654_desc);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_irl3210_desc);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 621e7329ec6..c72d5a5d099 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -13,59 +13,333 @@
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
#include <asm/mmzone.h>
+#include <cpu/dma-register.h>
-static struct plat_sci_port sci_platform_data[] = {
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffea0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+};
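For reference, evt2irq() converts an INTEVT exception code into a Linux IRQ number; on SH it is expected to expand to ((evt) >> 5) - 16 (see <linux/sh_intc.h>), which is why DEFINE_RES_IRQ(evt2irq(0x700)) above resolves to the same IRQ 40 that the removed hard-coded .irqs tables listed. A minimal user-space sketch, under that assumed definition only:

#include <stdio.h>

/* Assumed SH definition of evt2irq() from <linux/sh_intc.h> */
#define evt2irq(evt)	(((evt) >> 5) - 16)

int main(void)
{
	/* SH7785 SCIF0 INTEVT codes 0x700/0x720/0x760/0x740 */
	printf("%d %d %d %d\n",
	       evt2irq(0x700), evt2irq(0x720),
	       evt2irq(0x760), evt2irq(0x740));
	/* prints "40 41 43 42" -- the values the old .irqs table hard-coded */
	return 0;
}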
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffeb0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x780)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffec0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x980)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xffed0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x9a0)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xffee0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x9c0)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xffef0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x9e0)),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x580)),
+ DEFINE_RES_IRQ(evt2irq(0x5a0)),
+ DEFINE_RES_IRQ(evt2irq(0x5c0)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffdc0000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0xe00)),
+ DEFINE_RES_IRQ(evt2irq(0xe20)),
+ DEFINE_RES_IRQ(evt2irq(0xe40)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+/* DMA */
+static const struct sh_dmae_channel sh7785_dmae0_channels[] = {
{
- .mapbase = 0xffea0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
}, {
- .mapbase = 0xffeb0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 45, 47, 46 },
- },
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
- /*
- * The rest of these all have multiplexed IRQs
- */
+static const struct sh_dmae_channel sh7785_dmae1_channels[] = {
{
- .mapbase = 0xffec0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 60, 60, 60, 60 },
+ .offset = 0,
+ }, {
+ .offset = 0x10,
}, {
- .mapbase = 0xffed0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 61, 61, 61, 61 },
+ .offset = 0x20,
}, {
- .mapbase = 0xffee0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 62, 62, 62, 62 },
+ .offset = 0x30,
}, {
- .mapbase = 0xffef0000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 63, 63, 63, 63 },
+ .offset = 0x50,
}, {
- .flags = 0,
+ .offset = 0x60,
}
};
-static struct platform_device sci_device = {
- .name = "sh-sci",
- .id = -1,
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = sh7785_dmae0_channels,
+ .channel_num = ARRAY_SIZE(sh7785_dmae0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .channel = sh7785_dmae1_channels,
+ .channel_num = ARRAY_SIZE(sh7785_dmae1_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct resource sh7785_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfc808020,
+ .end = 0xfc80808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xfc809000,
+ .end = 0xfc80900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /*
+ * Real DMA error vector is 0x6e0, and channel
+ * vectors are 0x620-0x6c0
+ */
+ .name = "error_irq",
+ .start = evt2irq(0x620),
+ .end = evt2irq(0x620),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct resource sh7785_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xfcc08020,
+ .end = 0xfcc0808f,
+ .flags = IORESOURCE_MEM,
+ },
+ /* DMAC1 has no DMARS */
+ {
+ /*
+ * Real DMA error vector is 0x940, and channel
+ * vectors are 0x880-0x920
+ */
+ .name = "error_irq",
+ .start = evt2irq(0x880),
+ .end = evt2irq(0x880),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7785_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7785_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7785_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7785_dmae1_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &dma1_platform_data,
},
};
static struct platform_device *sh7785_devices[] __initdata = {
- &sci_device,
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &dma0_device,
+ &dma1_device,
};
static int __init sh7785_devices_setup(void)
@@ -73,7 +347,24 @@ static int __init sh7785_devices_setup(void)
return platform_add_devices(sh7785_devices,
ARRAY_SIZE(sh7785_devices));
}
-__initcall(sh7785_devices_setup);
+arch_initcall(sh7785_devices_setup);
+
+static struct platform_device *sh7785_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+};
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7785_early_devices,
+ ARRAY_SIZE(sh7785_early_devices));
+}
enum {
UNUSED = 0,
@@ -91,33 +382,19 @@ enum {
IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
- WDT,
- TMU0, TMU1, TMU2, TMU2_TICPI,
- HUDI,
- DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3,
- DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE,
- SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
- SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
- DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9,
- DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE,
- HSPI,
+ WDT, TMU0, TMU1, TMU2, TMU2_TICPI,
+ HUDI, DMAC0, SCIF0, SCIF1, DMAC1, HSPI,
SCIF2, SCIF3, SCIF4, SCIF5,
- PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD,
- PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0,
- SIOF,
- MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY,
- DU,
- GDTA_GACLI, GDTA_GAMCI, GDTA_GAERI,
+ PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5,
+ SIOF, MMCIF, DU, GDTA,
TMU3, TMU4, TMU5,
SSI0, SSI1,
HAC0, HAC1,
- FLCTL_FLSTE, FLCTL_FLEND, FLCTL_FLTRQ0, FLCTL_FLTRQ1,
- GPIOI0, GPIOI1, GPIOI2, GPIOI3,
+ FLCTL, GPIO,
/* interrupt groups */
- TMU012, DMAC0, SCIF0, SCIF1, DMAC1,
- PCIC5, MMCIF, GDTA, TMU345, FLCTL, GPIO
+ TMU012, TMU345
};
static struct intc_vect vectors[] __initdata = {
@@ -125,57 +402,45 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0),
INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0),
INTC_VECT(HUDI, 0x600),
- INTC_VECT(DMAC0_DMINT0, 0x620), INTC_VECT(DMAC0_DMINT1, 0x640),
- INTC_VECT(DMAC0_DMINT2, 0x660), INTC_VECT(DMAC0_DMINT3, 0x680),
- INTC_VECT(DMAC0_DMINT4, 0x6a0), INTC_VECT(DMAC0_DMINT5, 0x6c0),
- INTC_VECT(DMAC0_DMAE, 0x6e0),
- INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720),
- INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
- INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
- INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
- INTC_VECT(DMAC1_DMINT6, 0x880), INTC_VECT(DMAC1_DMINT7, 0x8a0),
- INTC_VECT(DMAC1_DMINT8, 0x8c0), INTC_VECT(DMAC1_DMINT9, 0x8e0),
- INTC_VECT(DMAC1_DMINT10, 0x900), INTC_VECT(DMAC1_DMINT11, 0x920),
- INTC_VECT(DMAC1_DMAE, 0x940),
+ INTC_VECT(DMAC0, 0x620), INTC_VECT(DMAC0, 0x640),
+ INTC_VECT(DMAC0, 0x660), INTC_VECT(DMAC0, 0x680),
+ INTC_VECT(DMAC0, 0x6a0), INTC_VECT(DMAC0, 0x6c0),
+ INTC_VECT(DMAC0, 0x6e0),
+ INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720),
+ INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760),
+ INTC_VECT(SCIF1, 0x780), INTC_VECT(SCIF1, 0x7a0),
+ INTC_VECT(SCIF1, 0x7c0), INTC_VECT(SCIF1, 0x7e0),
+ INTC_VECT(DMAC1, 0x880), INTC_VECT(DMAC1, 0x8a0),
+ INTC_VECT(DMAC1, 0x8c0), INTC_VECT(DMAC1, 0x8e0),
+ INTC_VECT(DMAC1, 0x900), INTC_VECT(DMAC1, 0x920),
+ INTC_VECT(DMAC1, 0x940),
INTC_VECT(HSPI, 0x960),
INTC_VECT(SCIF2, 0x980), INTC_VECT(SCIF3, 0x9a0),
INTC_VECT(SCIF4, 0x9c0), INTC_VECT(SCIF5, 0x9e0),
INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20),
INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60),
- INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIERR, 0xaa0),
- INTC_VECT(PCIPWD3, 0xac0), INTC_VECT(PCIPWD2, 0xae0),
- INTC_VECT(PCIPWD1, 0xb00), INTC_VECT(PCIPWD0, 0xb20),
+ INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0),
+ INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0),
+ INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20),
INTC_VECT(SIOF, 0xc00),
- INTC_VECT(MMCIF_FSTAT, 0xd00), INTC_VECT(MMCIF_TRAN, 0xd20),
- INTC_VECT(MMCIF_ERR, 0xd40), INTC_VECT(MMCIF_FRDY, 0xd60),
+ INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20),
+ INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60),
INTC_VECT(DU, 0xd80),
- INTC_VECT(GDTA_GACLI, 0xda0), INTC_VECT(GDTA_GAMCI, 0xdc0),
- INTC_VECT(GDTA_GAERI, 0xde0),
+ INTC_VECT(GDTA, 0xda0), INTC_VECT(GDTA, 0xdc0),
+ INTC_VECT(GDTA, 0xde0),
INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20),
INTC_VECT(TMU5, 0xe40),
INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0),
INTC_VECT(HAC0, 0xec0), INTC_VECT(HAC1, 0xee0),
- INTC_VECT(FLCTL_FLSTE, 0xf00), INTC_VECT(FLCTL_FLEND, 0xf20),
- INTC_VECT(FLCTL_FLTRQ0, 0xf40), INTC_VECT(FLCTL_FLTRQ1, 0xf60),
- INTC_VECT(GPIOI0, 0xf80), INTC_VECT(GPIOI1, 0xfa0),
- INTC_VECT(GPIOI2, 0xfc0), INTC_VECT(GPIOI3, 0xfe0),
+ INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20),
+ INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60),
+ INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0),
+ INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI),
- INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
- DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
- INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
- INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
- DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE),
- INTC_GROUP(PCIC5, PCIERR, PCIPWD3, PCIPWD2, PCIPWD1, PCIPWD0),
- INTC_GROUP(MMCIF, MMCIF_FSTAT, MMCIF_TRAN, MMCIF_ERR, MMCIF_FRDY),
- INTC_GROUP(GDTA, GDTA_GACLI, GDTA_GAMCI, GDTA_GAERI),
INTC_GROUP(TMU345, TMU3, TMU4, TMU5),
- INTC_GROUP(FLCTL, FLCTL_FLSTE, FLCTL_FLEND,
- FLCTL_FLTRQ0, FLCTL_FLTRQ1),
- INTC_GROUP(GPIO, GPIOI0, GPIOI1, GPIOI2, GPIOI3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
@@ -238,13 +503,18 @@ static struct intc_sense_reg sense_registers[] __initdata = {
IRQ4, IRQ5, IRQ6, IRQ7 } },
};
-static DECLARE_INTC_DESC(intc_desc_irq0123, "sh7785-irq0123", vectors_irq0123,
- NULL, mask_registers, prio_registers,
- sense_registers);
+static struct intc_mask_reg ack_registers[] __initdata = {
+ { 0xffd00024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7785-irq0123",
+ vectors_irq0123, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
-static DECLARE_INTC_DESC(intc_desc_irq4567, "sh7785-irq4567", vectors_irq4567,
- NULL, mask_registers, prio_registers,
- sense_registers);
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7785-irq4567",
+ vectors_irq4567, NULL, mask_registers,
+ prio_registers, sense_registers, ack_registers);
/* External interrupt pins in IRL mode */
@@ -286,17 +556,17 @@ static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567,
void __init plat_irq_setup(void)
{
/* disable IRQ3-0 + IRQ7-4 */
- ctrl_outl(0xff000000, INTC_INTMSK0);
+ __raw_writel(0xff000000, INTC_INTMSK0);
/* disable IRL3-0 + IRL7-4 */
- ctrl_outl(0xc0000000, INTC_INTMSK1);
- ctrl_outl(0xfffefffe, INTC_INTMSK2);
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
/* select IRL mode for IRL3-0 + IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
/* disable holding function, ie enable "SH-4 Mode" */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0);
register_intc_controller(&intc_desc);
}
@@ -306,32 +576,32 @@ void __init plat_irq_setup_pins(int mode)
switch (mode) {
case IRQ_MODE_IRQ7654:
/* select IRQ mode for IRL7-4 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
register_intc_controller(&intc_desc_irq4567);
break;
case IRQ_MODE_IRQ3210:
/* select IRQ mode for IRL3-0 */
- ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
register_intc_controller(&intc_desc_irq0123);
break;
case IRQ_MODE_IRL7654:
/* enable IRL7-4 but don't provide any masking */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
- ctrl_outl(0x0000fffe, INTC_INTMSKCLR2);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL3210:
/* enable IRL0-3 but don't provide any masking */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
- ctrl_outl(0xfffe0000, INTC_INTMSKCLR2);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
break;
case IRQ_MODE_IRL7654_MASK:
/* enable IRL7-4 and mask using cpu intc controller */
- ctrl_outl(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl4567);
break;
case IRQ_MODE_IRL3210_MASK:
/* enable IRL0-3 and mask using cpu intc controller */
- ctrl_outl(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
register_intc_controller(&intc_desc_irl0123);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
new file mode 100644
index 00000000000..479e79bdd3d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -0,0 +1,848 @@
+/*
+ * SH7786 Setup
+ *
+ * Copyright (C) 2009 - 2011 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ * Paul Mundt <paul.mundt@renesas.com>
+ *
+ * Based on SH7785 Setup
+ *
+ * Copyright (C) 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_dma.h>
+#include <linux/sh_intc.h>
+#include <linux/usb/ohci_pdriver.h>
+#include <cpu/dma-register.h>
+#include <asm/mmzone.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffea0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+ DEFINE_RES_IRQ(evt2irq(0x720)),
+ DEFINE_RES_IRQ(evt2irq(0x760)),
+ DEFINE_RES_IRQ(evt2irq(0x740)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+/*
+ * The rest of these all have multiplexed IRQs
+ */
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffeb0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x780)),
+};
+
+static struct resource scif1_demux_resources[] = {
+ DEFINE_RES_MEM(0xffeb0000, 0x100),
+ /* Placeholders, see sh7786_devices_setup() */
+ DEFINE_RES_IRQ(0),
+ DEFINE_RES_IRQ(0),
+ DEFINE_RES_IRQ(0),
+ DEFINE_RES_IRQ(0),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffec0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x840)),
+};
+
+static struct platform_device scif2_device = {
+ .name = "sh-sci",
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
+ .dev = {
+ .platform_data = &scif2_platform_data,
+ },
+};
+
+static struct plat_sci_port scif3_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif3_resources[] = {
+ DEFINE_RES_MEM(0xffed0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x860)),
+};
+
+static struct platform_device scif3_device = {
+ .name = "sh-sci",
+ .id = 3,
+ .resource = scif3_resources,
+ .num_resources = ARRAY_SIZE(scif3_resources),
+ .dev = {
+ .platform_data = &scif3_platform_data,
+ },
+};
+
+static struct plat_sci_port scif4_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif4_resources[] = {
+ DEFINE_RES_MEM(0xffee0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+};
+
+static struct platform_device scif4_device = {
+ .name = "sh-sci",
+ .id = 4,
+ .resource = scif4_resources,
+ .num_resources = ARRAY_SIZE(scif4_resources),
+ .dev = {
+ .platform_data = &scif4_platform_data,
+ },
+};
+
+static struct plat_sci_port scif5_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_CKE1,
+ .type = PORT_SCIF,
+ .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+};
+
+static struct resource scif5_resources[] = {
+ DEFINE_RES_MEM(0xffef0000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x8a0)),
+};
+
+static struct platform_device scif5_device = {
+ .name = "sh-sci",
+ .id = 5,
+ .resource = scif5_resources,
+ .num_resources = ARRAY_SIZE(scif5_resources),
+ .dev = {
+ .platform_data = &scif5_platform_data,
+ },
+};
+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffd80000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffda0000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x480)),
+ DEFINE_RES_IRQ(evt2irq(0x4a0)),
+ DEFINE_RES_IRQ(evt2irq(0x4c0)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct sh_timer_config tmu2_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu2_resources[] = {
+ DEFINE_RES_MEM(0xffdc0000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x7a0)),
+ DEFINE_RES_IRQ(evt2irq(0x7a0)),
+ DEFINE_RES_IRQ(evt2irq(0x7a0)),
+};
+
+static struct platform_device tmu2_device = {
+ .name = "sh-tmu",
+ .id = 2,
+ .dev = {
+ .platform_data = &tmu2_platform_data,
+ },
+ .resource = tmu2_resources,
+ .num_resources = ARRAY_SIZE(tmu2_resources),
+};
+
+static struct sh_timer_config tmu3_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu3_resources[] = {
+ DEFINE_RES_MEM(0xffde0000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x7c0)),
+ DEFINE_RES_IRQ(evt2irq(0x7c0)),
+ DEFINE_RES_IRQ(evt2irq(0x7c0)),
+};
+
+static struct platform_device tmu3_device = {
+ .name = "sh-tmu",
+ .id = 3,
+ .dev = {
+ .platform_data = &tmu3_platform_data,
+ },
+ .resource = tmu3_resources,
+ .num_resources = ARRAY_SIZE(tmu3_resources),
+};
+
+static const struct sh_dmae_channel dmac0_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .channel = dmac0_channels,
+ .channel_num = ARRAY_SIZE(dmac0_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* Resource order important! */
+static struct resource dmac0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00808f,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "error_irq",
+ .start = evt2irq(0x5c0),
+ .end = evt2irq(0x5c0),
+ .flags = IORESOURCE_IRQ,
+ }, {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x500),
+ .end = evt2irq(0x5a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = dmac0_resources,
+ .num_resources = ARRAY_SIZE(dmac0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+#define USB_EHCI_START 0xffe70000
+#define USB_OHCI_START 0xffe70400
+
+static struct resource usb_ehci_resources[] = {
+ [0] = {
+ .start = USB_EHCI_START,
+ .end = USB_EHCI_START + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xba0),
+ .end = evt2irq(0xba0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_ehci_device = {
+ .name = "sh_ehci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ehci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(usb_ehci_resources),
+ .resource = usb_ehci_resources,
+};
+
+static struct resource usb_ohci_resources[] = {
+ [0] = {
+ .start = USB_OHCI_START,
+ .end = USB_OHCI_START + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0xba0),
+ .end = evt2irq(0xba0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct usb_ohci_pdata usb_ohci_pdata;
+
+static struct platform_device usb_ohci_device = {
+ .name = "ohci-platform",
+ .id = -1,
+ .dev = {
+ .dma_mask = &usb_ohci_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &usb_ohci_pdata,
+ },
+ .num_resources = ARRAY_SIZE(usb_ohci_resources),
+ .resource = usb_ohci_resources,
+};
+
+static struct platform_device *sh7786_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &scif3_device,
+ &scif4_device,
+ &scif5_device,
+ &tmu0_device,
+ &tmu1_device,
+ &tmu2_device,
+};
+
+static struct platform_device *sh7786_devices[] __initdata = {
+ &dma0_device,
+ &usb_ehci_device,
+ &usb_ohci_device,
+};
+
+/*
+ * Please call this function if your platform board
+ * uses an external clock for USB.
+ */
+#define USBCTL0 0xffe70858
+#define CLOCK_MODE_MASK 0xffffff7f
+#define EXT_CLOCK_MODE 0x00000080
+
+void __init sh7786_usb_use_exclock(void)
+{
+ u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK;
+ __raw_writel(val | EXT_CLOCK_MODE, USBCTL0);
+}
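Boards that feed the USB block from an external clock are expected to call this helper from their own setup code; a hypothetical sketch (the board name, the initcall level, and <cpu/sh7786.h> as the home of the prototype are assumptions of this example):

#include <linux/init.h>
#include <cpu/sh7786.h>		/* assumed to declare sh7786_usb_use_exclock() */

static int __init example_board_usb_setup(void)
{
	/* This example board supplies the USB clock externally */
	sh7786_usb_use_exclock();
	return 0;
}
arch_initcall(example_board_usb_setup);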
+
+#define USBINITREG1 0xffe70094
+#define USBINITREG2 0xffe7009c
+#define USBINITVAL1 0x00ff0040
+#define USBINITVAL2 0x00000001
+
+#define USBPCTL1 0xffe70804
+#define USBST 0xffe70808
+#define PHY_ENB 0x00000001
+#define PLL_ENB 0x00000002
+#define PHY_RST 0x00000004
+#define ACT_PLL_STATUS 0xc0000000
+
+static void __init sh7786_usb_setup(void)
+{
+ int i = 1000000;
+
+ /*
+ * USB initial settings
+ *
+ * The following settings are necessary
+ * for using the USB modules.
+ *
+ * see "USB Initial Settings" for detail
+ */
+ __raw_writel(USBINITVAL1, USBINITREG1);
+ __raw_writel(USBINITVAL2, USBINITREG2);
+
+ /*
+ * Set the PHY and PLL enable bit
+ */
+ __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1);
+ while (i--) {
+ if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) {
+ /* Set the PHY RST bit */
+ __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1);
+ printk(KERN_INFO "sh7786 usb setup done\n");
+ break;
+ }
+ cpu_relax();
+ }
+}
+
+enum {
+ UNUSED = 0,
+
+ /* interrupt sources */
+ IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL,
+
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL,
+
+ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
+ WDT,
+ TMU0_0, TMU0_1, TMU0_2, TMU0_3,
+ TMU1_0, TMU1_1, TMU1_2,
+ DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
+ HUDI1, HUDI0,
+ DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
+ HPB_0, HPB_1, HPB_2,
+ SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
+ SCIF1,
+ TMU2, TMU3,
+ SCIF2, SCIF3, SCIF4, SCIF5,
+ Eth_0, Eth_1,
+ PCIeC0_0, PCIeC0_1, PCIeC0_2,
+ PCIeC1_0, PCIeC1_1, PCIeC1_2,
+ USB,
+ I2C0, I2C1,
+ DU,
+ SSI0, SSI1, SSI2, SSI3,
+ PCIeC2_0, PCIeC2_1, PCIeC2_2,
+ HAC0, HAC1,
+ FLCTL,
+ HSPI,
+ GPIO0, GPIO1,
+ Thermal,
+ INTICI0, INTICI1, INTICI2, INTICI3,
+ INTICI4, INTICI5, INTICI6, INTICI7,
+
+ /* Muxed sub-events */
+ TXI1, BRI1, RXI1, ERI1,
+};
+
+static struct intc_vect sh7786_vectors[] __initdata = {
+ INTC_VECT(WDT, 0x3e0),
+ INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420),
+ INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460),
+ INTC_VECT(TMU1_0, 0x480), INTC_VECT(TMU1_1, 0x4a0),
+ INTC_VECT(TMU1_2, 0x4c0),
+ INTC_VECT(DMAC0_0, 0x500), INTC_VECT(DMAC0_1, 0x520),
+ INTC_VECT(DMAC0_2, 0x540), INTC_VECT(DMAC0_3, 0x560),
+ INTC_VECT(DMAC0_4, 0x580), INTC_VECT(DMAC0_5, 0x5a0),
+ INTC_VECT(DMAC0_6, 0x5c0),
+ INTC_VECT(HUDI1, 0x5e0), INTC_VECT(HUDI0, 0x600),
+ INTC_VECT(DMAC1_0, 0x620), INTC_VECT(DMAC1_1, 0x640),
+ INTC_VECT(DMAC1_2, 0x660), INTC_VECT(DMAC1_3, 0x680),
+ INTC_VECT(HPB_0, 0x6a0), INTC_VECT(HPB_1, 0x6c0),
+ INTC_VECT(HPB_2, 0x6e0),
+ INTC_VECT(SCIF0_0, 0x700), INTC_VECT(SCIF0_1, 0x720),
+ INTC_VECT(SCIF0_2, 0x740), INTC_VECT(SCIF0_3, 0x760),
+ INTC_VECT(SCIF1, 0x780),
+ INTC_VECT(TMU2, 0x7a0), INTC_VECT(TMU3, 0x7c0),
+ INTC_VECT(SCIF2, 0x840), INTC_VECT(SCIF3, 0x860),
+ INTC_VECT(SCIF4, 0x880), INTC_VECT(SCIF5, 0x8a0),
+ INTC_VECT(Eth_0, 0x8c0), INTC_VECT(Eth_1, 0x8e0),
+ INTC_VECT(PCIeC0_0, 0xae0), INTC_VECT(PCIeC0_1, 0xb00),
+ INTC_VECT(PCIeC0_2, 0xb20),
+ INTC_VECT(PCIeC1_0, 0xb40), INTC_VECT(PCIeC1_1, 0xb60),
+ INTC_VECT(PCIeC1_2, 0xb80),
+ INTC_VECT(USB, 0xba0),
+ INTC_VECT(I2C0, 0xcc0), INTC_VECT(I2C1, 0xce0),
+ INTC_VECT(DU, 0xd00),
+ INTC_VECT(SSI0, 0xd20), INTC_VECT(SSI1, 0xd40),
+ INTC_VECT(SSI2, 0xd60), INTC_VECT(SSI3, 0xd80),
+ INTC_VECT(PCIeC2_0, 0xda0), INTC_VECT(PCIeC2_1, 0xdc0),
+ INTC_VECT(PCIeC2_2, 0xde0),
+ INTC_VECT(HAC0, 0xe00), INTC_VECT(HAC1, 0xe20),
+ INTC_VECT(FLCTL, 0xe40),
+ INTC_VECT(HSPI, 0xe80),
+ INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0),
+ INTC_VECT(Thermal, 0xee0),
+ INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20),
+ INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60),
+ INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0),
+ INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0),
+};
+
+#define CnINTMSK0 0xfe410030
+#define CnINTMSK1 0xfe410040
+#define CnINTMSKCLR0 0xfe410050
+#define CnINTMSKCLR1 0xfe410060
+#define CnINT2MSKR0 0xfe410a20
+#define CnINT2MSKR1 0xfe410a24
+#define CnINT2MSKR2 0xfe410a28
+#define CnINT2MSKR3 0xfe410a2c
+#define CnINT2MSKCR0 0xfe410a30
+#define CnINT2MSKCR1 0xfe410a34
+#define CnINT2MSKCR2 0xfe410a38
+#define CnINT2MSKCR3 0xfe410a3c
+#define INTMSK2 0xfe410068
+#define INTMSKCLR2 0xfe41006c
+
+#define INTDISTCR0 0xfe4100b0
+#define INTDISTCR1 0xfe4100b4
+#define INT2DISTCR0 0xfe410900
+#define INT2DISTCR1 0xfe410904
+#define INT2DISTCR2 0xfe410908
+#define INT2DISTCR3 0xfe41090c
+
+static struct intc_mask_reg sh7786_mask_registers[] __initdata = {
+ { CnINTMSK0, CnINTMSKCLR0, 32,
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 },
+ INTC_SMP_BALANCING(INTDISTCR0) },
+ { INTMSK2, INTMSKCLR2, 32,
+ { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH,
+ IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH,
+ IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH,
+ IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0,
+ IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH,
+ IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH,
+ IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH,
+ IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } },
+ { CnINT2MSKR0, CnINT2MSKCR0 , 32,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
+ { CnINT2MSKR1, CnINT2MSKCR1, 32,
+ { TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0,
+ DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6,
+ HUDI1, HUDI0,
+ DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3,
+ HPB_0, HPB_1, HPB_2,
+ SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3,
+ SCIF1,
+ TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) },
+ { CnINT2MSKR2, CnINT2MSKCR2, 32,
+ { 0, 0, SCIF2, SCIF3, SCIF4, SCIF5,
+ Eth_0, Eth_1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PCIeC0_0, PCIeC0_1, PCIeC0_2,
+ PCIeC1_0, PCIeC1_1, PCIeC1_2,
+ USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) },
+ { CnINT2MSKR3, CnINT2MSKCR3, 32,
+ { 0, 0, 0, 0, 0, 0,
+ I2C0, I2C1,
+ DU, SSI0, SSI1, SSI2, SSI3,
+ PCIeC2_0, PCIeC2_1, PCIeC2_2,
+ HAC0, HAC1,
+ FLCTL, 0,
+ HSPI, GPIO0, GPIO1, Thermal,
+ 0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) },
+};
+
+static struct intc_prio_reg sh7786_prio_registers[] __initdata = {
+ { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+ { 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } },
+ { 0xfe410804, 0, 32, 8, /* INT2PRI1 */ { TMU0_0, TMU0_1,
+ TMU0_2, TMU0_3 } },
+ { 0xfe410808, 0, 32, 8, /* INT2PRI2 */ { TMU1_0, TMU1_1,
+ TMU1_2, 0 } },
+ { 0xfe41080c, 0, 32, 8, /* INT2PRI3 */ { DMAC0_0, DMAC0_1,
+ DMAC0_2, DMAC0_3 } },
+ { 0xfe410810, 0, 32, 8, /* INT2PRI4 */ { DMAC0_4, DMAC0_5,
+ DMAC0_6, HUDI1 } },
+ { 0xfe410814, 0, 32, 8, /* INT2PRI5 */ { HUDI0, DMAC1_0,
+ DMAC1_1, DMAC1_2 } },
+ { 0xfe410818, 0, 32, 8, /* INT2PRI6 */ { DMAC1_3, HPB_0,
+ HPB_1, HPB_2 } },
+ { 0xfe41081c, 0, 32, 8, /* INT2PRI7 */ { SCIF0_0, SCIF0_1,
+ SCIF0_2, SCIF0_3 } },
+ { 0xfe410820, 0, 32, 8, /* INT2PRI8 */ { SCIF1, TMU2, TMU3, 0 } },
+ { 0xfe410824, 0, 32, 8, /* INT2PRI9 */ { 0, 0, SCIF2, SCIF3 } },
+ { 0xfe410828, 0, 32, 8, /* INT2PRI10 */ { SCIF4, SCIF5,
+ Eth_0, Eth_1 } },
+ { 0xfe41082c, 0, 32, 8, /* INT2PRI11 */ { 0, 0, 0, 0 } },
+ { 0xfe410830, 0, 32, 8, /* INT2PRI12 */ { 0, 0, 0, 0 } },
+ { 0xfe410834, 0, 32, 8, /* INT2PRI13 */ { 0, 0, 0, 0 } },
+ { 0xfe410838, 0, 32, 8, /* INT2PRI14 */ { 0, 0, 0, PCIeC0_0 } },
+ { 0xfe41083c, 0, 32, 8, /* INT2PRI15 */ { PCIeC0_1, PCIeC0_2,
+ PCIeC1_0, PCIeC1_1 } },
+ { 0xfe410840, 0, 32, 8, /* INT2PRI16 */ { PCIeC1_2, USB, 0, 0 } },
+ { 0xfe410844, 0, 32, 8, /* INT2PRI17 */ { 0, 0, 0, 0 } },
+ { 0xfe410848, 0, 32, 8, /* INT2PRI18 */ { 0, 0, I2C0, I2C1 } },
+ { 0xfe41084c, 0, 32, 8, /* INT2PRI19 */ { DU, SSI0, SSI1, SSI2 } },
+ { 0xfe410850, 0, 32, 8, /* INT2PRI20 */ { SSI3, PCIeC2_0,
+ PCIeC2_1, PCIeC2_2 } },
+ { 0xfe410854, 0, 32, 8, /* INT2PRI21 */ { HAC0, HAC1, FLCTL, 0 } },
+ { 0xfe410858, 0, 32, 8, /* INT2PRI22 */ { HSPI, GPIO0,
+ GPIO1, Thermal } },
+ { 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } },
+ { 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } },
+ { 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */
+ { INTICI7, INTICI6, INTICI5, INTICI4,
+ INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) },
+};
+
+static struct intc_subgroup sh7786_subgroups[] __initdata = {
+ { 0xfe410c20, 32, SCIF1,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } },
+};
+
+static struct intc_desc sh7786_intc_desc __initdata = {
+ .name = "sh7786",
+ .hw = {
+ .vectors = sh7786_vectors,
+ .nr_vectors = ARRAY_SIZE(sh7786_vectors),
+ .mask_regs = sh7786_mask_registers,
+ .nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers),
+ .subgroups = sh7786_subgroups,
+ .nr_subgroups = ARRAY_SIZE(sh7786_subgroups),
+ .prio_regs = sh7786_prio_registers,
+ .nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers),
+ },
+};
+
+/* Support for external interrupt pins in IRQ mode */
+static struct intc_vect vectors_irq0123[] __initdata = {
+ INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240),
+ INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0),
+};
+
+static struct intc_vect vectors_irq4567[] __initdata = {
+ INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340),
+ INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0),
+};
+
+static struct intc_sense_reg sh7786_sense_registers[] __initdata = {
+ { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3,
+ IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static struct intc_mask_reg sh7786_ack_registers[] __initdata = {
+ { 0xfe410024, 0, 32, /* INTREQ */
+ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
+};
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123",
+ vectors_irq0123, NULL, sh7786_mask_registers,
+ sh7786_prio_registers, sh7786_sense_registers,
+ sh7786_ack_registers);
+
+static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567",
+ vectors_irq4567, NULL, sh7786_mask_registers,
+ sh7786_prio_registers, sh7786_sense_registers,
+ sh7786_ack_registers);
+
+/* External interrupt pins in IRL mode */
+
+static struct intc_vect vectors_irl0123[] __initdata = {
+ INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220),
+ INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260),
+ INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0),
+ INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0),
+ INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320),
+ INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360),
+ INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0),
+ INTC_VECT(IRL0_HHHL, 0x3c0),
+};
+
+static struct intc_vect vectors_irl4567[] __initdata = {
+ INTC_VECT(IRL4_LLLL, 0x900), INTC_VECT(IRL4_LLLH, 0x920),
+ INTC_VECT(IRL4_LLHL, 0x940), INTC_VECT(IRL4_LLHH, 0x960),
+ INTC_VECT(IRL4_LHLL, 0x980), INTC_VECT(IRL4_LHLH, 0x9a0),
+ INTC_VECT(IRL4_LHHL, 0x9c0), INTC_VECT(IRL4_LHHH, 0x9e0),
+ INTC_VECT(IRL4_HLLL, 0xa00), INTC_VECT(IRL4_HLLH, 0xa20),
+ INTC_VECT(IRL4_HLHL, 0xa40), INTC_VECT(IRL4_HLHH, 0xa60),
+ INTC_VECT(IRL4_HHLL, 0xa80), INTC_VECT(IRL4_HHLH, 0xaa0),
+ INTC_VECT(IRL4_HHHL, 0xac0),
+};
+
+static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123,
+ NULL, sh7786_mask_registers, NULL, NULL);
+
+static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567,
+ NULL, sh7786_mask_registers, NULL, NULL);
+
+#define INTC_ICR0 0xfe410000
+#define INTC_INTMSK0 CnINTMSK0
+#define INTC_INTMSK1 CnINTMSK1
+#define INTC_INTMSK2 INTMSK2
+#define INTC_INTMSKCLR1 CnINTMSKCLR1
+#define INTC_INTMSKCLR2 INTMSKCLR2
+
+void __init plat_irq_setup(void)
+{
+ /* disable IRQ3-0 + IRQ7-4 */
+ __raw_writel(0xff000000, INTC_INTMSK0);
+
+ /* disable IRL3-0 + IRL7-4 */
+ __raw_writel(0xc0000000, INTC_INTMSK1);
+ __raw_writel(0xfffefffe, INTC_INTMSK2);
+
+ /* select IRL mode for IRL3-0 + IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0);
+
+ register_intc_controller(&sh7786_intc_desc);
+}
+
+void __init plat_irq_setup_pins(int mode)
+{
+ switch (mode) {
+ case IRQ_MODE_IRQ7654:
+ /* select IRQ mode for IRL7-4 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq4567);
+ break;
+ case IRQ_MODE_IRQ3210:
+ /* select IRQ mode for IRL3-0 */
+ __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0);
+ register_intc_controller(&intc_desc_irq0123);
+ break;
+ case IRQ_MODE_IRL7654:
+ /* enable IRL7-4 but don't provide any masking */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ __raw_writel(0x0000fffe, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL3210:
+ /* enable IRL0-3 but don't provide any masking */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ __raw_writel(0xfffe0000, INTC_INTMSKCLR2);
+ break;
+ case IRQ_MODE_IRL7654_MASK:
+ /* enable IRL7-4 and mask using cpu intc controller */
+ __raw_writel(0x40000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl4567);
+ break;
+ case IRQ_MODE_IRL3210_MASK:
+ /* enable IRL0-3 and mask using cpu intc controller */
+ __raw_writel(0x80000000, INTC_INTMSKCLR1);
+ register_intc_controller(&intc_desc_irl0123);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+}
+
+static int __init sh7786_devices_setup(void)
+{
+ int ret, irq;
+
+ sh7786_usb_setup();
+
+ /*
+ * De-mux SCIF1 IRQs if possible
+ */
+ irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1);
+ if (irq > 0) {
+ scif1_demux_resources[1].start =
+ intc_irq_lookup(sh7786_intc_desc.name, ERI1);
+ scif1_demux_resources[2].start =
+ intc_irq_lookup(sh7786_intc_desc.name, RXI1);
+ scif1_demux_resources[3].start = irq;
+ scif1_demux_resources[4].start =
+ intc_irq_lookup(sh7786_intc_desc.name, BRI1);
+
+ scif1_device.resource = scif1_demux_resources;
+ scif1_device.num_resources = ARRAY_SIZE(scif1_demux_resources);
+ }
+
+ ret = platform_add_devices(sh7786_early_devices,
+ ARRAY_SIZE(sh7786_early_devices));
+ if (unlikely(ret != 0))
+ return ret;
+
+ return platform_add_devices(sh7786_devices,
+ ARRAY_SIZE(sh7786_devices));
+}
+arch_initcall(sh7786_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh7786_early_devices,
+ ARRAY_SIZE(sh7786_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index bd35f32534b..a78c5feb4e3 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -1,7 +1,7 @@
/*
- * SH-X3 Setup
+ * SH-X3 Prototype Setup
*
- * Copyright (C) 2007 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -12,52 +12,155 @@
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/sh_timer.h>
+#include <linux/sh_intc.h>
+#include <cpu/shx3.h>
#include <asm/mmzone.h>
-static struct plat_sci_port sci_platform_data[] = {
- {
- .mapbase = 0xffc30000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 40, 41, 43, 42 },
- }, {
- .mapbase = 0xffc40000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 44, 45, 47, 46 },
- }, {
- .mapbase = 0xffc50000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 48, 49, 51, 50 },
- }, {
- .mapbase = 0xffc60000,
- .flags = UPF_BOOT_AUTOCONF,
- .type = PORT_SCIF,
- .irqs = { 52, 53, 55, 54 },
- }, {
- .flags = 0,
- }
+/*
+ * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2
+ * INTEVT values overlap with the FPU EXPEVT ones, requiring special
+ * demuxing in the exception dispatch path.
+ *
+ * As this overlap is something that never should have made it into
+ * silicon in the first place, we just refuse to deal with the port at
+ * all rather than adding infrastructure to hack around it.
+ */
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(0xffc30000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x700)),
+ DEFINE_RES_IRQ(evt2irq(0x720)),
+ DEFINE_RES_IRQ(evt2irq(0x760)),
+ DEFINE_RES_IRQ(evt2irq(0x740)),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct plat_sci_port scif1_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif1_resources[] = {
+ DEFINE_RES_MEM(0xffc40000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x780)),
+ DEFINE_RES_IRQ(evt2irq(0x7a0)),
+ DEFINE_RES_IRQ(evt2irq(0x7e0)),
+ DEFINE_RES_IRQ(evt2irq(0x7c0)),
+};
+
+static struct platform_device scif1_device = {
+ .name = "sh-sci",
+ .id = 1,
+ .resource = scif1_resources,
+ .num_resources = ARRAY_SIZE(scif1_resources),
+ .dev = {
+ .platform_data = &scif1_platform_data,
+ },
+};
+
+static struct plat_sci_port scif2_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif2_resources[] = {
+ DEFINE_RES_MEM(0xffc60000, 0x100),
+ DEFINE_RES_IRQ(evt2irq(0x880)),
+ DEFINE_RES_IRQ(evt2irq(0x8a0)),
+ DEFINE_RES_IRQ(evt2irq(0x8e0)),
+ DEFINE_RES_IRQ(evt2irq(0x8c0)),
};
-static struct platform_device sci_device = {
+static struct platform_device scif2_device = {
.name = "sh-sci",
- .id = -1,
+ .id = 2,
+ .resource = scif2_resources,
+ .num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
- .platform_data = sci_platform_data,
+ .platform_data = &scif2_platform_data,
},
};
-static struct platform_device *shx3_devices[] __initdata = {
- &sci_device,
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(0xffc10000, 0x30),
+ DEFINE_RES_IRQ(evt2irq(0x400)),
+ DEFINE_RES_IRQ(evt2irq(0x420)),
+ DEFINE_RES_IRQ(evt2irq(0x440)),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct sh_timer_config tmu1_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu1_resources[] = {
+ DEFINE_RES_MEM(0xffc20000, 0x2c),
+ DEFINE_RES_IRQ(evt2irq(0x460)),
+ DEFINE_RES_IRQ(evt2irq(0x480)),
+ DEFINE_RES_IRQ(evt2irq(0x4a0)),
+};
+
+static struct platform_device tmu1_device = {
+ .name = "sh-tmu",
+ .id = 1,
+ .dev = {
+ .platform_data = &tmu1_platform_data,
+ },
+ .resource = tmu1_resources,
+ .num_resources = ARRAY_SIZE(tmu1_resources),
+};
+
+static struct platform_device *shx3_early_devices[] __initdata = {
+ &scif0_device,
+ &scif1_device,
+ &scif2_device,
+ &tmu0_device,
+ &tmu1_device,
};
static int __init shx3_devices_setup(void)
{
- return platform_add_devices(shx3_devices,
- ARRAY_SIZE(shx3_devices));
+ return platform_add_devices(shx3_early_devices,
+ ARRAY_SIZE(shx3_early_devices));
+}
+arch_initcall(shx3_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(shx3_early_devices,
+ ARRAY_SIZE(shx3_early_devices));
}
-__initcall(shx3_devices_setup);
enum {
UNUSED = 0,
@@ -82,10 +185,7 @@ enum {
DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9,
DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE,
IIC, VIN0, VIN1, VCORE0, ATAPI,
- DTU0_TEND, DTU0_AE, DTU0_TMISS,
- DTU1_TEND, DTU1_AE, DTU1_TMISS,
- DTU2_TEND, DTU2_AE, DTU2_TMISS,
- DTU3_TEND, DTU3_AE, DTU3_TMISS,
+ DTU0, DTU1, DTU2, DTU3,
FE0, FE1,
GPIO0, GPIO1, GPIO2, GPIO3,
PAM, IRM,
@@ -94,7 +194,7 @@ enum {
/* interrupt groups */
IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
- DMAC0, DMAC1, DTU0, DTU1, DTU2, DTU3,
+ DMAC0, DMAC1,
};
static struct intc_vect vectors[] __initdata = {
@@ -111,8 +211,6 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760),
INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0),
INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0),
- INTC_VECT(SCIF2_ERI, 0x800), INTC_VECT(SCIF2_RXI, 0x820),
- INTC_VECT(SCIF2_BRI, 0x840), INTC_VECT(SCIF2_TXI, 0x860),
INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0),
INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0),
INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920),
@@ -127,14 +225,14 @@ static struct intc_vect vectors[] __initdata = {
INTC_VECT(IIC, 0xae0),
INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20),
INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60),
- INTC_VECT(DTU0_TEND, 0xc00), INTC_VECT(DTU0_AE, 0xc20),
- INTC_VECT(DTU0_TMISS, 0xc40),
- INTC_VECT(DTU1_TEND, 0xc60), INTC_VECT(DTU1_AE, 0xc80),
- INTC_VECT(DTU1_TMISS, 0xca0),
- INTC_VECT(DTU2_TEND, 0xcc0), INTC_VECT(DTU2_AE, 0xce0),
- INTC_VECT(DTU2_TMISS, 0xd00),
- INTC_VECT(DTU3_TEND, 0xd20), INTC_VECT(DTU3_AE, 0xd40),
- INTC_VECT(DTU3_TMISS, 0xd60),
+ INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20),
+ INTC_VECT(DTU0, 0xc40),
+ INTC_VECT(DTU1, 0xc60), INTC_VECT(DTU1, 0xc80),
+ INTC_VECT(DTU1, 0xca0),
+ INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0),
+ INTC_VECT(DTU2, 0xd00),
+ INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40),
+ INTC_VECT(DTU3, 0xd60),
INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20),
INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60),
INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0),
@@ -153,18 +251,17 @@ static struct intc_group groups[] __initdata = {
INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
- INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI),
INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2,
DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE),
INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8,
DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11),
- INTC_GROUP(DTU0, DTU0_TEND, DTU0_AE, DTU0_TMISS),
- INTC_GROUP(DTU1, DTU1_TEND, DTU1_AE, DTU1_TMISS),
- INTC_GROUP(DTU2, DTU2_TEND, DTU2_AE, DTU2_TMISS),
- INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS),
};
+#define INT2DISTCR0 0xfe4108a0
+#define INT2DISTCR1 0xfe4108a4
+#define INT2DISTCR2 0xfe4108a8
+
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */
{ IRQ0, IRQ1, IRQ2, IRQ3 } },
@@ -174,20 +271,23 @@ static struct intc_mask_reg mask_registers[] __initdata = {
{ FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC,
DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */
- 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, } },
+ 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, },
+ INTC_SMP_BALANCING(INT2DISTCR0) },
{ 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */
{ 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */
PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2,
PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11,
DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7,
DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4,
- DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 } },
+ DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 },
+ INTC_SMP_BALANCING(INT2DISTCR1) },
{ 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI,
SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI,
SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI,
- SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI } },
+ SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI },
+ INTC_SMP_BALANCING(INT2DISTCR2) },
};
static struct intc_prio_reg prio_registers[] __initdata = {
@@ -244,11 +344,33 @@ static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
void __init plat_irq_setup_pins(int mode)
{
+ int ret = 0;
+
switch (mode) {
case IRQ_MODE_IRQ:
+ ret |= gpio_request(GPIO_FN_IRQ3, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ2, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ1, intc_desc_irq.name);
+ ret |= gpio_request(GPIO_FN_IRQ0, intc_desc_irq.name);
+
+ if (unlikely(ret)) {
+ pr_err("Failed to set IRQ mode\n");
+ return;
+ }
+
register_intc_controller(&intc_desc_irq);
break;
case IRQ_MODE_IRL3210:
+ ret |= gpio_request(GPIO_FN_IRL3, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL2, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL1, intc_desc_irl.name);
+ ret |= gpio_request(GPIO_FN_IRL0, intc_desc_irl.name);
+
+ if (unlikely(ret)) {
+ pr_err("Failed to set IRL mode\n");
+ return;
+ }
+
register_intc_controller(&intc_desc_irl);
break;
default:
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index e5e06845fa4..4a298808789 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -1,7 +1,7 @@
/*
* SH-X3 SMP
*
- * Copyright (C) 2007 Paul Mundt
+ * Copyright (C) 2007 - 2010 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,18 +9,49 @@
* for more details.
*/
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/sections.h>
-void __init plat_smp_setup(void)
+#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
+#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
+
+#define STBCR_MSTP 0x00000001
+#define STBCR_RESET 0x00000002
+#define STBCR_SLEEP 0x00000004
+#define STBCR_LTSLP 0x80000000
+
+static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
+{
+ unsigned int message = (unsigned int)(long)arg;
+ unsigned int cpu = hard_smp_processor_id();
+ unsigned int offs = 4 * cpu;
+ unsigned int x;
+
+ x = __raw_readl(0xfe410070 + offs); /* C0INTICI..CnINTICI */
+ x &= (1 << (message << 2));
+ __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
+
+ smp_message_recv(message);
+
+ return IRQ_HANDLED;
+}
+
+static void shx3_smp_setup(void)
{
unsigned int cpu = 0;
int i, num;
- cpus_clear(cpu_possible_map);
- cpu_set(cpu, cpu_possible_map);
+ init_cpu_possible(cpumask_of(cpu));
+
+ /* Enable light sleep for the boot CPU */
+ __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
@@ -30,7 +61,7 @@ void __init plat_smp_setup(void)
* for the total number of cores.
*/
for (i = 1, num = 0; i < NR_CPUS; i++) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
@@ -38,83 +69,98 @@ void __init plat_smp_setup(void)
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
-void __init plat_prepare_cpus(unsigned int max_cpus)
+static void shx3_prepare_cpus(unsigned int max_cpus)
{
-}
+ int i;
-#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
-#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
+ local_timer_setup(0);
-#define STBCR_MSTP 0x00000001
-#define STBCR_RESET 0x00000002
-#define STBCR_LTSLP 0x80000000
+ BUILD_BUG_ON(SMP_MSG_NR >= 8);
-#define STBCR_AP_VAL (STBCR_RESET | STBCR_LTSLP)
+ for (i = 0; i < SMP_MSG_NR; i++)
+ request_irq(104 + i, ipi_interrupt_handler,
+ IRQF_PERCPU, "IPI", (void *)(long)i);
-void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
{
- ctrl_outl(entry_point, RESET_REG(cpu));
+ if (__in_29bit_mode())
+ __raw_writel(entry_point, RESET_REG(cpu));
+ else
+ __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));
- if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
- ctrl_outl(STBCR_MSTP, STBCR_REG(cpu));
+ if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
- while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
- ;
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ cpu_relax();
/* Start up secondary processor by sending a reset */
- ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu));
+ __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
}
-int plat_smp_processor_id(void)
+static unsigned int shx3_smp_processor_id(void)
{
- return ctrl_inl(0xff000048); /* CPIDR */
+ return __raw_readl(0xff000048); /* CPIDR */
}
-void plat_send_ipi(unsigned int cpu, unsigned int message)
+static void shx3_send_ipi(unsigned int cpu, unsigned int message)
{
unsigned long addr = 0xfe410070 + (cpu * 4);
BUG_ON(cpu >= 4);
- BUG_ON(message >= SMP_MSG_NR);
- ctrl_outl(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
+ __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
}
-struct ipi_data {
- void (*handler)(void *);
- void *arg;
- unsigned int message;
-};
-
-static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
+static void shx3_update_boot_vector(unsigned int cpu)
{
- struct ipi_data *id = arg;
- unsigned int cpu = hard_smp_processor_id();
- unsigned int offs = 4 * cpu;
- unsigned int x;
-
- x = ctrl_inl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
- x &= (1 << (id->message << 2));
- ctrl_outl(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */
-
- id->handler(id->arg);
-
- return IRQ_HANDLED;
+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));
+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+ cpu_relax();
+ __raw_writel(STBCR_RESET, STBCR_REG(cpu));
}
-static struct ipi_data ipi_handlers[SMP_MSG_NR];
-
-int plat_register_ipi_handler(unsigned int message,
- void (*handler)(void *), void *arg)
+static int
+shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- struct ipi_data *id = &ipi_handlers[message];
+ unsigned int cpu = (unsigned int)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ shx3_update_boot_vector(cpu);
+ break;
+ case CPU_ONLINE:
+ pr_info("CPU %u is now online\n", cpu);
+ break;
+ case CPU_DEAD:
+ break;
+ }
- BUG_ON(SMP_MSG_NR >= 8);
- BUG_ON(message >= SMP_MSG_NR);
+ return NOTIFY_OK;
+}
- id->handler = handler;
- id->arg = arg;
- id->message = message;
+static struct notifier_block shx3_cpu_notifier = {
+ .notifier_call = shx3_cpu_callback,
+};
- return request_irq(104 + message, ipi_interrupt_handler, 0, "IPI", id);
+static int register_shx3_cpu_notifier(void)
+{
+ register_hotcpu_notifier(&shx3_cpu_notifier);
+ return 0;
}
+late_initcall(register_shx3_cpu_notifier);
+
+struct plat_smp_ops shx3_smp_ops = {
+ .smp_setup = shx3_smp_setup,
+ .prepare_cpus = shx3_prepare_cpus,
+ .start_cpu = shx3_start_cpu,
+ .smp_processor_id = shx3_smp_processor_id,
+ .send_ipi = shx3_send_ipi,
+ .cpu_die = native_cpu_die,
+ .cpu_disable = native_cpu_disable,
+ .play_dead = native_play_dead,
+};
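
The IPI path added above packs one message type per four-bit field of the per-CPU C0INTICI..CnINTICI register, which is why the code asserts SMP_MSG_NR < 8 and signals message n with 1 << (n << 2). Below is a stand-alone sketch of that bit math, using the register addresses from the code above and an assumed message count of four; it only prints the derived values and touches no hardware.

/* Sketch of the INTICI bit layout used by shx3_send_ipi() and
 * ipi_interrupt_handler() above: message n sets bit (n * 4) in the
 * per-CPU register at 0xfe410070 + 4 * cpu, and is cleared through
 * 0xfe410080 + 4 * cpu.  SMP_MSG_NR = 4 is an assumption here.
 */
#include <stdio.h>

#define INTICI_BASE    0xfe410070UL
#define INTICICLR_BASE 0xfe410080UL
#define SMP_MSG_NR     4

int main(void)
{
	unsigned int cpu, msg;

	for (cpu = 0; cpu < 4; cpu++) {
		printf("cpu %u: set reg %#lx, clear reg %#lx\n",
		       cpu, INTICI_BASE + 4 * cpu, INTICICLR_BASE + 4 * cpu);
		for (msg = 0; msg < SMP_MSG_NR; msg++)
			printf("  message %u -> bit mask %#010x\n",
			       msg, 1u << (msg << 2));
	}
	return 0;
}
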
diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c
new file mode 100644
index 00000000000..efb2745bcb3
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4a/ubc.c
@@ -0,0 +1,133 @@
+/*
+ * arch/sh/kernel/cpu/sh4a/ubc.c
+ *
+ * On-chip UBC support for SH-4A CPUs.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_CBR(idx) (0xff200000 + (0x20 * idx))
+#define UBC_CRR(idx) (0xff200004 + (0x20 * idx))
+#define UBC_CAR(idx) (0xff200008 + (0x20 * idx))
+#define UBC_CAMR(idx) (0xff20000c + (0x20 * idx))
+
+#define UBC_CCMFR 0xff200600
+#define UBC_CBCR 0xff200620
+
+/* CRR */
+#define UBC_CRR_PCB (1 << 1)
+#define UBC_CRR_BIE (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE (1 << 0)
+
+static struct sh_ubc sh4a_ubc;
+
+static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx));
+ __raw_writel(info->address, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+ __raw_writel(0, UBC_CBR(idx));
+ __raw_writel(0, UBC_CAR(idx));
+}
+
+static void sh4a_ubc_enable_all(unsigned long mask)
+{
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ if (mask & (1 << i))
+ __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE,
+ UBC_CBR(i));
+}
+
+static void sh4a_ubc_disable_all(void)
+{
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE,
+ UBC_CBR(i));
+}
+
+static unsigned long sh4a_ubc_active_mask(void)
+{
+ unsigned long active = 0;
+ int i;
+
+ for (i = 0; i < sh4a_ubc.num_events; i++)
+ if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE)
+ active |= (1 << i);
+
+ return active;
+}
+
+static unsigned long sh4a_ubc_triggered_mask(void)
+{
+ return __raw_readl(UBC_CCMFR);
+}
+
+static void sh4a_ubc_clear_triggered_mask(unsigned long mask)
+{
+ __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR);
+}
+
+static struct sh_ubc sh4a_ubc = {
+ .name = "SH-4A",
+ .num_events = 2,
+ .trap_nr = 0x1e0,
+ .enable = sh4a_ubc_enable,
+ .disable = sh4a_ubc_disable,
+ .enable_all = sh4a_ubc_enable_all,
+ .disable_all = sh4a_ubc_disable_all,
+ .active_mask = sh4a_ubc_active_mask,
+ .triggered_mask = sh4a_ubc_triggered_mask,
+ .clear_triggered_mask = sh4a_ubc_clear_triggered_mask,
+};
+
+static int __init sh4a_ubc_init(void)
+{
+ struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+ int i;
+
+ /*
+ * The UBC MSTP bit is optional, as not all platforms will have
+ * it. Just ignore it if we can't find it.
+ */
+ if (IS_ERR(ubc_iclk))
+ ubc_iclk = NULL;
+
+ clk_enable(ubc_iclk);
+
+ __raw_writel(0, UBC_CBCR);
+
+ for (i = 0; i < sh4a_ubc.num_events; i++) {
+ __raw_writel(0, UBC_CAMR(i));
+ __raw_writel(0, UBC_CBR(i));
+
+ __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i));
+
+ /* dummy read for write posting */
+ (void)__raw_readl(UBC_CRR(i));
+ }
+
+ clk_disable(ubc_iclk);
+
+ sh4a_ubc.clk = ubc_iclk;
+
+ return register_sh_ubc(&sh4a_ubc);
+}
+arch_initcall(sh4a_ubc_init);
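
For reference, the UBC_CBR/CRR/CAR/CAMR macros above lay the two SH-4A UBC channels out on a 0x20-byte stride from 0xff200000. A small host-side sketch that just prints the resulting register map (no MMIO; addresses are taken from the macros above, the channel count from sh4a_ubc.num_events):

/* Illustrative dump of the per-channel UBC register addresses implied
 * by the macros in ubc.c above.  Purely host-side arithmetic.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long base = 0xff200000UL;
	int idx;

	for (idx = 0; idx < 2; idx++)	/* sh4a_ubc.num_events == 2 */
		printf("ch%d: CBR %#lx CRR %#lx CAR %#lx CAMR %#lx\n",
		       idx,
		       base + 0x20 * idx,		/* UBC_CBR(idx)  */
		       base + 0x04 + 0x20 * idx,	/* UBC_CRR(idx)  */
		       base + 0x08 + 0x20 * idx,	/* UBC_CAR(idx)  */
		       base + 0x0c + 0x20 * idx);	/* UBC_CAMR(idx) */
	return 0;
}
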
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
index 8646363e9de..a184a31e686 100644
--- a/arch/sh/kernel/cpu/sh5/Makefile
+++ b/arch/sh/kernel/cpu/sh5/Makefile
@@ -5,3 +5,11 @@ obj-y := entry.o probe.o switchto.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_KALLSYMS) += unwind.o
+
+# CPU subtype setup
+obj-$(CONFIG_CPU_SH5) += setup-sh5.o
+
+# Primary on-chip clocks (common)
+clock-$(CONFIG_CPU_SH5) := clock-sh5.o
+
+obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
new file mode 100644
index 00000000000..c48b93d4c08
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/clock-sh5.c
@@ -0,0 +1,79 @@
+/*
+ * arch/sh/kernel/cpu/sh5/clock-sh5.c
+ *
+ * SH-5 support for the clock framework
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/io.h>
+
+static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
+
+/* Clock, Power and Reset Controller */
+#define CPRC_BLOCK_OFF 0x01010000
+#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
+
+static unsigned long cprc_base;
+
+static void master_clk_init(struct clk *clk)
+{
+ int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
+ clk->rate *= ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static unsigned long module_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
+ return clk->parent->rate / ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static unsigned long bus_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
+ return clk->parent->rate / ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static unsigned long cpu_clk_recalc(struct clk *clk)
+{
+ int idx = (__raw_readw(cprc_base) & 0x0007);
+ return clk->parent->rate / ifc_table[idx];
+}
+
+static struct sh_clk_ops sh5_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct sh_clk_ops *sh5_clk_ops[] = {
+ &sh5_master_clk_ops,
+ &sh5_module_clk_ops,
+ &sh5_bus_clk_ops,
+ &sh5_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
+{
+ cprc_base = (unsigned long)ioremap_nocache(CPRC_BASE, 1024);
+ BUG_ON(!cprc_base);
+
+ if (idx < ARRAY_SIZE(sh5_clk_ops))
+ *ops = sh5_clk_ops[idx];
+}
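
Each recalc callback above does the same thing: pull a three-bit divider-select field out of the CPRC control word and divide the parent rate by the matching ifc_table[] entry (module select at bit 12, bus at bit 3, CPU at bit 0). A stand-alone sketch of that lookup; the parent rate and control-word value below are made up for illustration.

/* Host-side sketch of the SH-5 CPRC divider lookup used by the
 * recalc callbacks above.  The ctrl value is hypothetical.
 */
#include <stdio.h>

static const int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };

static unsigned long div_from_field(unsigned long parent_rate,
				    unsigned int ctrl, int shift)
{
	int idx = (ctrl >> shift) & 0x0007;	/* 3-bit divider select */
	return parent_rate / ifc_table[idx];
}

int main(void)
{
	unsigned long parent = 400000000UL;	/* assumed 400 MHz parent */
	unsigned int ctrl = 0x1a42;		/* hypothetical CPRC word */

	printf("module clk: %lu Hz\n", div_from_field(parent, ctrl, 12));
	printf("bus clk:    %lu Hz\n", div_from_field(parent, ctrl, 3));
	printf("cpu clk:    %lu Hz\n", div_from_field(parent, ctrl, 0));
	return 0;
}
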
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ba8750176d9..0c8d0377d40 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -2,7 +2,7 @@
* arch/sh/kernel/cpu/sh5/entry.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 - 2007 Paul Mundt
+ * Copyright (C) 2004 - 2008 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -10,8 +10,9 @@
* for more details.
*/
#include <linux/errno.h>
+#include <linux/init.h>
#include <linux/sys.h>
-#include <asm/cpu/registers.h>
+#include <cpu/registers.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
@@ -143,12 +144,22 @@ resvec_save_area:
trap_jtable:
.long do_exception_error /* 0x000 */
.long do_exception_error /* 0x020 */
+#ifdef CONFIG_MMU
.long tlb_miss_load /* 0x040 */
.long tlb_miss_store /* 0x060 */
+#else
+ .long do_exception_error
+ .long do_exception_error
+#endif
! ARTIFICIAL pseudo-EXPEVT setting
.long do_debug_interrupt /* 0x080 */
+#ifdef CONFIG_MMU
.long tlb_miss_load /* 0x0A0 */
.long tlb_miss_store /* 0x0C0 */
+#else
+ .long do_exception_error
+ .long do_exception_error
+#endif
.long do_address_error_load /* 0x0E0 */
.long do_address_error_store /* 0x100 */
#ifdef CONFIG_SH_FPU
@@ -176,7 +187,7 @@ trap_jtable:
.rept 6
.long do_exception_error /* 0x880 - 0x920 */
.endr
- .long do_software_break_point /* 0x940 */
+ .long breakpoint_trap_handler /* 0x940 */
.long do_exception_error /* 0x960 */
.long do_single_step /* 0x980 */
@@ -185,10 +196,18 @@ trap_jtable:
.endr
.long do_IRQ /* 0xA00 */
.long do_IRQ /* 0xA20 */
+#ifdef CONFIG_MMU
.long itlb_miss_or_IRQ /* 0xA40 */
+#else
+ .long do_IRQ
+#endif
.long do_IRQ /* 0xA60 */
.long do_IRQ /* 0xA80 */
+#ifdef CONFIG_MMU
.long itlb_miss_or_IRQ /* 0xAA0 */
+#else
+ .long do_IRQ
+#endif
.long do_exception_error /* 0xAC0 */
.long do_address_error_exec /* 0xAE0 */
.rept 8
@@ -274,6 +293,7 @@ not_a_tlb_miss:
* Instead of '.space 1024-TEXT_SIZE' place the RESVEC
* block making sure the final alignment is correct.
*/
+#ifdef CONFIG_MMU
tlb_miss:
synco /* TAKum03020 (but probably a good idea anyway.) */
putcon SP, KCR1
@@ -315,7 +335,7 @@ tlb_miss:
/* If the fast path handler fixed the fault, just drop through quickly
to the restore code right away to return to the excepting context.
*/
- beqi/u r2, 0, tr1
+ bnei/u r2, 0, tr1
fast_tlb_miss_restore:
ld.q SP, SAVED_TR0, r2
@@ -377,6 +397,9 @@ fixup_to_invoke_general_handler:
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
+#else /* CONFIG_MMU */
+ .balign 256
+#endif
/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
DOES END UP AT VBR+0x600 */
@@ -789,27 +812,6 @@ no_underflow:
! exceptions
add SP, ZERO, r14
-#ifdef CONFIG_POOR_MANS_STRACE
- /* We've pushed all the registers now, so only r2-r4 hold anything
- * useful. Move them into callee save registers */
- or r2, ZERO, r28
- or r3, ZERO, r29
- or r4, ZERO, r30
-
- /* Preserve r2 as the event code */
- movi evt_debug, r3
- ori r3, 1, r3
- ptabs r3, tr0
-
- or SP, ZERO, r6
- getcon TRA, r5
- blink tr0, LINK
-
- or r28, ZERO, r2
- or r29, ZERO, r3
- or r30, ZERO, r4
-#endif
-
/* For syscall and debug race condition, get TRA now */
getcon TRA, r5
@@ -864,11 +866,6 @@ no_underflow:
*/
.global ret_from_irq
ret_from_irq:
-#ifdef CONFIG_POOR_MANS_STRACE
- pta evt_debug_ret_from_irq, tr0
- ori SP, 0, r2
- blink tr0, LINK
-#endif
ld.q SP, FRAME_S(FSSR), r6
shlri r6, 30, r6
andi r6, 1, r6
@@ -882,12 +879,6 @@ ret_from_irq:
ret_from_exception:
preempt_stop()
-#ifdef CONFIG_POOR_MANS_STRACE
- pta evt_debug_ret_from_exc, tr0
- ori SP, 0, r2
- blink tr0, LINK
-#endif
-
ld.q SP, FRAME_S(FSSR), r6
shlri r6, 30, r6
andi r6, 1, r6
@@ -901,6 +892,8 @@ ret_from_exception:
blink tr0, ZERO
resume_kernel:
+ CLI()
+
pta restore_all, tr0
getcon KCR0, r6
@@ -917,19 +910,11 @@ need_resched:
andi r7, 0xf0, r7
bne r7, ZERO, tr0
- movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
- shori (PREEMPT_ACTIVE & 65535), r8
- st.l r6, TI_PRE_COUNT, r8
-
- STI()
- movi schedule, r7
+ movi preempt_schedule_irq, r7
ori r7, 1, r7
ptabs r7, tr1
blink tr1, LINK
- st.l r6, TI_PRE_COUNT, ZERO
- CLI()
-
pta need_resched, tr1
blink tr1, ZERO
#endif
@@ -948,7 +933,7 @@ ret_with_reschedule:
pta restore_all, tr1
- movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+ movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
@@ -965,11 +950,11 @@ work_resched:
work_notifysig:
gettr tr1, LINK
- movi do_signal, r6
+ movi do_notify_resume, r6
ptabs r6, tr0
or SP, ZERO, r2
- or ZERO, ZERO, r3
- blink tr0, LINK /* Call do_signal(regs, 0), return here */
+ or r7, ZERO, r3
+ blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
restore_all:
/* Do prefetches */
@@ -1094,50 +1079,50 @@ restore_all:
*
* Kernel TLB fault handlers will get a slightly different interface.
* (r2) struct pt_regs *, original register's frame pointer
- * (r3) writeaccess, whether it's a store fault as opposed to load fault
- * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
- * (r5) Effective Address of fault
+ * (r3) page fault error code (see asm/thread_info.h)
+ * (r4) Effective Address of fault
* (LINK) return address
* (SP) = r2
*
* fpu_error_or_IRQ? is a helper to deflect to the right cause.
*
*/
+#ifdef CONFIG_MMU
tlb_miss_load:
or SP, ZERO, r2
or ZERO, ZERO, r3 /* Read */
- or ZERO, ZERO, r4 /* Data */
- getcon TEA, r5
+ getcon TEA, r4
pta call_do_page_fault, tr0
beq ZERO, ZERO, tr0
tlb_miss_store:
or SP, ZERO, r2
- movi 1, r3 /* Write */
- or ZERO, ZERO, r4 /* Data */
- getcon TEA, r5
+ movi FAULT_CODE_WRITE, r3 /* Write */
+ getcon TEA, r4
pta call_do_page_fault, tr0
beq ZERO, ZERO, tr0
itlb_miss_or_IRQ:
pta its_IRQ, tr0
beqi/u r4, EVENT_INTERRUPT, tr0
+
+ /* ITLB miss */
or SP, ZERO, r2
- or ZERO, ZERO, r3 /* Read */
- movi 1, r4 /* Text */
- getcon TEA, r5
+ movi FAULT_CODE_ITLB, r3
+ getcon TEA, r4
/* Fall through */
call_do_page_fault:
movi do_page_fault, r6
ptabs r6, tr0
blink tr0, ZERO
+#endif /* CONFIG_MMU */
fpu_error_or_IRQA:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
- movi do_fpu_state_restore, r6
+ movi fpu_state_restore_trap_handler, r6
#else
movi do_exception_error, r6
#endif
@@ -1148,7 +1133,7 @@ fpu_error_or_IRQB:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
- movi do_fpu_state_restore, r6
+ movi fpu_state_restore_trap_handler, r6
#else
movi do_exception_error, r6
#endif
@@ -1217,18 +1202,6 @@ syscall_bad:
.global syscall_ret
syscall_ret:
st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
-
-#ifdef CONFIG_POOR_MANS_STRACE
- /* nothing useful in registers at this point */
-
- movi evt_debug2, r5
- ori r5, 1, r5
- ptabs r5, tr0
- ld.q SP, FRAME_R(9), r2
- or SP, ZERO, r3
- blink tr0, LINK
-#endif
-
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
@@ -1249,16 +1222,24 @@ ret_from_fork:
ptabs r5, tr0
blink tr0, LINK
-#ifdef CONFIG_POOR_MANS_STRACE
- /* nothing useful in registers at this point */
+ ld.q SP, FRAME_S(FSPC), r2
+ addi r2, 4, r2 /* Move PC, being pre-execution event */
+ st.q SP, FRAME_S(FSPC), r2
+ pta ret_from_syscall, tr0
+ blink tr0, ZERO
+
+.global ret_from_kernel_thread
+ret_from_kernel_thread:
- movi evt_debug2, r5
+ movi schedule_tail,r5
ori r5, 1, r5
ptabs r5, tr0
- ld.q SP, FRAME_R(9), r2
- or SP, ZERO, r3
blink tr0, LINK
-#endif
+
+ ld.q SP, FRAME_R(2), r2
+ ld.q SP, FRAME_R(3), r3
+ ptabs r3, tr0
+ blink tr0, LINK
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
@@ -1266,8 +1247,6 @@ ret_from_fork:
pta ret_from_syscall, tr0
blink tr0, ZERO
-
-
syscall_allowed:
/* Use LINK to deflect the exit point, default is syscall_ret */
pta syscall_ret, tr0
@@ -1276,18 +1255,20 @@ syscall_allowed:
getcon KCR0, r2
ld.l r2, TI_FLAGS, r4
- movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
+ movi _TIF_WORK_SYSCALL_MASK, r6
and r6, r4, r6
beq/l r6, ZERO, tr0
/* Trace it by calling syscall_trace before and after */
- movi syscall_trace, r4
+ movi do_syscall_trace_enter, r4
or SP, ZERO, r2
- or ZERO, ZERO, r3
ptabs r4, tr0
blink tr0, LINK
- /* Reload syscall number as r5 is trashed by syscall_trace */
+ /* Save the retval */
+ st.q SP, FRAME_R(2), r2
+
+ /* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
ld.q SP, FRAME_S(FSYSCALL_ID), r5
andi r5, 0x1ff, r5
@@ -1319,9 +1300,8 @@ syscall_ret_trace:
/* We get back here only if under trace */
st.q SP, FRAME_R(9), r2 /* Save return value */
- movi syscall_trace, LINK
+ movi do_syscall_trace_leave, LINK
or SP, ZERO, r2
- movi 1, r3
ptabs LINK, tr0
blink tr0, LINK
@@ -1390,8 +1370,8 @@ peek_real_address_q:
r2(out) : result quadword
This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to onchip_remap the debug
- module, and to avoid the need to onchip_remap the watchpoint
+ registers for debugging (to avoid the need to ioremap the debug
+ module, and to avoid the need to ioremap the watchpoint
controller in a way that identity maps sufficient bits to avoid the
SH5-101 cut2 silicon defect).
@@ -1439,8 +1419,8 @@ poke_real_address_q:
r3 : quadword value to write.
This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to onchip_remap the debug
- module, and to avoid the need to onchip_remap the watchpoint
+ registers for debugging (to avoid the need to ioremap the debug
+ module, and to avoid the need to ioremap the watchpoint
controller in a way that identity maps sufficient bits to avoid the
SH5-101 cut2 silicon defect).
@@ -1481,6 +1461,7 @@ poke_real_address_q:
ptabs LINK, tr0
blink tr0, r63
+#ifdef CONFIG_MMU
/*
* --- User Access Handling Section
*/
@@ -1604,86 +1585,7 @@ ___clear_user_exit:
ptabs LINK, tr0
blink tr0, ZERO
-
-/*
- * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
- * int __count)
- *
- * Inputs:
- * (r2) target address
- * (r3) source address
- * (r4) maximum size in bytes
- *
- * Ouputs:
- * (*r2) copied data
- * (r2) -EFAULT (in case of faulting)
- * copied data (otherwise)
- */
- .global __strncpy_from_user
-__strncpy_from_user:
- pta ___strncpy_from_user1, tr0
- pta ___strncpy_from_user_done, tr1
- or r4, ZERO, r5 /* r5 = original count */
- beq/u r4, r63, tr1 /* early exit if r4==0 */
- movi -(EFAULT), r6 /* r6 = reply, no real fixup */
- or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
-
-___strncpy_from_user1:
- ld.b r3, 0, r7 /* Fault address: only in reading */
- st.b r2, 0, r7
- addi r2, 1, r2
- addi r3, 1, r3
- beq/u ZERO, r7, tr1
- addi r4, -1, r4 /* return real number of copied bytes */
- bne/l ZERO, r4, tr0
-
-___strncpy_from_user_done:
- sub r5, r4, r6 /* If done, return copied */
-
-___strncpy_from_user_exit:
- or r6, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * extern long __strnlen_user(const char *__s, long __n)
- *
- * Inputs:
- * (r2) source address
- * (r3) source size in bytes
- *
- * Ouputs:
- * (r2) -EFAULT (in case of faulting)
- * string length (otherwise)
- */
- .global __strnlen_user
-__strnlen_user:
- pta ___strnlen_user_set_reply, tr0
- pta ___strnlen_user1, tr1
- or ZERO, ZERO, r5 /* r5 = counter */
- movi -(EFAULT), r6 /* r6 = reply, no real fixup */
- or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
- beq r3, ZERO, tr0
-
-___strnlen_user1:
- ldx.b r2, r5, r7 /* Fault address: only in reading */
- addi r3, -1, r3 /* No real fixup */
- addi r5, 1, r5
- beq r3, ZERO, tr0
- bne r7, ZERO, tr1
-! The line below used to be active. This meant led to a junk byte lying between each pair
-! of entries in the argv & envp structures in memory. Whilst the program saw the right data
-! via the argv and envp arguments to main, it meant the 'flat' representation visible through
-! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
-! addi r5, 1, r5 /* Include '\0' */
-
-___strnlen_user_set_reply:
- or r5, ZERO, r6 /* If done, return counter */
-
-___strnlen_user_exit:
- or r6, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
+#endif /* CONFIG_MMU */
/*
* extern long __get_user_asm_?(void *val, long addr)
@@ -2014,11 +1916,11 @@ sa_default_restorer:
.global asm_uaccess_start /* Just a marker */
asm_uaccess_start:
+#ifdef CONFIG_MMU
.long ___copy_user1, ___copy_user_exit
.long ___copy_user2, ___copy_user_exit
.long ___clear_user1, ___clear_user_exit
- .long ___strncpy_from_user1, ___strncpy_from_user_exit
- .long ___strnlen_user1, ___strnlen_user_exit
+#endif
.long ___get_user_asm_b1, ___get_user_asm_b_exit
.long ___get_user_asm_w1, ___get_user_asm_w_exit
.long ___get_user_asm_l1, ___get_user_asm_l_exit
@@ -2035,10 +1937,10 @@ asm_uaccess_end:
/*
- * --- .text.init Section
+ * --- .init.text Section
*/
- .section .text.init, "ax"
+ __INIT
/*
* void trap_init (void)
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index dd4f51ffb50..9f8713aa718 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -15,26 +15,8 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
-#include <asm/user.h>
-#include <asm/io.h>
-#include <asm/fpu.h>
-/*
- * Initially load the FPU with signalling NANS. This bit pattern
- * has the property that no matter whether considered as single or as
- * double precision, it still represents a signalling NAN.
- */
-#define sNAN64 0xFFFFFFFFFFFFFFFFULL
-#define sNAN32 0xFFFFFFFFUL
-
-static union sh_fpu_union init_fpuregs = {
- .hard = {
- .fp_regs = { [0 ... 63] = sNAN32 },
- .fpscr = FPSCR_INIT
- }
-};
-
-void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
+void save_fpu(struct task_struct *tsk)
{
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
@@ -72,12 +54,11 @@ void save_fpu(struct task_struct *tsk, struct pt_regs *regs)
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
- : "r" (&tsk->thread.fpu.hard)
+ : "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
-static inline void
-fpload(struct sh_fpu_hard_struct *fpregs)
+void restore_fpu(struct task_struct *tsk)
{
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
@@ -116,52 +97,15 @@ fpload(struct sh_fpu_hard_struct *fpregs)
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
- : "r" (fpregs) );
-}
-
-void fpinit(struct sh_fpu_hard_struct *fpregs)
-{
- *fpregs = init_fpuregs.hard;
+ : "r" (&tsk->thread.xstate->hardfpu)
+ : "memory");
}
-asmlinkage void
-do_fpu_error(unsigned long ex, struct pt_regs *regs)
+asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
struct task_struct *tsk = current;
regs->pc += 4;
- tsk->thread.trap_no = 11;
- tsk->thread.error_code = 0;
force_sig(SIGFPE, tsk);
}
-
-
-asmlinkage void
-do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
-{
- void die(const char *str, struct pt_regs *regs, long err);
-
- if (! user_mode(regs))
- die("FPU used in kernel", regs, ex);
-
- regs->sr &= ~SR_FD;
-
- if (last_task_used_math == current)
- return;
-
- enable_fpu();
- if (last_task_used_math != NULL)
- /* Other processes fpu state, save away */
- save_fpu(last_task_used_math, regs);
-
- last_task_used_math = current;
- if (used_math()) {
- fpload(&current->thread.fpu.hard);
- } else {
- /* First time FPU user. */
- fpload(&init_fpuregs.hard);
- set_used_math();
- }
- disable_fpu();
-}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 31f8cb0f637..eca427c2f2f 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -15,8 +15,9 @@
#include <linux/string.h>
#include <asm/processor.h>
#include <asm/cache.h>
+#include <asm/tlb.h>
-int __init detect_cpu_and_cache_system(void)
+void cpu_probe(void)
{
unsigned long long cir;
@@ -33,6 +34,8 @@ int __init detect_cpu_and_cache_system(void)
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
+ boot_cpu_data.family = CPU_FAMILY_SH5;
+
/*
* First, setup some sane values for the I-cache.
*/
@@ -67,5 +70,6 @@ int __init detect_cpu_and_cache_system(void)
set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
#endif
- return 0;
+ /* Setup some I/D TLB defaults */
+ sh64_tlb_init();
}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
new file mode 100644
index 00000000000..1bf0b2cf665
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c
@@ -0,0 +1,123 @@
+/*
+ * SH5-101/SH5-103 CPU Setup
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_sci.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sh_timer.h>
+#include <asm/addrspace.h>
+
+static struct plat_sci_port scif0_platform_data = {
+ .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+ .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
+ .type = PORT_SCIF,
+};
+
+static struct resource scif0_resources[] = {
+ DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
+ DEFINE_RES_IRQ(39),
+ DEFINE_RES_IRQ(40),
+ DEFINE_RES_IRQ(42),
+};
+
+static struct platform_device scif0_device = {
+ .name = "sh-sci",
+ .id = 0,
+ .resource = scif0_resources,
+ .num_resources = ARRAY_SIZE(scif0_resources),
+ .dev = {
+ .platform_data = &scif0_platform_data,
+ },
+};
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
+ .end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = IRQ_PRI,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = IRQ_CUI,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = IRQ_ATI,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+#define TMU_BLOCK_OFF 0x01020000
+#define TMU_BASE (PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF)

+
+static struct sh_timer_config tmu0_platform_data = {
+ .channels_mask = 7,
+};
+
+static struct resource tmu0_resources[] = {
+ DEFINE_RES_MEM(TMU_BASE, 0x30),
+ DEFINE_RES_IRQ(IRQ_TUNI0),
+ DEFINE_RES_IRQ(IRQ_TUNI1),
+ DEFINE_RES_IRQ(IRQ_TUNI2),
+};
+
+static struct platform_device tmu0_device = {
+ .name = "sh-tmu",
+ .id = 0,
+ .dev = {
+ .platform_data = &tmu0_platform_data,
+ },
+ .resource = tmu0_resources,
+ .num_resources = ARRAY_SIZE(tmu0_resources),
+};
+
+static struct platform_device *sh5_early_devices[] __initdata = {
+ &scif0_device,
+ &tmu0_device,
+};
+
+static struct platform_device *sh5_devices[] __initdata = {
+ &rtc_device,
+};
+
+static int __init sh5_devices_setup(void)
+{
+ int ret;
+
+ ret = platform_add_devices(sh5_early_devices,
+ ARRAY_SIZE(sh5_early_devices));
+ if (unlikely(ret != 0))
+ return ret;
+
+ return platform_add_devices(sh5_devices,
+ ARRAY_SIZE(sh5_devices));
+}
+arch_initcall(sh5_devices_setup);
+
+void __init plat_early_device_setup(void)
+{
+ early_platform_add_devices(sh5_early_devices,
+ ARRAY_SIZE(sh5_early_devices));
+}
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
index b205b25eaf4..10aed41757f 100644
--- a/arch/sh/kernel/cpu/sh5/unwind.c
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -16,6 +16,8 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
static u8 regcache[63];
@@ -199,8 +201,11 @@ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
return 0;
}
-/* Don't put this on the stack since we'll want to call sh64_unwind
- * when we're close to underflowing the stack anyway. */
+/*
+ * Don't put this on the stack since we'll want to call in to
+ * sh64_unwinder_dump() when we're close to underflowing the stack
+ * anyway.
+ */
static struct pt_regs here_regs;
extern const char syscall_ret;
@@ -208,17 +213,19 @@ extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;
-static void sh64_unwind_inner(struct pt_regs *regs);
+static void sh64_unwind_inner(const struct stacktrace_ops *ops,
+ void *data, struct pt_regs *regs);
-static void unwind_nested (unsigned long pc, unsigned long fp)
+static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
+ unsigned long pc, unsigned long fp)
{
if ((fp >= __MEMORY_START) &&
- ((fp & 7) == 0)) {
- sh64_unwind_inner((struct pt_regs *) fp);
- }
+ ((fp & 7) == 0))
+ sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}
-static void sh64_unwind_inner(struct pt_regs *regs)
+static void sh64_unwind_inner(const struct stacktrace_ops *ops,
+ void *data, struct pt_regs *regs)
{
unsigned long pc, fp;
int ofs = 0;
@@ -232,29 +239,29 @@ static void sh64_unwind_inner(struct pt_regs *regs)
int cond;
unsigned long next_fp, next_pc;
- if (pc == ((unsigned long) &syscall_ret & ~1)) {
+ if (pc == ((unsigned long)&syscall_ret & ~1)) {
printk("SYSCALL\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
- if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+ if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
printk("SYSCALL (PREEMPTED)\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
/* In this case, the PC is discovered by lookup_prev_stack_frame but
it has 4 taken off it to look like the 'caller' */
- if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+ if (pc == ((unsigned long)&ret_from_exception & ~1)) {
printk("EXCEPTION\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
- if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+ if (pc == ((unsigned long)&ret_from_irq & ~1)) {
printk("IRQ\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
@@ -263,8 +270,7 @@ static void sh64_unwind_inner(struct pt_regs *regs)
pc -= ofs;
- printk("[<%08lx>] ", pc);
- print_symbol("%s\n", pc);
+ ops->address(data, pc, 1);
if (first_pass) {
/* If the innermost frame is a leaf function, it's
@@ -287,10 +293,13 @@ static void sh64_unwind_inner(struct pt_regs *regs)
}
printk("\n");
-
}
-void sh64_unwind(struct pt_regs *regs)
+static void sh64_unwinder_dump(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops,
+ void *data)
{
if (!regs) {
/*
@@ -320,7 +329,17 @@ void sh64_unwind(struct pt_regs *regs)
);
}
- printk("\nCall Trace:\n");
- sh64_unwind_inner(regs);
+ sh64_unwind_inner(ops, data, regs);
}
+static struct unwinder sh64_unwinder = {
+ .name = "sh64-unwinder",
+ .dump = sh64_unwinder_dump,
+ .rating = 150,
+};
+
+static int __init sh64_unwinder_init(void)
+{
+ return unwinder_register(&sh64_unwinder);
+}
+early_initcall(sh64_unwinder_init);
diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile
new file mode 100644
index 00000000000..e8a5111e848
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux/SuperH SH-Mobile backends.
+#
+
+# Power Management & Sleep mode
+obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
new file mode 100644
index 00000000000..e3abfd4277e
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -0,0 +1,101 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/cpuidle.c
+ *
+ * Cpuidle support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <linux/cpuidle.h>
+#include <linux/export.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+
+static unsigned long cpuidle_mode[] = {
+ SUSP_SH_SLEEP, /* regular sleep mode */
+ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
+ SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */
+};
+
+static int cpuidle_sleep_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ unsigned long allowed_mode = SUSP_SH_SLEEP;
+ int requested_state = index;
+ int allowed_state;
+ int k;
+
+ /* convert allowed mode to allowed state */
+ for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
+ if (cpuidle_mode[k] == allowed_mode)
+ break;
+
+ allowed_state = k;
+
+ /* take the following into account for sleep mode selection:
+ * - allowed_state: best mode allowed by hardware (clock deps)
+ * - requested_state: best mode allowed by software (latencies)
+ */
+ k = min_t(int, allowed_state, requested_state);
+
+ sh_mobile_call_standby(cpuidle_mode[k]);
+
+ return k;
+}
+
+static struct cpuidle_driver cpuidle_driver = {
+ .name = "sh_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ {
+ .exit_latency = 1,
+ .target_residency = 1 * 2,
+ .power_usage = 3,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .enter = cpuidle_sleep_enter,
+ .name = "C1",
+ .desc = "SuperH Sleep Mode",
+ },
+ {
+ .exit_latency = 100,
+ .target_residency = 1 * 2,
+ .power_usage = 1,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .enter = cpuidle_sleep_enter,
+ .name = "C2",
+ .desc = "SuperH Sleep Mode [SF]",
+ .disabled = true,
+ },
+ {
+ .exit_latency = 2300,
+ .target_residency = 1 * 2,
+ .power_usage = 1,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .enter = cpuidle_sleep_enter,
+ .name = "C3",
+ .desc = "SuperH Mobile Standby Mode [SF]",
+ .disabled = true,
+ },
+ },
+ .safe_state_index = 0,
+ .state_count = 3,
+};
+
+int __init sh_mobile_setup_cpuidle(void)
+{
+ if (sh_mobile_sleep_supported & SUSP_SH_SF)
+ cpuidle_driver.states[1].disabled = false;
+
+ if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
+ cpuidle_driver.states[2].disabled = false;
+
+ return cpuidle_register(&cpuidle_driver, NULL);
+}
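
cpuidle_sleep_enter() above clamps the state requested by the cpuidle governor against the deepest mode the hardware currently allows, by mapping the allowed mode back to an index in cpuidle_mode[] and taking the minimum of the two indices. A stand-alone sketch of that selection; the SUSP_SH_* flag values here are illustrative stand-ins, not the kernel's real encodings.

/* Sketch of the state-clamping logic in cpuidle_sleep_enter() above. */
#include <stdio.h>

#define SUSP_SH_SLEEP	(1 << 0)	/* assumed flag encodings */
#define SUSP_SH_STANDBY	(1 << 1)
#define SUSP_SH_SF	(1 << 2)

static const unsigned long cpuidle_mode[] = {
	SUSP_SH_SLEEP,			/* regular sleep mode */
	SUSP_SH_SLEEP | SUSP_SH_SF,	/* sleep + self refresh */
	SUSP_SH_STANDBY | SUSP_SH_SF,	/* standby + self refresh */
};

static int pick_state(unsigned long allowed_mode, int requested_state)
{
	int k;

	/* map the deepest allowed mode back to a state index */
	for (k = sizeof(cpuidle_mode) / sizeof(cpuidle_mode[0]) - 1; k > 0; k--)
		if (cpuidle_mode[k] == allowed_mode)
			break;

	/* the shallower of hardware-allowed and governor-requested wins */
	return requested_state < k ? requested_state : k;
}

int main(void)
{
	/* Hardware allows sleep + self-refresh, governor asks for C3 (index 2). */
	printf("entered state C%d\n",
	       pick_state(SUSP_SH_SLEEP | SUSP_SH_SF, 2) + 1);
	return 0;
}
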
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
new file mode 100644
index 00000000000..ac37b7234f8
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -0,0 +1,156 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/pm.c
+ *
+ * Power management support code for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
+#include <asm/suspend.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/bl_bit.h>
+
+/*
+ * Notifier lists for pre/post sleep notification
+ */
+ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
+ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
+
+/*
+ * Sleep modes available on SuperH Mobile:
+ *
+ * Sleep mode is just plain "sleep" instruction
+ * Sleep Self-Refresh mode is above plus RAM put in Self-Refresh
+ * Standby Self-Refresh mode is above plus stopped clocks
+ */
+#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP)
+#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF)
+#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF)
+#define SUSP_MODE_RSTANDBY_SF \
+ (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
+ /*
+ * U-standby mode is unsupported since it needs bootloader hacks
+ */
+
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+#define RAM_BASE 0xfd800000 /* RSMEM */
+#else
+#define RAM_BASE 0xe5200000 /* ILRAM */
+#endif
+
+void sh_mobile_call_standby(unsigned long mode)
+{
+ void *onchip_mem = (void *)RAM_BASE;
+ struct sh_sleep_data *sdp = onchip_mem;
+ void (*standby_onchip_mem)(unsigned long, unsigned long);
+
+ /* code located directly after data structure */
+ standby_onchip_mem = (void *)(sdp + 1);
+
+ atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
+ mode, NULL);
+
+ /* flush the caches if MMU flag is set */
+ if (mode & SUSP_SH_MMU)
+ flush_cache_all();
+
+ /* Let assembly snippet in on-chip memory handle the rest */
+ standby_onchip_mem(mode, RAM_BASE);
+
+ atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
+ mode, NULL);
+}
+
+extern char sh_mobile_sleep_enter_start;
+extern char sh_mobile_sleep_enter_end;
+
+extern char sh_mobile_sleep_resume_start;
+extern char sh_mobile_sleep_resume_end;
+
+unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
+
+void sh_mobile_register_self_refresh(unsigned long flags,
+ void *pre_start, void *pre_end,
+ void *post_start, void *post_end)
+{
+ void *onchip_mem = (void *)RAM_BASE;
+ void *vp;
+ struct sh_sleep_data *sdp;
+ int n;
+
+ /* part 0: data area */
+ sdp = onchip_mem;
+ sdp->addr.stbcr = 0xa4150020; /* STBCR */
+ sdp->addr.bar = 0xa4150040; /* BAR */
+ sdp->addr.pteh = 0xff000000; /* PTEH */
+ sdp->addr.ptel = 0xff000004; /* PTEL */
+ sdp->addr.ttb = 0xff000008; /* TTB */
+ sdp->addr.tea = 0xff00000c; /* TEA */
+ sdp->addr.mmucr = 0xff000010; /* MMUCR */
+ sdp->addr.ptea = 0xff000034; /* PTEA */
+ sdp->addr.pascr = 0xff000070; /* PASCR */
+ sdp->addr.irmcr = 0xff000078; /* IRMCR */
+ sdp->addr.ccr = 0xff00001c; /* CCR */
+ sdp->addr.ramcr = 0xff000074; /* RAMCR */
+ vp = sdp + 1;
+
+ /* part 1: common code to enter sleep mode */
+ n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
+ memcpy(vp, &sh_mobile_sleep_enter_start, n);
+ vp += roundup(n, 4);
+
+ /* part 2: board specific code to enter self-refresh mode */
+ n = pre_end - pre_start;
+ memcpy(vp, pre_start, n);
+ sdp->sf_pre = (unsigned long)vp;
+ vp += roundup(n, 4);
+
+ /* part 3: board specific code to resume from self-refresh mode */
+ n = post_end - post_start;
+ memcpy(vp, post_start, n);
+ sdp->sf_post = (unsigned long)vp;
+ vp += roundup(n, 4);
+
+ /* part 4: common code to resume from sleep mode */
+ WARN_ON(vp > (onchip_mem + 0x600));
+ vp = onchip_mem + 0x600; /* located at interrupt vector */
+ n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
+ memcpy(vp, &sh_mobile_sleep_resume_start, n);
+ sdp->resume = (unsigned long)vp;
+
+ sh_mobile_sleep_supported |= flags;
+}
+
+static int sh_pm_enter(suspend_state_t state)
+{
+ if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
+ return -ENXIO;
+
+ local_irq_disable();
+ set_bl_bit();
+ sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
+ local_irq_disable();
+ clear_bl_bit();
+ return 0;
+}
+
+static const struct platform_suspend_ops sh_pm_ops = {
+ .enter = sh_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static int __init sh_pm_init(void)
+{
+ suspend_set_ops(&sh_pm_ops);
+ return sh_mobile_setup_cpuidle();
+}
+
+late_initcall(sh_pm_init);
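
sh_mobile_register_self_refresh() above packs four pieces into on-chip RAM: the sh_sleep_data block, the common sleep-entry code, the board's pre/post self-refresh snippets, and finally the resume stub pinned at offset 0x600 where the interrupt vector lands. A sketch of that layout arithmetic; the sizes below are hypothetical, the real ones come from the linked-in assembly symbols and the structure definition.

/* Host-side sketch of the on-chip RAM layout assembled by
 * sh_mobile_register_self_refresh() above.  All sizes are assumptions.
 */
#include <stdio.h>

static unsigned long roundup4(unsigned long n)
{
	return (n + 3) & ~3UL;
}

int main(void)
{
	unsigned long base = 0xe5200000UL;	/* ILRAM, non-SH7724 case */
	unsigned long data_size = 0x60;		/* assumed sizeof(struct sh_sleep_data) */
	unsigned long enter_size = 0x180;	/* assumed code sizes */
	unsigned long pre_size = 0x40, post_size = 0x40;
	unsigned long vp = base + data_size;

	printf("sleep-entry code @ %#lx\n", vp);
	vp += roundup4(enter_size);
	printf("sf_pre handler   @ %#lx\n", vp);
	vp += roundup4(pre_size);
	printf("sf_post handler  @ %#lx\n", vp);
	vp += roundup4(post_size);
	printf("end of parts 0-3 @ %#lx (must stay below %#lx)\n",
	       vp, base + 0x600);
	printf("resume stub      @ %#lx\n", base + 0x600);
	return 0;
}
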
diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S
new file mode 100644
index 00000000000..e6aac65f575
--- /dev/null
+++ b/arch/sh/kernel/cpu/shmobile/sleep.S
@@ -0,0 +1,405 @@
+/*
+ * arch/sh/kernel/cpu/shmobile/sleep.S
+ *
+ * Sleep mode and Standby modes support for SuperH Mobile
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/suspend.h>
+
+/*
+ * Kernel mode register usage, see entry.S:
+ * k0 scratch
+ * k1 scratch
+ */
+#define k0 r0
+#define k1 r1
+
+/* manage self-refresh and enter standby mode. must be self-contained.
+ * this code will be copied to on-chip memory and executed from there.
+ */
+ .balign 4
+ENTRY(sh_mobile_sleep_enter_start)
+
+ /* save mode flags */
+ mov.l r4, @(SH_SLEEP_MODE, r5)
+
+ /* save original vbr */
+ stc vbr, r0
+ mov.l r0, @(SH_SLEEP_VBR, r5)
+
+ /* point vbr to our on-chip memory page */
+ ldc r5, vbr
+
+ /* save return address */
+ sts pr, r0
+ mov.l r0, @(SH_SLEEP_SPC, r5)
+
+ /* save sr */
+ stc sr, r0
+ mov.l r0, @(SH_SLEEP_SR, r5)
+
+ /* save general purpose registers to stack if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_REGS, r0
+ bt skip_regs_save
+
+ sts.l pr, @-r15
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
+
+ /* make sure bank0 is selected, save low registers */
+ mov.l rb_bit, r9
+ not r9, r9
+ bsr set_sr
+ mov #0, r10
+
+ bsr save_low_regs
+ nop
+
+ /* switch to bank 1, save low registers */
+ mov.l rb_bit, r10
+ bsr set_sr
+ mov #-1, r9
+
+ bsr save_low_regs
+ nop
+
+ /* switch back to bank 0 */
+ mov.l rb_bit, r9
+ not r9, r9
+ bsr set_sr
+ mov #0, r10
+
+skip_regs_save:
+
+ /* save sp, also set to internal ram */
+ mov.l r15, @(SH_SLEEP_SP, r5)
+ mov r5, r15
+
+ /* save stbcr */
+ bsr save_register
+ mov #SH_SLEEP_REG_STBCR, r0
+
+ /* save mmu and cache context if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_MMU, r0
+ bt skip_mmu_save_disable
+
+ /* save mmu state */
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEH, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEL, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_TTB, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_TEA, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PTEA, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_PASCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_IRMCR, r0
+
+ /* invalidate TLBs and disable the MMU */
+ bsr get_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+ mov #4, r1
+ mov.l r1, @r0
+ icbi @r0
+
+ /* save cache registers and disable caches */
+ bsr save_register
+ mov #SH_SLEEP_REG_CCR, r0
+
+ bsr save_register
+ mov #SH_SLEEP_REG_RAMCR, r0
+
+ bsr get_register
+ mov #SH_SLEEP_REG_CCR, r0
+ mov #0, r1
+ mov.l r1, @r0
+ icbi @r0
+
+skip_mmu_save_disable:
+ /* call self-refresh entering code if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_SF, r0
+ bt skip_set_sf
+
+ mov.l @(SH_SLEEP_SF_PRE, r5), r0
+ jsr @r0
+ nop
+
+skip_set_sf:
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_STANDBY, r0
+ bt test_rstandby
+
+ /* set mode to "software standby mode" */
+ bra do_sleep
+ mov #0x80, r1
+
+test_rstandby:
+ tst #SUSP_SH_RSTANDBY, r0
+ bt test_ustandby
+
+ /* setup BAR register */
+ bsr get_register
+ mov #SH_SLEEP_REG_BAR, r0
+ mov.l @(SH_SLEEP_RESUME, r5), r1
+ mov.l r1, @r0
+
+ /* set mode to "r-standby mode" */
+ bra do_sleep
+ mov #0x20, r1
+
+test_ustandby:
+ tst #SUSP_SH_USTANDBY, r0
+ bt force_sleep
+
+ /* set mode to "u-standby mode" */
+ bra do_sleep
+ mov #0x10, r1
+
+force_sleep:
+
+ /* set mode to "sleep mode" */
+ mov #0x00, r1
+
+do_sleep:
+ /* setup and enter selected standby mode */
+ bsr get_register
+ mov #SH_SLEEP_REG_STBCR, r0
+ mov.l r1, @r0
+again:
+ sleep
+ bra again
+ nop
+
+save_register:
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r1
+ add #-SH_SLEEP_BASE_ADDR, r0
+ mov.l @r1, r1
+ add #SH_SLEEP_BASE_DATA, r0
+ mov.l r1, @(r0, r5)
+ add #-SH_SLEEP_BASE_DATA, r0
+ rts
+ nop
+
+get_register:
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r0
+ rts
+ nop
+
+set_sr:
+ stc sr, r8
+ and r9, r8
+ or r10, r8
+ ldc r8, sr
+ rts
+ nop
+
+save_low_regs:
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ rts
+ mov.l r0, @-r15
+
+ .balign 4
+rb_bit: .long 0x20000000 ! RB=1
+
+ENTRY(sh_mobile_sleep_enter_end)
+
+ .balign 4
+ENTRY(sh_mobile_sleep_resume_start)
+
+ /* figure out start address */
+ bsr 0f
+ nop
+0:
+ sts pr, k1
+ mov.l 1f, k0
+ and k0, k1
+
+ /* store pointer to data area in VBR */
+ ldc k1, vbr
+
+ /* setup sr with saved sr */
+ mov.l @(SH_SLEEP_SR, k1), k0
+ ldc k0, sr
+
+ /* now: user register set! */
+ stc vbr, r5
+
+ /* setup spc with return address to c code */
+ mov.l @(SH_SLEEP_SPC, r5), r0
+ ldc r0, spc
+
+ /* restore vbr */
+ mov.l @(SH_SLEEP_VBR, r5), r0
+ ldc r0, vbr
+
+ /* setup ssr with saved sr */
+ mov.l @(SH_SLEEP_SR, r5), r0
+ ldc r0, ssr
+
+ /* restore sp */
+ mov.l @(SH_SLEEP_SP, r5), r15
+
+ /* restore sleep mode register */
+ bsr restore_register
+ mov #SH_SLEEP_REG_STBCR, r0
+
+ /* call self-refresh resume code if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_SF, r0
+ bt skip_restore_sf
+
+ mov.l @(SH_SLEEP_SF_POST, r5), r0
+ jsr @r0
+ nop
+
+skip_restore_sf:
+ /* restore mmu and cache state if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_MMU, r0
+ bt skip_restore_mmu
+
+ /* restore mmu state */
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEH, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEL, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_TTB, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_TEA, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PTEA, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_PASCR, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_IRMCR, r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_MMUCR, r0
+ icbi @r0
+
+ /* restore cache settings */
+ bsr restore_register
+ mov #SH_SLEEP_REG_RAMCR, r0
+ icbi @r0
+
+ bsr restore_register
+ mov #SH_SLEEP_REG_CCR, r0
+ icbi @r0
+
+skip_restore_mmu:
+
+ /* restore general purpose registers if needed */
+ mov.l @(SH_SLEEP_MODE, r5), r0
+ tst #SUSP_SH_REGS, r0
+ bt skip_restore_regs
+
+ /* switch to bank 1, restore low registers */
+ mov.l _rb_bit, r10
+ bsr _set_sr
+ mov #-1, r9
+
+ bsr restore_low_regs
+ nop
+
+ /* switch to bank0, restore low registers */
+ mov.l _rb_bit, r9
+ not r9, r9
+ bsr _set_sr
+ mov #0, r10
+
+ bsr restore_low_regs
+ nop
+
+ /* restore the rest of the registers */
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ lds.l @r15+, pr
+
+skip_restore_regs:
+ rte
+ nop
+
+restore_register:
+ add #SH_SLEEP_BASE_DATA, r0
+ mov.l @(r0, r5), r1
+ add #-SH_SLEEP_BASE_DATA, r0
+ add #SH_SLEEP_BASE_ADDR, r0
+ mov.l @(r0, r5), r0
+ mov.l r1, @r0
+ rts
+ nop
+
+_set_sr:
+ stc sr, r8
+ and r9, r8
+ or r10, r8
+ ldc r8, sr
+ rts
+ nop
+
+restore_low_regs:
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ rts
+ mov.l @r15+, r7
+
+ .balign 4
+_rb_bit: .long 0x20000000 ! RB=1
+1: .long ~0x7ff
+ENTRY(sh_mobile_sleep_resume_end)
diff --git a/arch/sh/kernel/cpu/ubc.S b/arch/sh/kernel/cpu/ubc.S
deleted file mode 100644
index 81923079fa1..00000000000
--- a/arch/sh/kernel/cpu/ubc.S
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * arch/sh/kernel/cpu/ubc.S
- *
- * Set of management routines for the User Break Controller (UBC)
- *
- * Copyright (C) 2002 Paul Mundt
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/linkage.h>
-#include <asm/ubc.h>
-
-#define STBCR2 0xffc00010
-
-ENTRY(ubc_sleep)
- mov #0, r0
-
- mov.l 1f, r1 ! Zero out UBC_BBRA ..
- mov.w r0, @r1
-
- mov.l 2f, r1 ! .. same for BBRB ..
- mov.w r0, @r1
-
- mov.l 3f, r1 ! .. and again for BRCR.
- mov.w r0, @r1
-
- mov.w @r1, r0 ! Dummy read BRCR
-
- mov.l 4f, r1 ! Set MSTP5 in STBCR2
- mov.b @r1, r0
- or #0x01, r0
- mov.b r0, @r1
-
- mov.b @r1, r0 ! Two dummy reads ..
- mov.b @r1, r0
-
- rts
- nop
-
-ENTRY(ubc_wakeup)
- mov.l 4f, r1 ! Clear MSTP5
- mov.b @r1, r0
- and #0xfe, r0
- mov.b r0, @r1
-
- mov.b @r1, r0 ! Two more dummy reads ..
- mov.b @r1, r0
-
- rts
- nop
-
-1: .long UBC_BBRA
-2: .long UBC_BBRB
-3: .long UBC_BRCR
-4: .long STBCR2
-
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
deleted file mode 100644
index e0590ffebd7..00000000000
--- a/arch/sh/kernel/cpufreq.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * arch/sh/kernel/cpufreq.c
- *
- * cpufreq driver for the SuperH processors.
- *
- * Copyright (C) 2002 - 2007 Paul Mundt
- * Copyright (C) 2002 M. R. Brown
- *
- * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
- *
- * Copyright (C) 2004-2007 Atmel Corporation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/types.h>
-#include <linux/cpufreq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/cpumask.h>
-#include <linux/smp.h>
-#include <linux/sched.h> /* set_cpus_allowed() */
-#include <linux/clk.h>
-
-static struct clk *cpuclk;
-
-static unsigned int sh_cpufreq_get(unsigned int cpu)
-{
- return (clk_get_rate(cpuclk) + 500) / 1000;
-}
-
-/*
- * Here we notify other drivers of the proposed change and the final change.
- */
-static int sh_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int cpu = policy->cpu;
- cpumask_t cpus_allowed;
- struct cpufreq_freqs freqs;
- long freq;
-
- if (!cpu_online(cpu))
- return -ENODEV;
-
- cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
-
- BUG_ON(smp_processor_id() != cpu);
-
- /* Convert target_freq from kHz to Hz */
- freq = clk_round_rate(cpuclk, target_freq * 1000);
-
- if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
- return -EINVAL;
-
- pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
-
- freqs.cpu = cpu;
- freqs.old = sh_cpufreq_get(cpu);
- freqs.new = (freq + 500) / 1000;
- freqs.flags = 0;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- set_cpus_allowed(current, cpus_allowed);
- clk_set_rate(cpuclk, freq);
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- pr_debug("cpufreq: set frequency %lu Hz\n", freq);
-
- return 0;
-}
-
-static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- if (!cpu_online(policy->cpu))
- return -ENODEV;
-
- cpuclk = clk_get(NULL, "cpu_clk");
- if (IS_ERR(cpuclk)) {
- printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
- return PTR_ERR(cpuclk);
- }
-
- /* cpuinfo and default policy values */
- policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
- policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
- policy->cur = sh_cpufreq_get(policy->cpu);
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
-
-
- /*
- * Catch the cases where the clock framework hasn't been wired up
- * properly to support scaling.
- */
- if (unlikely(policy->min == policy->max)) {
- printk(KERN_ERR "cpufreq: clock framework rate rounding "
- "not supported on this CPU.\n");
-
- clk_put(cpuclk);
- return -EINVAL;
- }
-
- printk(KERN_INFO "cpufreq: Frequencies - Minimum %u.%03u MHz, "
- "Maximum %u.%03u MHz.\n",
- policy->min / 1000, policy->min % 1000,
- policy->max / 1000, policy->max % 1000);
-
- return 0;
-}
-
-static int sh_cpufreq_verify(struct cpufreq_policy *policy)
-{
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
-
-static int sh_cpufreq_exit(struct cpufreq_policy *policy)
-{
- clk_put(cpuclk);
- return 0;
-}
-
-static struct cpufreq_driver sh_cpufreq_driver = {
- .owner = THIS_MODULE,
- .name = "sh",
- .init = sh_cpufreq_cpu_init,
- .verify = sh_cpufreq_verify,
- .target = sh_cpufreq_target,
- .get = sh_cpufreq_get,
- .exit = sh_cpufreq_exit,
-};
-
-static int __init sh_cpufreq_module_init(void)
-{
- printk(KERN_INFO "cpufreq: SuperH CPU frequency driver.\n");
- return cpufreq_register_driver(&sh_cpufreq_driver);
-}
-
-static void __exit sh_cpufreq_module_exit(void)
-{
- cpufreq_unregister_driver(&sh_cpufreq_driver);
-}
-
-module_init(sh_cpufreq_module_init);
-module_exit(sh_cpufreq_module_exit);
-
-MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
-MODULE_DESCRIPTION("cpufreq driver for SuperH");
-MODULE_LICENSE("GPL");
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 4a2ecbe27d8..569e7b171c0 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -4,7 +4,6 @@
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
-
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S
index 13b66746410..7a1b46fec0f 100644
--- a/arch/sh/kernel/debugtraps.S
+++ b/arch/sh/kernel/debugtraps.S
@@ -3,7 +3,7 @@
*
* Debug trap jump tables for SuperH
*
- * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -12,12 +12,12 @@
#include <linux/sys.h>
#include <linux/linkage.h>
-#if !defined(CONFIG_SH_KGDB)
-#define kgdb_handle_exception debug_trap_handler
+#if !defined(CONFIG_KGDB)
+#define singlestep_trap_handler debug_trap_handler
#endif
#if !defined(CONFIG_SH_STANDARD_BIOS)
-#define sh_bios_handler debug_trap_handler
+#define sh_bios_handler debug_trap_handler
#endif
.data
@@ -35,7 +35,7 @@ ENTRY(debug_trap_table)
.long debug_trap_handler /* 0x39 */
.long debug_trap_handler /* 0x3a */
.long debug_trap_handler /* 0x3b */
- .long kgdb_handle_exception /* 0x3c */
- .long debug_trap_handler /* 0x3d */
+ .long breakpoint_trap_handler /* 0x3c */
+ .long singlestep_trap_handler /* 0x3d */
.long bug_trap_handler /* 0x3e */
.long sh_bios_handler /* 0x3f */
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
new file mode 100644
index 00000000000..64d5d8dded7
--- /dev/null
+++ b/arch/sh/kernel/disassemble.c
@@ -0,0 +1,573 @@
+/*
+ * Disassemble SuperH instructions.
+ *
+ * Copyright (C) 1999 kaz Kojima
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+/*
+ * Format of an instruction in memory.
+ */
+typedef enum {
+ HEX_0, HEX_1, HEX_2, HEX_3, HEX_4, HEX_5, HEX_6, HEX_7,
+ HEX_8, HEX_9, HEX_A, HEX_B, HEX_C, HEX_D, HEX_E, HEX_F,
+ REG_N, REG_M, REG_NM, REG_B,
+ BRANCH_12, BRANCH_8,
+ DISP_8, DISP_4,
+ IMM_4, IMM_4BY2, IMM_4BY4, PCRELIMM_8BY2, PCRELIMM_8BY4,
+ IMM_8, IMM_8BY2, IMM_8BY4,
+} sh_nibble_type;
+
+typedef enum {
+ A_END, A_BDISP12, A_BDISP8,
+ A_DEC_M, A_DEC_N,
+ A_DISP_GBR, A_DISP_PC, A_DISP_REG_M, A_DISP_REG_N,
+ A_GBR,
+ A_IMM,
+ A_INC_M, A_INC_N,
+ A_IND_M, A_IND_N, A_IND_R0_REG_M, A_IND_R0_REG_N,
+ A_MACH, A_MACL,
+ A_PR, A_R0, A_R0_GBR, A_REG_M, A_REG_N, A_REG_B,
+ A_SR, A_VBR, A_SSR, A_SPC, A_SGR, A_DBR,
+ F_REG_N, F_REG_M, D_REG_N, D_REG_M,
+ X_REG_N, /* Only used for argument parsing */
+ X_REG_M, /* Only used for argument parsing */
+ DX_REG_N, DX_REG_M, V_REG_N, V_REG_M,
+ FD_REG_N,
+ XMTRX_M4,
+ F_FR0,
+ FPUL_N, FPUL_M, FPSCR_N, FPSCR_M,
+} sh_arg_type;
+
+static struct sh_opcode_info {
+ char *name;
+ sh_arg_type arg[7];
+ sh_nibble_type nibbles[4];
+} sh_table[] = {
+ {"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM_8}},
+ {"add",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_C}},
+ {"addc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_E}},
+ {"addv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_F}},
+ {"and",{A_IMM,A_R0},{HEX_C,HEX_9,IMM_8}},
+ {"and",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_9}},
+ {"and.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_D,IMM_8}},
+ {"bra",{A_BDISP12},{HEX_A,BRANCH_12}},
+ {"bsr",{A_BDISP12},{HEX_B,BRANCH_12}},
+ {"bt",{A_BDISP8},{HEX_8,HEX_9,BRANCH_8}},
+ {"bf",{A_BDISP8},{HEX_8,HEX_B,BRANCH_8}},
+ {"bt.s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}},
+ {"bt/s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}},
+ {"bf.s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}},
+ {"bf/s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}},
+ {"clrmac",{0},{HEX_0,HEX_0,HEX_2,HEX_8}},
+ {"clrs",{0},{HEX_0,HEX_0,HEX_4,HEX_8}},
+ {"clrt",{0},{HEX_0,HEX_0,HEX_0,HEX_8}},
+ {"cmp/eq",{A_IMM,A_R0},{HEX_8,HEX_8,IMM_8}},
+ {"cmp/eq",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_0}},
+ {"cmp/ge",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_3}},
+ {"cmp/gt",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_7}},
+ {"cmp/hi",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_6}},
+ {"cmp/hs",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_2}},
+ {"cmp/pl",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_5}},
+ {"cmp/pz",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_1}},
+ {"cmp/str",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_C}},
+ {"div0s",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_7}},
+ {"div0u",{0},{HEX_0,HEX_0,HEX_1,HEX_9}},
+ {"div1",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_4}},
+ {"exts.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_E}},
+ {"exts.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_F}},
+ {"extu.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_C}},
+ {"extu.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_D}},
+ {"jmp",{A_IND_N},{HEX_4,REG_N,HEX_2,HEX_B}},
+ {"jsr",{A_IND_N},{HEX_4,REG_N,HEX_0,HEX_B}},
+ {"ldc",{A_REG_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_E}},
+ {"ldc",{A_REG_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_E}},
+ {"ldc",{A_REG_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_E}},
+ {"ldc",{A_REG_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_E}},
+ {"ldc",{A_REG_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_E}},
+ {"ldc",{A_REG_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_E}},
+ {"ldc",{A_REG_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_E}},
+ {"ldc.l",{A_INC_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_7}},
+ {"ldc.l",{A_INC_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_7}},
+ {"ldc.l",{A_INC_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_7}},
+ {"ldc.l",{A_INC_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_7}},
+ {"ldc.l",{A_INC_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_7}},
+ {"ldc.l",{A_INC_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_7}},
+ {"ldc.l",{A_INC_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_7}},
+ {"lds",{A_REG_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_A}},
+ {"lds",{A_REG_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_A}},
+ {"lds",{A_REG_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_A}},
+ {"lds",{A_REG_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_A}},
+ {"lds",{A_REG_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_A}},
+ {"lds.l",{A_INC_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_6}},
+ {"lds.l",{A_INC_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_6}},
+ {"lds.l",{A_INC_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_6}},
+ {"lds.l",{A_INC_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_6}},
+ {"lds.l",{A_INC_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_6}},
+ {"ldtlb",{0},{HEX_0,HEX_0,HEX_3,HEX_8}},
+ {"mac.w",{A_INC_M,A_INC_N},{HEX_4,REG_N,REG_M,HEX_F}},
+ {"mov",{A_IMM,A_REG_N},{HEX_E,REG_N,IMM_8}},
+ {"mov",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_3}},
+ {"mov.b",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_4}},
+ {"mov.b",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_4}},
+ {"mov.b",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_0}},
+ {"mov.b",{A_DISP_REG_M,A_R0},{HEX_8,HEX_4,REG_M,IMM_4}},
+ {"mov.b",{A_DISP_GBR,A_R0},{HEX_C,HEX_4,IMM_8}},
+ {"mov.b",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_C}},
+ {"mov.b",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_4}},
+ {"mov.b",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_0}},
+ {"mov.b",{A_R0,A_DISP_REG_M},{HEX_8,HEX_0,REG_M,IMM_4}},
+ {"mov.b",{A_R0,A_DISP_GBR},{HEX_C,HEX_0,IMM_8}},
+ {"mov.l",{ A_REG_M,A_DISP_REG_N},{HEX_1,REG_N,REG_M,IMM_4BY4}},
+ {"mov.l",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_6}},
+ {"mov.l",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_6}},
+ {"mov.l",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_2}},
+ {"mov.l",{A_DISP_REG_M,A_REG_N},{HEX_5,REG_N,REG_M,IMM_4BY4}},
+ {"mov.l",{A_DISP_GBR,A_R0},{HEX_C,HEX_6,IMM_8BY4}},
+ {"mov.l",{A_DISP_PC,A_REG_N},{HEX_D,REG_N,PCRELIMM_8BY4}},
+ {"mov.l",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_E}},
+ {"mov.l",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_6}},
+ {"mov.l",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_2}},
+ {"mov.l",{A_R0,A_DISP_GBR},{HEX_C,HEX_2,IMM_8BY4}},
+ {"mov.w",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_5}},
+ {"mov.w",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_5}},
+ {"mov.w",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_1}},
+ {"mov.w",{A_DISP_REG_M,A_R0},{HEX_8,HEX_5,REG_M,IMM_4BY2}},
+ {"mov.w",{A_DISP_GBR,A_R0},{HEX_C,HEX_5,IMM_8BY2}},
+ {"mov.w",{A_DISP_PC,A_REG_N},{HEX_9,REG_N,PCRELIMM_8BY2}},
+ {"mov.w",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_D}},
+ {"mov.w",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_5}},
+ {"mov.w",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_1}},
+ {"mov.w",{A_R0,A_DISP_REG_M},{HEX_8,HEX_1,REG_M,IMM_4BY2}},
+ {"mov.w",{A_R0,A_DISP_GBR},{HEX_C,HEX_1,IMM_8BY2}},
+ {"mova",{A_DISP_PC,A_R0},{HEX_C,HEX_7,PCRELIMM_8BY4}},
+ {"movca.l",{A_R0,A_IND_N},{HEX_0,REG_N,HEX_C,HEX_3}},
+ {"movt",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_9}},
+ {"muls",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_F}},
+ {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}},
+ {"mulu",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_E}},
+ {"neg",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_B}},
+ {"negc",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_A}},
+ {"nop",{0},{HEX_0,HEX_0,HEX_0,HEX_9}},
+ {"not",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_7}},
+ {"ocbi",{A_IND_N},{HEX_0,REG_N,HEX_9,HEX_3}},
+ {"ocbp",{A_IND_N},{HEX_0,REG_N,HEX_A,HEX_3}},
+ {"ocbwb",{A_IND_N},{HEX_0,REG_N,HEX_B,HEX_3}},
+ {"or",{A_IMM,A_R0},{HEX_C,HEX_B,IMM_8}},
+ {"or",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_B}},
+ {"or.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_F,IMM_8}},
+ {"pref",{A_IND_N},{HEX_0,REG_N,HEX_8,HEX_3}},
+ {"rotcl",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_4}},
+ {"rotcr",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_5}},
+ {"rotl",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_4}},
+ {"rotr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_5}},
+ {"rte",{0},{HEX_0,HEX_0,HEX_2,HEX_B}},
+ {"rts",{0},{HEX_0,HEX_0,HEX_0,HEX_B}},
+ {"sets",{0},{HEX_0,HEX_0,HEX_5,HEX_8}},
+ {"sett",{0},{HEX_0,HEX_0,HEX_1,HEX_8}},
+ {"shad",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_C}},
+ {"shld",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_D}},
+ {"shal",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_0}},
+ {"shar",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_1}},
+ {"shll",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_0}},
+ {"shll16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_8}},
+ {"shll2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_8}},
+ {"shll8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_8}},
+ {"shlr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_1}},
+ {"shlr16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_9}},
+ {"shlr2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_9}},
+ {"shlr8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_9}},
+ {"sleep",{0},{HEX_0,HEX_0,HEX_1,HEX_B}},
+ {"stc",{A_SR,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_2}},
+ {"stc",{A_GBR,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_2}},
+ {"stc",{A_VBR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_2}},
+ {"stc",{A_SSR,A_REG_N},{HEX_0,REG_N,HEX_3,HEX_2}},
+ {"stc",{A_SPC,A_REG_N},{HEX_0,REG_N,HEX_4,HEX_2}},
+ {"stc",{A_SGR,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_2}},
+ {"stc",{A_DBR,A_REG_N},{HEX_0,REG_N,HEX_7,HEX_2}},
+ {"stc",{A_REG_B,A_REG_N},{HEX_0,REG_N,REG_B,HEX_2}},
+ {"stc.l",{A_SR,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_3}},
+ {"stc.l",{A_GBR,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_3}},
+ {"stc.l",{A_VBR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_3}},
+ {"stc.l",{A_SSR,A_DEC_N},{HEX_4,REG_N,HEX_3,HEX_3}},
+ {"stc.l",{A_SPC,A_DEC_N},{HEX_4,REG_N,HEX_4,HEX_3}},
+ {"stc.l",{A_SGR,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_3}},
+ {"stc.l",{A_DBR,A_DEC_N},{HEX_4,REG_N,HEX_7,HEX_3}},
+ {"stc.l",{A_REG_B,A_DEC_N},{HEX_4,REG_N,REG_B,HEX_3}},
+ {"sts",{A_MACH,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_A}},
+ {"sts",{A_MACL,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_A}},
+ {"sts",{A_PR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_A}},
+ {"sts",{FPUL_M,A_REG_N},{HEX_0,REG_N,HEX_5,HEX_A}},
+ {"sts",{FPSCR_M,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_A}},
+ {"sts.l",{A_MACH,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_2}},
+ {"sts.l",{A_MACL,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_2}},
+ {"sts.l",{A_PR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_2}},
+ {"sts.l",{FPUL_M,A_DEC_N},{HEX_4,REG_N,HEX_5,HEX_2}},
+ {"sts.l",{FPSCR_M,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_2}},
+ {"sub",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_8}},
+ {"subc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_A}},
+ {"subv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_B}},
+ {"swap.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_8}},
+ {"swap.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_9}},
+ {"tas.b",{A_IND_N},{HEX_4,REG_N,HEX_1,HEX_B}},
+ {"trapa",{A_IMM},{HEX_C,HEX_3,IMM_8}},
+ {"tst",{A_IMM,A_R0},{HEX_C,HEX_8,IMM_8}},
+ {"tst",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_8}},
+ {"tst.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_C,IMM_8}},
+ {"xor",{A_IMM,A_R0},{HEX_C,HEX_A,IMM_8}},
+ {"xor",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_A}},
+ {"xor.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_E,IMM_8}},
+ {"xtrct",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_D}},
+ {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}},
+ {"dt",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_0}},
+ {"dmuls.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_D}},
+ {"dmulu.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_5}},
+ {"mac.l",{A_INC_M,A_INC_N},{HEX_0,REG_N,REG_M,HEX_F}},
+ {"braf",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_3}},
+ {"bsrf",{A_REG_N},{HEX_0,REG_N,HEX_0,HEX_3}},
+ {"fabs",{FD_REG_N},{HEX_F,REG_N,HEX_5,HEX_D}},
+ {"fadd",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_0}},
+ {"fadd",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_0}},
+ {"fcmp/eq",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_4}},
+ {"fcmp/eq",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_4}},
+ {"fcmp/gt",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_5}},
+ {"fcmp/gt",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_5}},
+ {"fcnvds",{D_REG_N,FPUL_M},{HEX_F,REG_N,HEX_B,HEX_D}},
+ {"fcnvsd",{FPUL_M,D_REG_N},{HEX_F,REG_N,HEX_A,HEX_D}},
+ {"fdiv",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_3}},
+ {"fdiv",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_3}},
+ {"fipr",{V_REG_M,V_REG_N},{HEX_F,REG_NM,HEX_E,HEX_D}},
+ {"fldi0",{F_REG_N},{HEX_F,REG_N,HEX_8,HEX_D}},
+ {"fldi1",{F_REG_N},{HEX_F,REG_N,HEX_9,HEX_D}},
+ {"flds",{F_REG_N,FPUL_M},{HEX_F,REG_N,HEX_1,HEX_D}},
+ {"float",{FPUL_M,FD_REG_N},{HEX_F,REG_N,HEX_2,HEX_D}},
+ {"fmac",{F_FR0,F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_E}},
+ {"fmov",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_C}},
+ {"fmov",{DX_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_C}},
+ {"fmov",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmov",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmov.d",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov.d",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov.d",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov.d",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov.d",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov.d",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmov.s",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
+ {"fmov.s",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
+ {"fmov.s",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
+ {"fmov.s",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
+ {"fmov.s",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
+ {"fmov.s",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
+ {"fmul",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_2}},
+ {"fmul",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_2}},
+ {"fneg",{FD_REG_N},{HEX_F,REG_N,HEX_4,HEX_D}},
+ {"frchg",{0},{HEX_F,HEX_B,HEX_F,HEX_D}},
+ {"fschg",{0},{HEX_F,HEX_3,HEX_F,HEX_D}},
+ {"fsqrt",{FD_REG_N},{HEX_F,REG_N,HEX_6,HEX_D}},
+ {"fsts",{FPUL_M,F_REG_N},{HEX_F,REG_N,HEX_0,HEX_D}},
+ {"fsub",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_1}},
+ {"fsub",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_1}},
+ {"ftrc",{FD_REG_N,FPUL_M},{HEX_F,REG_N,HEX_3,HEX_D}},
+ {"ftrv",{XMTRX_M4,V_REG_N},{HEX_F,REG_NM,HEX_F,HEX_D}},
+ { 0 },
+};
+
+static void print_sh_insn(u32 memaddr, u16 insn)
+{
+ int relmask = ~0;
+ int nibs[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf};
+ int lastsp;
+ struct sh_opcode_info *op = sh_table;
+
+ for (; op->name; op++) {
+ int n;
+ int imm = 0;
+ int rn = 0;
+ int rm = 0;
+ int rb = 0;
+ int disp_pc;
+ int disp_pc_addr = 0;
+
+ for (n = 0; n < 4; n++) {
+ int i = op->nibbles[n];
+
+ if (i < 16) {
+ if (nibs[n] == i)
+ continue;
+ goto fail;
+ }
+ switch (i) {
+ case BRANCH_8:
+ imm = (nibs[2] << 4) | (nibs[3]);
+ if (imm & 0x80)
+ imm |= ~0xff;
+ imm = ((char)imm) * 2 + 4 ;
+ goto ok;
+ case BRANCH_12:
+ imm = ((nibs[1]) << 8) | (nibs[2] << 4) | (nibs[3]);
+ if (imm & 0x800)
+ imm |= ~0xfff;
+ imm = imm * 2 + 4;
+ goto ok;
+ case IMM_4:
+ imm = nibs[3];
+ goto ok;
+ case IMM_4BY2:
+ imm = nibs[3] <<1;
+ goto ok;
+ case IMM_4BY4:
+ imm = nibs[3] <<2;
+ goto ok;
+ case IMM_8:
+ imm = (nibs[2] << 4) | nibs[3];
+ goto ok;
+ case PCRELIMM_8BY2:
+ imm = ((nibs[2] << 4) | nibs[3]) <<1;
+ relmask = ~1;
+ goto ok;
+ case PCRELIMM_8BY4:
+ imm = ((nibs[2] << 4) | nibs[3]) <<2;
+ relmask = ~3;
+ goto ok;
+ case IMM_8BY2:
+ imm = ((nibs[2] << 4) | nibs[3]) <<1;
+ goto ok;
+ case IMM_8BY4:
+ imm = ((nibs[2] << 4) | nibs[3]) <<2;
+ goto ok;
+ case DISP_8:
+ imm = (nibs[2] << 4) | (nibs[3]);
+ goto ok;
+ case DISP_4:
+ imm = nibs[3];
+ goto ok;
+ case REG_N:
+ rn = nibs[n];
+ break;
+ case REG_M:
+ rm = nibs[n];
+ break;
+ case REG_NM:
+ rn = (nibs[n] & 0xc) >> 2;
+ rm = (nibs[n] & 0x3);
+ break;
+ case REG_B:
+ rb = nibs[n] & 0x07;
+ break;
+ default:
+ return;
+ }
+ }
+
+ ok:
+ printk("%-8s ", op->name);
+ lastsp = (op->arg[0] == A_END);
+ disp_pc = 0;
+ for (n = 0; n < 6 && op->arg[n] != A_END; n++) {
+ if (n && op->arg[1] != A_END)
+ printk(", ");
+ switch (op->arg[n]) {
+ case A_IMM:
+ printk("#%d", (char)(imm));
+ break;
+ case A_R0:
+ printk("r0");
+ break;
+ case A_REG_N:
+ printk("r%d", rn);
+ break;
+ case A_INC_N:
+ printk("@r%d+", rn);
+ break;
+ case A_DEC_N:
+ printk("@-r%d", rn);
+ break;
+ case A_IND_N:
+ printk("@r%d", rn);
+ break;
+ case A_DISP_REG_N:
+ printk("@(%d,r%d)", imm, rn);
+ break;
+ case A_REG_M:
+ printk("r%d", rm);
+ break;
+ case A_INC_M:
+ printk("@r%d+", rm);
+ break;
+ case A_DEC_M:
+ printk("@-r%d", rm);
+ break;
+ case A_IND_M:
+ printk("@r%d", rm);
+ break;
+ case A_DISP_REG_M:
+ printk("@(%d,r%d)", imm, rm);
+ break;
+ case A_REG_B:
+ printk("r%d_bank", rb);
+ break;
+ case A_DISP_PC:
+ disp_pc = 1;
+ disp_pc_addr = imm + 4 + (memaddr & relmask);
+ printk("%08x <%pS>", disp_pc_addr,
+ (void *)disp_pc_addr);
+ break;
+ case A_IND_R0_REG_N:
+ printk("@(r0,r%d)", rn);
+ break;
+ case A_IND_R0_REG_M:
+ printk("@(r0,r%d)", rm);
+ break;
+ case A_DISP_GBR:
+ printk("@(%d,gbr)",imm);
+ break;
+ case A_R0_GBR:
+ printk("@(r0,gbr)");
+ break;
+ case A_BDISP12:
+ case A_BDISP8:
+ printk("%08x", imm + memaddr);
+ break;
+ case A_SR:
+ printk("sr");
+ break;
+ case A_GBR:
+ printk("gbr");
+ break;
+ case A_VBR:
+ printk("vbr");
+ break;
+ case A_SSR:
+ printk("ssr");
+ break;
+ case A_SPC:
+ printk("spc");
+ break;
+ case A_MACH:
+ printk("mach");
+ break;
+ case A_MACL:
+ printk("macl");
+ break;
+ case A_PR:
+ printk("pr");
+ break;
+ case A_SGR:
+ printk("sgr");
+ break;
+ case A_DBR:
+ printk("dbr");
+ break;
+ case FD_REG_N:
+ if (0)
+ goto d_reg_n;
+ case F_REG_N:
+ printk("fr%d", rn);
+ break;
+ case F_REG_M:
+ printk("fr%d", rm);
+ break;
+ case DX_REG_N:
+ if (rn & 1) {
+ printk("xd%d", rn & ~1);
+ break;
+ }
+ d_reg_n:
+ case D_REG_N:
+ printk("dr%d", rn);
+ break;
+ case DX_REG_M:
+ if (rm & 1) {
+ printk("xd%d", rm & ~1);
+ break;
+ }
+ case D_REG_M:
+ printk("dr%d", rm);
+ break;
+ case FPSCR_M:
+ case FPSCR_N:
+ printk("fpscr");
+ break;
+ case FPUL_M:
+ case FPUL_N:
+ printk("fpul");
+ break;
+ case F_FR0:
+ printk("fr0");
+ break;
+ case V_REG_N:
+ printk("fv%d", rn*4);
+ break;
+ case V_REG_M:
+ printk("fv%d", rm*4);
+ break;
+ case XMTRX_M4:
+ printk("xmtrx");
+ break;
+ default:
+ return;
+ }
+ }
+
+ if (disp_pc && strcmp(op->name, "mova") != 0) {
+ u32 val;
+
+ if (relmask == ~1)
+ __get_user(val, (u16 *)disp_pc_addr);
+ else
+ __get_user(val, (u32 *)disp_pc_addr);
+
+ printk(" ! %08x <%pS>", val, (void *)val);
+ }
+
+ return;
+ fail:
+ ;
+
+ }
+
+ printk(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]);
+}
+
+void show_code(struct pt_regs *regs)
+{
+ unsigned short *pc = (unsigned short *)regs->pc;
+ long i;
+
+ if (regs->pc & 0x1)
+ return;
+
+ printk("Code:\n");
+
+ for (i = -3 ; i < 6 ; i++) {
+ unsigned short insn;
+
+ if (__get_user(insn, pc + i)) {
+ printk(" (Bad address in pc)\n");
+ break;
+ }
+
+ printk("%s%08lx: ", (i ? " ": "->"), (unsigned long)(pc + i));
+ print_sh_insn((unsigned long)(pc + i), insn);
+ printk("\n");
+ }
+
+ printk("\n");
+}
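As a rough, self-contained illustration of the nibble-matching scheme print_sh_insn() uses above (not part of the patch; the opcode value and output formatting are invented for the example), this decodes the 16-bit word 0x7310 against the {HEX_7, REG_N, IMM_8} pattern for "add #imm, Rn":

#include <stdio.h>

int main(void)
{
	unsigned short insn = 0x7310;		/* "add #16, r3" on SH */
	int nibs[4] = {
		(insn >> 12) & 0xf, (insn >> 8) & 0xf,
		(insn >> 4) & 0xf, insn & 0xf
	};

	/* Pattern {HEX_7, REG_N, IMM_8}: nibble 0 must equal 7,
	 * nibble 1 names Rn, nibbles 2-3 form a signed 8-bit immediate. */
	if (nibs[0] == 0x7) {
		int rn  = nibs[1];
		int imm = (nibs[2] << 4) | nibs[3];
		printf("add      #%d, r%d\n", (signed char)imm, rn);
	}
	return 0;
}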
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
new file mode 100644
index 00000000000..5b0bfcda6d0
--- /dev/null
+++ b/arch/sh/kernel/dma-nommu.c
@@ -0,0 +1,82 @@
+/*
+ * DMA mapping support for platforms lacking IOMMUs.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t addr = page_to_phys(page) + offset;
+
+ WARN_ON(size == 0);
+ dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+ return addr;
+}
+
+static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ WARN_ON(nents == 0 || sg[0].length == 0);
+
+ for_each_sg(sg, s, nents, i) {
+ BUG_ON(!sg_page(s));
+
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
+
+ s->dma_address = sg_phys(s);
+ s->dma_length = s->length;
+ }
+
+ return nents;
+}
+
+#ifdef CONFIG_DMA_NONCOHERENT
+static void nommu_sync_single(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ dma_cache_sync(dev, phys_to_virt(addr), size, dir);
+}
+
+static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nelems, i)
+ dma_cache_sync(dev, sg_virt(s), s->length, dir);
+}
+#endif
+
+struct dma_map_ops nommu_dma_ops = {
+ .alloc = dma_generic_alloc_coherent,
+ .free = dma_generic_free_coherent,
+ .map_page = nommu_map_page,
+ .map_sg = nommu_map_sg,
+#ifdef CONFIG_DMA_NONCOHERENT
+ .sync_single_for_device = nommu_sync_single,
+ .sync_sg_for_device = nommu_sync_sg,
+#endif
+ .is_phys = 1,
+};
+
+void __init no_iommu_init(void)
+{
+ if (dma_ops)
+ return;
+ dma_ops = &nommu_dma_ops;
+}
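A minimal sketch of what the no-IOMMU mapping above boils down to (illustration only, not part of the patch; the demo_* names are invented): the bus address is simply the physical address, so nommu_map_page() reduces to pointer arithmetic plus a cache sync on non-coherent parts.

#include <stddef.h>

typedef unsigned long demo_phys_addr_t;
typedef unsigned long demo_dma_addr_t;

static demo_dma_addr_t demo_map(demo_phys_addr_t page_phys,
				unsigned long offset, size_t size)
{
	/* No IOMMU: bus address == physical address. */
	demo_dma_addr_t addr = page_phys + offset;

	/*
	 * On a non-coherent CPU the cache lines covering
	 * [addr, addr + size) would be written back or invalidated
	 * here, which is what dma_cache_sync() does in the real code.
	 */
	(void)size;
	return addr;
}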
diff --git a/arch/sh/kernel/dump_task.c b/arch/sh/kernel/dump_task.c
deleted file mode 100644
index 1db7ce0f25d..00000000000
--- a/arch/sh/kernel/dump_task.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <asm/fpu.h>
-
-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
- struct pt_regs ptregs;
-
- ptregs = *task_pt_regs(tsk);
- elf_core_copy_regs(regs, &ptregs);
-
- return 1;
-}
-
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpu)
-{
- int fpvalid = 0;
-
-#if defined(CONFIG_SH_FPU)
- fpvalid = !!tsk_used_math(tsk);
- if (fpvalid) {
- unlazy_fpu(tsk, task_pt_regs(tsk));
- memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
- }
-#endif
-
- return fpvalid;
-}
-
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
new file mode 100644
index 00000000000..8dfe645bcc4
--- /dev/null
+++ b/arch/sh/kernel/dumpstack.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2009 Matt Fleming
+ * Copyright (C) 2002 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+#include <linux/debug_locks.h>
+#include <linux/kdebug.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
+
+void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+ unsigned long p;
+ int i;
+
+ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+ for (p = bottom & ~31; p < top; ) {
+ printk("%04lx: ", p & 0xffff);
+
+ for (i = 0; i < 8; i++, p += 4) {
+ unsigned int val;
+
+ if (p < bottom || p >= top)
+ printk(" ");
+ else {
+ if (__get_user(val, (unsigned int __user *)p)) {
+ printk("\n");
+ return;
+ }
+ printk("%08x ", val);
+ }
+ }
+ printk("\n");
+ }
+}
+
+void printk_address(unsigned long address, int reliable)
+{
+ printk(" [<%p>] %s%pS\n", (void *) address,
+ reliable ? "" : "? ", (void *) address);
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{
+ struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+ if (addr != (unsigned long)return_to_handler)
+ return;
+
+ if (!task->ret_stack || index < *graph)
+ return;
+
+ index -= *graph;
+ ret_addr = task->ret_stack[index].ret;
+
+ ops->address(data, ret_addr, 1);
+
+ (*graph)++;
+}
+#else
+static inline void
+print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+ struct thread_info *tinfo, int *graph)
+{ }
+#endif
+
+void
+stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct thread_info *context;
+ int graph = 0;
+
+ context = (struct thread_info *)
+ ((unsigned long)sp & (~(THREAD_SIZE - 1)));
+
+ while (!kstack_end(sp)) {
+ unsigned long addr = *sp++;
+
+ if (__kernel_text_address(addr)) {
+ ops->address(data, addr, 1);
+
+ print_ftrace_graph_addr(addr, data, ops,
+ context, &graph);
+ }
+ }
+}
+
+static int print_trace_stack(void *data, char *name)
+{
+ printk("%s <%s> ", (char *)data, name);
+ return 0;
+}
+
+/*
+ * Print one address/symbol entry per line.
+ */
+static void print_trace_address(void *data, unsigned long addr, int reliable)
+{
+ printk("%s", (char *)data);
+ printk_address(addr, reliable);
+}
+
+static const struct stacktrace_ops print_trace_ops = {
+ .stack = print_trace_stack,
+ .address = print_trace_address,
+};
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+ struct pt_regs *regs)
+{
+ if (regs && user_mode(regs))
+ return;
+
+ printk("\nCall trace:\n");
+
+ unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+
+ printk("\n");
+
+ if (!tsk)
+ tsk = current;
+
+ debug_show_held_locks(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ unsigned long stack;
+
+ if (!tsk)
+ tsk = current;
+ if (tsk == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
+ sp = (unsigned long *)tsk->thread.sp;
+
+ stack = (unsigned long)sp;
+ dump_mem("Stack: ", stack, THREAD_SIZE +
+ (unsigned long)task_stack_page(tsk));
+ show_trace(tsk, sp, NULL);
+}
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
new file mode 100644
index 00000000000..67a049e75ec
--- /dev/null
+++ b/arch/sh/kernel/dwarf.c
@@ -0,0 +1,1210 @@
+/*
+ * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This is an implementation of a DWARF unwinder. Its main purpose is
+ * for generating stacktrace information. Based on the DWARF 3
+ * specification from http://www.dwarfstd.org.
+ *
+ * TODO:
+ * - DWARF64 doesn't work.
+ * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
+ */
+
+/* #define DEBUG */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/dwarf.h>
+#include <asm/unwinder.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/stacktrace.h>
+
+/* Reserve enough memory for two stack frames */
+#define DWARF_FRAME_MIN_REQ 2
+/* ... with 4 registers per frame. */
+#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
+static struct rb_root cie_root;
+static DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static struct rb_root fde_root;
+static DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+static unsigned int dwarf_unwinder_ready;
+
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose list of registers we insert onto
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+ if (!reg) {
+ printk(KERN_WARNING "Unable to allocate a DWARF register\n");
+ /*
+ * Let's just bomb hard here, we have no way to
+ * gracefully recover.
+ */
+ UNWINDER_BUG();
+ }
+
+ reg->number = reg_num;
+ reg->addr = 0;
+ reg->flags = 0;
+
+ list_add(&reg->link, &frame->reg_list);
+
+ return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+ struct dwarf_reg *reg, *n;
+
+ list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+ list_del(&reg->link);
+ mempool_free(reg, dwarf_reg_pool);
+ }
+}
+
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Look up and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an invalid register number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ list_for_each_entry(reg, &frame->reg_list, link) {
+ if (reg->number == reg_num)
+ return reg;
+ }
+
+ return NULL;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine. We have to be careful when reading from @src
+ * and writing to @dst, because they can be arbitrarily aligned.
+ * Return 'n' - the number of bytes read.
+ */
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
+{
+ u32 val = get_unaligned(src);
+ put_unaligned(val, dst);
+ return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
+ */
+static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
+{
+ unsigned int result;
+ unsigned char byte;
+ int shift, count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ count++;
+
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ *ret = result;
+
+ return count;
+}
+
+/**
+ * dwarf_read_leb128 - read signed LEB128 data
+ * @addr: the address of the LEB128 encoded data
+ * @ret: address to store the result
+ *
+ * Decode signed LEB128 data. The algorithm is taken from Appendix
+ * C of the DWARF 3 spec. Return the number of bytes read.
+ */
+static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
+{
+ unsigned char byte;
+ int result, shift;
+ int num_bits;
+ int count;
+
+ result = 0;
+ shift = 0;
+ count = 0;
+
+ while (1) {
+ byte = __raw_readb(addr);
+ addr++;
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+ count++;
+
+ if (!(byte & 0x80))
+ break;
+ }
+
+ /* The number of bits in a signed integer. */
+ num_bits = 8 * sizeof(result);
+
+ if ((shift < num_bits) && (byte & 0x40))
+ result |= (-1 << shift);
+
+ *ret = result;
+
+ return count;
+}
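As a concrete check of the two LEB128 readers above (illustration only, using the worked example from the DWARF spec rather than anything in this patch), the unsigned byte sequence e5 8e 26 decodes to 624485:

#include <stdio.h>

/* Same algorithm as dwarf_read_uleb128(), reading from a plain buffer. */
static unsigned long uleb128_decode(const unsigned char *p, unsigned int *ret)
{
	unsigned int result = 0, shift = 0;
	unsigned long count = 0;
	unsigned char byte;

	do {
		byte = p[count++];
		result |= (unsigned int)(byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*ret = result;
	return count;		/* number of bytes consumed */
}

int main(void)
{
	const unsigned char enc[] = { 0xe5, 0x8e, 0x26 };
	unsigned int val;

	uleb128_decode(enc, &val);
	printf("%u\n", val);	/* prints 624485 */
	return 0;
}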
+
+/**
+ * dwarf_read_encoded_value - return the decoded value at @addr
+ * @addr: the address of the encoded value
+ * @val: where to write the decoded value
+ * @encoding: the encoding with which we can decode @addr
+ *
+ * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
+ * the value at @addr using @encoding. The decoded value is written
+ * to @val and the number of bytes read is returned.
+ */
+static int dwarf_read_encoded_value(char *addr, unsigned long *val,
+ char encoding)
+{
+ unsigned long decoded_addr = 0;
+ int count = 0;
+
+ switch (encoding & 0x70) {
+ case DW_EH_PE_absptr:
+ break;
+ case DW_EH_PE_pcrel:
+ decoded_addr = (unsigned long)addr;
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", (encoding & 0x70));
+ UNWINDER_BUG();
+ }
+
+ if ((encoding & 0x07) == 0x00)
+ encoding |= DW_EH_PE_udata4;
+
+ switch (encoding & 0x0f) {
+ case DW_EH_PE_sdata4:
+ case DW_EH_PE_udata4:
+ count += 4;
+ decoded_addr += get_unaligned((u32 *)addr);
+ __raw_writel(decoded_addr, val);
+ break;
+ default:
+ pr_debug("encoding=0x%x\n", encoding);
+ UNWINDER_BUG();
+ }
+
+ return count;
+}
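The encoding byte handled above packs two fields: bits 4-6 select how the value is applied (DW_EH_PE_absptr, DW_EH_PE_pcrel, ...) and bits 0-3 select its format (DW_EH_PE_udata4, DW_EH_PE_sdata4, ...). As a hedged example (the encoding value is assumed, not taken from this patch), GCC commonly emits 0x1b, i.e. DW_EH_PE_pcrel | DW_EH_PE_sdata4: a signed 32-bit delta relative to the address of the field itself, which can be decoded along these lines:

#include <stdint.h>
#include <string.h>

/* Sketch: decode a DW_EH_PE_pcrel | DW_EH_PE_sdata4 pointer. */
static unsigned long decode_pcrel_sdata4(const unsigned char *field)
{
	int32_t delta;

	memcpy(&delta, field, sizeof(delta));	/* field may be unaligned */
	return (unsigned long)field + delta;
}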
+
+/**
+ * dwarf_entry_len - return the length of an FDE or CIE
+ * @addr: the address of the entry
+ * @len: the length of the entry
+ *
+ * Read the initial_length field of the entry and store the size of
+ * the entry in @len. We return the number of bytes read. Return a
+ * count of 0 on error.
+ */
+static inline int dwarf_entry_len(char *addr, unsigned long *len)
+{
+ u32 initial_len;
+ int count;
+
+ initial_len = get_unaligned((u32 *)addr);
+ count = 4;
+
+ /*
+ * An initial length field value in the range DW_EXT_LO -
+ * DW_EXT_HI indicates an extension, and should not be
+ * interpreted as a length. The only extension that we currently
+ * understand is the use of DWARF64 addresses.
+ */
+ if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
+ /*
+ * The 64-bit length field immediately follows the
+ * compulsory 32-bit length field.
+ */
+ if (initial_len == DW_EXT_DWARF64) {
+ *len = get_unaligned((u64 *)(addr + 4));
+ count = 12;
+ } else {
+ printk(KERN_WARNING "Unknown DWARF extension\n");
+ count = 0;
+ }
+ } else
+ *len = initial_len;
+
+ return count;
+}
+
+/**
+ * dwarf_lookup_cie - locate the cie
+ * @cie_ptr: pointer to help with lookup
+ */
+static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
+{
+ struct rb_node **rb_node = &cie_root.rb_node;
+ struct dwarf_cie *cie = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ /*
+ * We've cached the last CIE we looked up because chances are
+ * that the FDE wants this CIE.
+ */
+ if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
+ cie = cached_cie;
+ goto out;
+ }
+
+ while (*rb_node) {
+ struct dwarf_cie *cie_tmp;
+
+ cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+ BUG_ON(!cie_tmp);
+
+ if (cie_ptr == cie_tmp->cie_pointer) {
+ cie = cie_tmp;
+ cached_cie = cie_tmp;
+ goto out;
+ } else {
+ if (cie_ptr < cie_tmp->cie_pointer)
+ rb_node = &(*rb_node)->rb_left;
+ else
+ rb_node = &(*rb_node)->rb_right;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+ return cie;
+}
+
+/**
+ * dwarf_lookup_fde - locate the FDE that covers pc
+ * @pc: the program counter
+ */
+struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
+{
+ struct rb_node **rb_node = &fde_root.rb_node;
+ struct dwarf_fde *fde = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ while (*rb_node) {
+ struct dwarf_fde *fde_tmp;
+ unsigned long tmp_start, tmp_end;
+
+ fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+ BUG_ON(!fde_tmp);
+
+ tmp_start = fde_tmp->initial_location;
+ tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+ if (pc < tmp_start) {
+ rb_node = &(*rb_node)->rb_left;
+ } else {
+ if (pc < tmp_end) {
+ fde = fde_tmp;
+ goto out;
+ } else
+ rb_node = &(*rb_node)->rb_right;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return fde;
+}
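For illustration (addresses invented, not taken from the patch): an FDE with initial_location 0x8c010000 and address_range 0x120 covers pc 0x8c0100a4, because the half-open interval test below succeeds; that is exactly the comparison the rb-tree walk above performs at each node.

static int fde_covers(unsigned long start, unsigned long range,
		      unsigned long pc)
{
	return pc >= start && pc < start + range;	/* [start, start + range) */
}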
+
+/**
+ * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
+ * @insn_start: address of the first instruction
+ * @insn_end: address of the last instruction
+ * @cie: the CIE for this function
+ * @fde: the FDE for this function
+ * @frame: the instructions calculate the CFA for this frame
+ * @pc: the program counter of the address we're interested in
+ *
+ * Execute the Call Frame instruction sequence starting at
+ * @insn_start and ending at @insn_end. The instructions describe
+ * how to calculate the Canonical Frame Address of a stackframe.
+ * Store the results in @frame.
+ */
+static int dwarf_cfa_execute_insns(unsigned char *insn_start,
+ unsigned char *insn_end,
+ struct dwarf_cie *cie,
+ struct dwarf_fde *fde,
+ struct dwarf_frame *frame,
+ unsigned long pc)
+{
+ unsigned char insn;
+ unsigned char *current_insn;
+ unsigned int count, delta, reg, expr_len, offset;
+ struct dwarf_reg *regp;
+
+ current_insn = insn_start;
+
+ while (current_insn < insn_end && frame->pc <= pc) {
+ insn = __raw_readb(current_insn++);
+
+ /*
+ * Firstly, handle the opcodes that embed their operands
+ * in the instructions.
+ */
+ switch (DW_CFA_opcode(insn)) {
+ case DW_CFA_advance_loc:
+ delta = DW_CFA_operand(insn);
+ delta *= cie->code_alignment_factor;
+ frame->pc += delta;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_offset:
+ reg = DW_CFA_operand(insn);
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->addr = offset;
+ regp->flags |= DWARF_REG_OFFSET;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_restore:
+ reg = DW_CFA_operand(insn);
+ continue;
+ /* NOTREACHED */
+ }
+
+ /*
+ * Secondly, handle the opcodes that don't embed their
+ * operands in the instruction.
+ */
+ switch (insn) {
+ case DW_CFA_nop:
+ continue;
+ case DW_CFA_advance_loc1:
+ delta = *current_insn++;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc2:
+ delta = get_unaligned((u16 *)current_insn);
+ current_insn += 2;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc4:
+ delta = get_unaligned((u32 *)current_insn);
+ current_insn += 4;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ break;
+ case DW_CFA_restore_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_undefined:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_UNDEFINED;
+ break;
+ case DW_CFA_def_cfa:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_offset);
+ current_insn += count;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_register:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_offset:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ frame->cfa_offset = offset;
+ break;
+ case DW_CFA_def_cfa_expression:
+ count = dwarf_read_uleb128(current_insn, &expr_len);
+ current_insn += count;
+
+ frame->cfa_expr = current_insn;
+ frame->cfa_expr_len = expr_len;
+ current_insn += expr_len;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+ break;
+ case DW_CFA_offset_extended_sf:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_val_offset:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_VAL_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_GNU_args_size:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = -offset;
+ break;
+ default:
+ pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+ UNWINDER_BUG();
+ break;
+ }
+ }
+
+ return 0;
+}
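To make the rule table concrete, here is a hedged, made-up example (the register numbers and offsets are assumptions, not read from this patch): suppose a prologue's instructions were DW_CFA_def_cfa(14, 8) and DW_CFA_offset(ra, 1) with a data alignment factor of -4. The CFA and the slot holding the saved return address then fall out as simple arithmetic:

#include <stdio.h>

int main(void)
{
	/* Illustrative values only. */
	unsigned long r14 = 0x8c001000UL;	/* CFA register at the PC of interest */
	long cfa_offset = 8;			/* DW_CFA_def_cfa: CFA = r14 + 8 */
	long ra_rule = 1 * -4;			/* DW_CFA_offset(ra, 1) * data align */

	unsigned long cfa = r14 + cfa_offset;
	unsigned long ra_slot = cfa + ra_rule;	/* return address is loaded from here */

	printf("CFA = %#lx, saved RA at %#lx\n", cfa, ra_slot);
	return 0;
}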
+
+/**
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+ dwarf_frame_free_regs(frame);
+ mempool_free(frame, dwarf_frame_pool);
+}
+
+extern void ret_from_irq(void);
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames is
+ * linked via the "prev" member.
+ */
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
+{
+ struct dwarf_frame *frame;
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+ struct dwarf_reg *reg;
+ unsigned long addr;
+
+ /*
+ * If we've been called before initialization has
+ * completed, bail out immediately.
+ */
+ if (!dwarf_unwinder_ready)
+ return NULL;
+
+ /*
+ * If we're starting at the top of the stack we need to get the
+ * contents of a physical register to get the CFA in order to
+ * begin the virtual unwinding of the stack.
+ *
+ * NOTE: the return address is guaranteed to be set up by the
+ * time this function makes its first function call.
+ */
+ if (!pc || !prev)
+ pc = (unsigned long)current_text_addr();
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /*
+ * If our stack has been patched by the function graph tracer
+ * then we might see the address of return_to_handler() where we
+ * expected to find the real return address.
+ */
+ if (pc == (unsigned long)&return_to_handler) {
+ int index = current->curr_ret_stack;
+
+ /*
+ * We currently have no way of tracking how many
+ * return_to_handler()'s we've seen. If there is more
+ * than one patched return address on our stack,
+ * complain loudly.
+ */
+ WARN_ON(index > 0);
+
+ pc = current->ret_stack[index].ret;
+ }
+#endif
+
+ frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+ if (!frame) {
+ printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+ UNWINDER_BUG();
+ }
+
+ INIT_LIST_HEAD(&frame->reg_list);
+ frame->flags = 0;
+ frame->prev = prev;
+ frame->return_addr = 0;
+
+ fde = dwarf_lookup_fde(pc);
+ if (!fde) {
+ /*
+ * This is our normal exit path. There are two reasons
+ * why we might exit here,
+ *
+ * a) pc has no associated DWARF frame info and so
+ * we don't know how to unwind this frame. This is
+ * usually the case when we're trying to unwind a
+ * frame that was called from some assembly code
+ * that has no DWARF info, e.g. syscalls.
+ *
+ * b) the DEBUG info for pc is bogus. There's
+ * really no way to distinguish this case from the
+ * case above, which sucks because we could print a
+ * warning here.
+ */
+ goto bail;
+ }
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+
+ frame->pc = fde->initial_location;
+
+ /* CIE initial instructions */
+ dwarf_cfa_execute_insns(cie->initial_instructions,
+ cie->instructions_end, cie, fde,
+ frame, pc);
+
+ /* FDE instructions */
+ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
+ fde, frame, pc);
+
+ /* Calculate the CFA */
+ switch (frame->flags) {
+ case DWARF_FRAME_CFA_REG_OFFSET:
+ if (prev) {
+ reg = dwarf_frame_reg(prev, frame->cfa_register);
+ UNWINDER_BUG_ON(!reg);
+ UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+ addr = prev->cfa + reg->addr;
+ frame->cfa = __raw_readl(addr);
+
+ } else {
+ /*
+ * Again, we're starting from the top of the
+ * stack. We need to physically read
+ * the contents of a register in order to get
+ * the Canonical Frame Address for this
+ * function.
+ */
+ frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
+ }
+
+ frame->cfa += frame->cfa_offset;
+ break;
+ default:
+ UNWINDER_BUG();
+ }
+
+ reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
+
+ /*
+ * If we haven't seen the return address register or the return
+ * address column is undefined then we must assume that this is
+ * the end of the callstack.
+ */
+ if (!reg || reg->flags == DWARF_UNDEFINED)
+ goto bail;
+
+ UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
+
+ addr = frame->cfa + reg->addr;
+ frame->return_addr = __raw_readl(addr);
+
+ /*
+ * Ah, the joys of unwinding through interrupts.
+ *
+ * Interrupts are tricky - the DWARF info needs to be _really_
+ * accurate and unfortunately I'm seeing a lot of bogus DWARF
+ * info. For example, I've seen interrupts occur in epilogues
+ * just after the frame pointer (r14) had been restored. The
+ * problem was that the DWARF info claimed that the CFA could be
+ * reached by using the value of the frame pointer before it was
+ * restored.
+ *
+ * So until the compiler can be trusted to produce reliable
+ * DWARF info when it really matters, let's stop unwinding once
+ * we've calculated the function that was interrupted.
+ */
+ if (prev && prev->pc == (unsigned long)ret_from_irq)
+ frame->return_addr = 0;
+
+ return frame;
+
+bail:
+ dwarf_free_frame(frame);
+ return NULL;
+}
+
+static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
+ unsigned char *end, struct module *mod)
+{
+ struct rb_node **rb_node = &cie_root.rb_node;
+ struct rb_node *parent = *rb_node;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+
+ cie = kzalloc(sizeof(*cie), GFP_KERNEL);
+ if (!cie)
+ return -ENOMEM;
+
+ cie->length = len;
+
+ /*
+ * Record the address of this CIE in the
+ * .eh_frame section. It allows this CIE to be
+ * quickly and easily looked up from the
+ * corresponding FDE.
+ */
+ cie->cie_pointer = (unsigned long)entry;
+
+ cie->version = *(char *)p++;
+ UNWINDER_BUG_ON(cie->version != 1);
+
+ cie->augmentation = p;
+ p += strlen(cie->augmentation) + 1;
+
+ count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
+ p += count;
+
+ count = dwarf_read_leb128(p, &cie->data_alignment_factor);
+ p += count;
+
+ /*
+ * Which column in the rule table contains the
+ * return address?
+ */
+ if (cie->version == 1) {
+ cie->return_address_reg = __raw_readb(p);
+ p++;
+ } else {
+ count = dwarf_read_uleb128(p, &cie->return_address_reg);
+ p += count;
+ }
+
+ if (cie->augmentation[0] == 'z') {
+ unsigned int length, count;
+ cie->flags |= DWARF_CIE_Z_AUGMENTATION;
+
+ count = dwarf_read_uleb128(p, &length);
+ p += count;
+
+ UNWINDER_BUG_ON((unsigned char *)p > end);
+
+ cie->initial_instructions = p + length;
+ cie->augmentation++;
+ }
+
+ while (*cie->augmentation) {
+ /*
+ * "L" indicates a byte showing how the
+ * LSDA pointer is encoded. Skip it.
+ */
+ if (*cie->augmentation == 'L') {
+ p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'R') {
+ /*
+ * "R" indicates a byte showing
+ * how FDE addresses are
+ * encoded.
+ */
+ cie->encoding = *(char *)p++;
+ cie->augmentation++;
+ } else if (*cie->augmentation == 'P') {
+ /*
+ * "R" indicates a personality
+ * routine in the CIE
+ * augmentation.
+ */
+ UNWINDER_BUG();
+ } else if (*cie->augmentation == 'S') {
+ UNWINDER_BUG();
+ } else {
+ /*
+ * Unknown augmentation. Assume
+ * 'z' augmentation.
+ */
+ p = cie->initial_instructions;
+ UNWINDER_BUG_ON(!p);
+ break;
+ }
+ }
+
+ cie->initial_instructions = p;
+ cie->instructions_end = end;
+
+ /* Add to list */
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ while (*rb_node) {
+ struct dwarf_cie *cie_tmp;
+
+ cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
+
+ parent = *rb_node;
+
+ if (cie->cie_pointer < cie_tmp->cie_pointer)
+ rb_node = &parent->rb_left;
+ else if (cie->cie_pointer >= cie_tmp->cie_pointer)
+ rb_node = &parent->rb_right;
+ else
+ WARN_ON(1);
+ }
+
+ rb_link_node(&cie->node, parent, rb_node);
+ rb_insert_color(&cie->node, &cie_root);
+
+#ifdef CONFIG_MODULES
+ if (mod != NULL)
+ list_add_tail(&cie->link, &mod->arch.cie_list);
+#endif
+
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ return 0;
+}
+
+static int dwarf_parse_fde(void *entry, u32 entry_type,
+ void *start, unsigned long len,
+ unsigned char *end, struct module *mod)
+{
+ struct rb_node **rb_node = &fde_root.rb_node;
+ struct rb_node *parent = *rb_node;
+ struct dwarf_fde *fde;
+ struct dwarf_cie *cie;
+ unsigned long flags;
+ int count;
+ void *p = start;
+
+ fde = kzalloc(sizeof(*fde), GFP_KERNEL);
+ if (!fde)
+ return -ENOMEM;
+
+ fde->length = len;
+
+ /*
+ * In a .eh_frame section the CIE pointer is the
+ * delta between the address of the CIE pointer field
+ * in the FDE and the CIE itself, so turn it back
+ * into the absolute address of that CIE.
+ */
+ fde->cie_pointer = (unsigned long)(p - entry_type - 4);
+
+ cie = dwarf_lookup_cie(fde->cie_pointer);
+ fde->cie = cie;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->initial_location,
+ cie->encoding);
+ else
+ count = dwarf_read_addr(p, &fde->initial_location);
+
+ p += count;
+
+ if (cie->encoding)
+ count = dwarf_read_encoded_value(p, &fde->address_range,
+ cie->encoding & 0x0f);
+ else
+ count = dwarf_read_addr(p, &fde->address_range);
+
+ p += count;
+
+ if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
+ unsigned int length;
+ count = dwarf_read_uleb128(p, &length);
+ p += count + length;
+ }
+
+ /* Call frame instructions. */
+ fde->instructions = p;
+ fde->end = end;
+
+ /* Add to list. */
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ while (*rb_node) {
+ struct dwarf_fde *fde_tmp;
+ unsigned long tmp_start, tmp_end;
+ unsigned long start, end;
+
+ fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
+
+ start = fde->initial_location;
+ end = fde->initial_location + fde->address_range;
+
+ tmp_start = fde_tmp->initial_location;
+ tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
+
+ parent = *rb_node;
+
+ if (start < tmp_start)
+ rb_node = &parent->rb_left;
+ else if (start >= tmp_end)
+ rb_node = &parent->rb_right;
+ else
+ WARN_ON(1);
+ }
+
+ rb_link_node(&fde->node, parent, rb_node);
+ rb_insert_color(&fde->node, &fde_root);
+
+#ifdef CONFIG_MODULES
+ if (mod != NULL)
+ list_add_tail(&fde->link, &mod->arch.fde_list);
+#endif
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+
+ return 0;
+}
+
+static void dwarf_unwinder_dump(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops,
+ void *data)
+{
+ struct dwarf_frame *frame, *_frame;
+ unsigned long return_addr;
+
+ _frame = NULL;
+ return_addr = 0;
+
+ while (1) {
+ frame = dwarf_unwind_stack(return_addr, _frame);
+
+ if (_frame)
+ dwarf_free_frame(_frame);
+
+ _frame = frame;
+
+ if (!frame || !frame->return_addr)
+ break;
+
+ return_addr = frame->return_addr;
+ ops->address(data, return_addr, 1);
+ }
+
+ if (frame)
+ dwarf_free_frame(frame);
+}
+
+static struct unwinder dwarf_unwinder = {
+ .name = "dwarf-unwinder",
+ .dump = dwarf_unwinder_dump,
+ .rating = 150,
+};
+
+static void dwarf_unwinder_cleanup(void)
+{
+ struct dwarf_fde *fde, *next_fde;
+ struct dwarf_cie *cie, *next_cie;
+
+ /*
+ * Deallocate all the memory allocated for the DWARF unwinder.
+ * Traverse all the FDE/CIE lists and remove and free all the
+ * memory associated with those data structures.
+ */
+ rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node)
+ kfree(fde);
+
+ rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
+ kfree(cie);
+
+ kmem_cache_destroy(dwarf_reg_cachep);
+ kmem_cache_destroy(dwarf_frame_cachep);
+}
+
+/**
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
+ *
+ * Parse the information in a .eh_frame section.
+ */
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+ struct module *mod)
+{
+ u32 entry_type;
+ void *p, *entry;
+ int count, err = 0;
+ unsigned long len = 0;
+ unsigned int c_entries, f_entries;
+ unsigned char *end;
+
+ c_entries = 0;
+ f_entries = 0;
+ entry = eh_frame_start;
+
+ while ((char *)entry < eh_frame_end) {
+ p = entry;
+
+ count = dwarf_entry_len(p, &len);
+ if (count == 0) {
+ /*
+ * We read a bogus length field value. There is
+ * nothing we can do here apart from disabling
+ * the DWARF unwinder. We can't even skip this
+ * entry and move to the next one because 'len'
+ * tells us where our next entry is.
+ */
+ err = -EINVAL;
+ goto out;
+ } else
+ p += count;
+
+ /* initial length does not include itself */
+ end = p + len;
+
+ entry_type = get_unaligned((u32 *)p);
+ p += 4;
+
+ if (entry_type == DW_EH_FRAME_CIE) {
+ err = dwarf_parse_cie(entry, p, len, end, mod);
+ if (err < 0)
+ goto out;
+ else
+ c_entries++;
+ } else {
+ err = dwarf_parse_fde(entry, entry_type, p, len,
+ end, mod);
+ if (err < 0)
+ goto out;
+ else
+ f_entries++;
+ }
+
+ entry = (char *)entry + len + 4;
+ }
+
+ printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
+ c_entries, f_entries);
+
+ return 0;
+
+out:
+ return err;
+}
+
+#ifdef CONFIG_MODULES
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ unsigned int i;
+ int err;
+ unsigned long start, end;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ start = end = 0;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ /* Alloc bit cleared means "ignore it." */
+ if ((sechdrs[i].sh_flags & SHF_ALLOC)
+ && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+ start = sechdrs[i].sh_addr;
+ end = start + sechdrs[i].sh_size;
+ break;
+ }
+ }
+
+ /* Did we find the .eh_frame section? */
+ if (i != hdr->e_shnum) {
+ INIT_LIST_HEAD(&me->arch.cie_list);
+ INIT_LIST_HEAD(&me->arch.fde_list);
+ err = dwarf_parse_section((char *)start, (char *)end, me);
+ if (err) {
+ printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+ me->name);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+ struct dwarf_fde *fde, *ftmp;
+ struct dwarf_cie *cie, *ctmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+ list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
+ list_del(&cie->link);
+ rb_erase(&cie->node, &cie_root);
+ kfree(cie);
+ }
+
+ spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+ spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+ list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
+ list_del(&fde->link);
+ rb_erase(&fde->node, &fde_root);
+ kfree(fde);
+ }
+
+ spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .eh_frame section to
+ * make it easier to look up CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to look up the FDE for a given PC, so we build rb-trees of
+ * FDE and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+ int err = -ENOMEM;
+
+ dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+ sizeof(struct dwarf_frame), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+ sizeof(struct dwarf_reg), 0,
+ SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+ dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_frame_cachep);
+ if (!dwarf_frame_pool)
+ goto out;
+
+ dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+ mempool_alloc_slab,
+ mempool_free_slab,
+ dwarf_reg_cachep);
+ if (!dwarf_reg_pool)
+ goto out;
+
+ err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+ if (err)
+ goto out;
+
+ err = unwinder_register(&dwarf_unwinder);
+ if (err)
+ goto out;
+
+ dwarf_unwinder_ready = 1;
+
+ return 0;
+
+out:
+ printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
+ dwarf_unwinder_cleanup();
+ return err;
+}
+early_initcall(dwarf_unwinder_init);
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
deleted file mode 100644
index 957f2561154..00000000000
--- a/arch/sh/kernel/early_printk.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * arch/sh/kernel/early_printk.c
- *
- * Copyright (C) 1999, 2000 Niibe Yutaka
- * Copyright (C) 2002 M. R. Brown
- * Copyright (C) 2004 - 2007 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#ifdef CONFIG_SH_STANDARD_BIOS
-#include <asm/sh_bios.h>
-
-/*
- * Print a string through the BIOS
- */
-static void sh_console_write(struct console *co, const char *s,
- unsigned count)
-{
- sh_bios_console_write(s, count);
-}
-
-/*
- * Setup initial baud/bits/parity. We do two things here:
- * - construct a cflag setting for the first rs_open()
- * - initialize the serial port
- * Return non-zero if we didn't find a serial port.
- */
-static int __init sh_console_setup(struct console *co, char *options)
-{
- int cflag = CREAD | HUPCL | CLOCAL;
-
- /*
- * Now construct a cflag setting.
- * TODO: this is a totally bogus cflag, as we have
- * no idea what serial settings the BIOS is using, or
- * even if its using the serial port at all.
- */
- cflag |= B115200 | CS8 | /*no parity*/0;
-
- co->cflag = cflag;
-
- return 0;
-}
-
-static struct console bios_console = {
- .name = "bios",
- .write = sh_console_write,
- .setup = sh_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-#endif
-
-#ifdef CONFIG_EARLY_SCIF_CONSOLE
-#include <linux/serial_core.h>
-#include "../../../drivers/serial/sh-sci.h"
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
-#define EPK_SCSMR_VALUE 0x000
-#define EPK_SCBRR_VALUE 0x00C
-#define EPK_FIFO_SIZE 64
-#define EPK_FIFO_BITS (0x7f00 >> 8)
-#else
-#define EPK_FIFO_SIZE 16
-#define EPK_FIFO_BITS (0x1f00 >> 8)
-#endif
-
-static struct uart_port scif_port = {
- .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT,
- .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
-};
-
-static void scif_sercon_putc(int c)
-{
- while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE))
- ;
-
- sci_out(&scif_port, SCxTDR, c);
- sci_in(&scif_port, SCxSR);
- sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40));
-
- while ((sci_in(&scif_port, SCxSR) & 0x40) == 0)
- ;
-
- if (c == '\n')
- scif_sercon_putc('\r');
-}
-
-static void scif_sercon_write(struct console *con, const char *s,
- unsigned count)
-{
- while (count-- > 0)
- scif_sercon_putc(*s++);
-}
-
-static int __init scif_sercon_setup(struct console *con, char *options)
-{
- con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8;
-
- return 0;
-}
-
-static struct console scif_console = {
- .name = "sercon",
- .write = scif_sercon_write,
- .setup = scif_sercon_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
-static void scif_sercon_init(char *s)
-{
- sci_out(&scif_port, SCSCR, 0x0000); /* clear TE and RE */
- sci_out(&scif_port, SCFCR, 0x4006); /* reset */
- sci_out(&scif_port, SCSCR, 0x0000); /* select internal clock */
- sci_out(&scif_port, SCSMR, EPK_SCSMR_VALUE);
- sci_out(&scif_port, SCBRR, EPK_SCBRR_VALUE);
-
- mdelay(1); /* wait 1-bit time */
-
- sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */
- sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */
-}
-#elif defined(CONFIG_CPU_SH4)
-#define DEFAULT_BAUD 115200
-/*
- * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
- * devices that aren't using sh-ipl+g.
- */
-static void scif_sercon_init(char *s)
-{
- unsigned baud = DEFAULT_BAUD;
- char *e;
-
- if (*s == ',')
- ++s;
-
- if (*s) {
- /* ignore ioport/device name */
- s += strcspn(s, ",");
- if (*s == ',')
- s++;
- }
-
- if (*s) {
- baud = simple_strtoul(s, &e, 0);
- if (baud == 0 || s == e)
- baud = DEFAULT_BAUD;
- }
-
- ctrl_outw(0, scif_port.mapbase + 8);
- ctrl_outw(0, scif_port.mapbase);
-
- /* Set baud rate */
- ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) /
- (32 * baud) - 1, scif_port.mapbase + 4);
-
- ctrl_outw(12, scif_port.mapbase + 24);
- ctrl_outw(8, scif_port.mapbase + 24);
- ctrl_outw(0, scif_port.mapbase + 32);
- ctrl_outw(0x60, scif_port.mapbase + 16);
- ctrl_outw(0, scif_port.mapbase + 36);
- ctrl_outw(0x30, scif_port.mapbase + 8);
-}
-#endif /* defined(CONFIG_CPU_SUBTYPE_SH7720) */
-#endif /* !defined(CONFIG_SH_STANDARD_BIOS) */
-#endif /* CONFIG_EARLY_SCIF_CONSOLE */
-
-/*
- * Setup a default console, if more than one is compiled in, rely on the
- * earlyprintk= parsing to give priority.
- */
-static struct console *early_console =
-#ifdef CONFIG_SH_STANDARD_BIOS
- &bios_console
-#elif defined(CONFIG_EARLY_SCIF_CONSOLE)
- &scif_console
-#else
- NULL
-#endif
- ;
-
-static int __init setup_early_printk(char *buf)
-{
- int keep_early = 0;
-
- if (!buf)
- return 0;
-
- if (strstr(buf, "keep"))
- keep_early = 1;
-
-#ifdef CONFIG_SH_STANDARD_BIOS
- if (!strncmp(buf, "bios", 4))
- early_console = &bios_console;
-#endif
-#if defined(CONFIG_EARLY_SCIF_CONSOLE)
- if (!strncmp(buf, "serial", 6)) {
- early_console = &scif_console;
-
-#if !defined(CONFIG_SH_STANDARD_BIOS)
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721)
- scif_sercon_init(buf + 6);
-#endif
-#endif
- }
-#endif
-
- if (likely(early_console)) {
- if (keep_early)
- early_console->flags &= ~CON_BOOT;
- else
- early_console->flags |= CON_BOOT;
- register_console(early_console);
- }
-
- return 0;
-}
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 926b2e7b11c..13047a4facd 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -1,9 +1,6 @@
-/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
- *
- * linux/arch/sh/entry.S
- *
+/*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -46,9 +43,10 @@
* syscall #
*
*/
+#include <asm/dwarf.h>
#if defined(CONFIG_PREEMPT)
-# define preempt_stop() cli
+# define preempt_stop() cli ; TRACE_IRQS_OFF
#else
# define preempt_stop()
# define resume_kernel __restore_all
@@ -58,11 +56,7 @@
.align 2
ENTRY(exception_error)
!
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 2f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_ON
sti
mov.l 1f, r0
jmp @r0
@@ -70,18 +64,21 @@ ENTRY(exception_error)
.align 2
1: .long do_exception_error
-#ifdef CONFIG_TRACE_IRQFLAGS
-2: .long trace_hardirqs_on
-#endif
.align 2
ret_from_exception:
+ CFI_STARTPROC simple
+ CFI_DEF_CFA r14, 0
+ CFI_REL_OFFSET 17, 64
+ CFI_REL_OFFSET 15, 60
+ CFI_REL_OFFSET 14, 56
+ CFI_REL_OFFSET 13, 52
+ CFI_REL_OFFSET 12, 48
+ CFI_REL_OFFSET 11, 44
+ CFI_REL_OFFSET 10, 40
+ CFI_REL_OFFSET 9, 36
+ CFI_REL_OFFSET 8, 32
preempt_stop()
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 4f, r0
- jsr @r0
- nop
-#endif
ENTRY(ret_from_irq)
!
mov #OFF_SR, r0
@@ -95,6 +92,8 @@ ENTRY(ret_from_irq)
bra resume_userspace
nop
ENTRY(resume_kernel)
+ cli
+ TRACE_IRQS_OFF
mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count
tst r0, r0
bf noresched
@@ -105,31 +104,13 @@ need_resched:
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
- and #0xf0, r0 ! interrupts off (exception path)?
- cmp/eq #0xf0, r0
+ shlr r0
+ and #(0xf0>>1), r0 ! interrupts off (exception path)?
+ cmp/eq #(0xf0>>1), r0
bt noresched
-
mov.l 1f, r0
- mov.l r0, @(TI_PRE_COUNT,r8)
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 3f, r0
- jsr @r0
+ jsr @r0 ! call preempt_schedule_irq
nop
-#endif
- sti
- mov.l 2f, r0
- jsr @r0
- nop
- mov #0, r0
- mov.l r0, @(TI_PRE_COUNT,r8)
- cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 4f, r0
- jsr @r0
- nop
-#endif
-
bra need_resched
nop
@@ -138,24 +119,15 @@ noresched:
nop
.align 2
-1: .long PREEMPT_ACTIVE
-2: .long schedule
-#ifdef CONFIG_TRACE_IRQFLAGS
-3: .long trace_hardirqs_on
-4: .long trace_hardirqs_off
-#endif
+1: .long preempt_schedule_irq
#endif
ENTRY(resume_userspace)
! r8: current_thread_info
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
+ tst #(_TIF_WORK_MASK & 0xff), r0
bt/s __restore_all
tst #_TIF_NEED_RESCHED, r0
@@ -165,12 +137,13 @@ work_pending:
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
- tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+ tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig:
bt/s __restore_all
mov r15, r4
mov r12, r5 ! set arg1(save_r0)
mov r0, r6
+ sti
mov.l 2f, r1
mov.l 3f, r0
jmp @r1
@@ -180,14 +153,10 @@ work_resched:
jsr @r1 ! schedule
nop
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
!
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_WORK_MASK, r0
+ tst #(_TIF_WORK_MASK & 0xff), r0
bt __restore_all
bra work_pending
tst #_TIF_NEED_RESCHED, r0
@@ -195,29 +164,19 @@ work_resched:
.align 2
1: .long schedule
2: .long do_notify_resume
-3: .long restore_all
-#ifdef CONFIG_TRACE_IRQFLAGS
-4: .long trace_hardirqs_on
-5: .long trace_hardirqs_off
-#endif
+3: .long resume_userspace
.align 2
syscall_exit_work:
! r0: current_thread_info->flags
! r8: current_thread_info
- tst #_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT, r0
+ tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0
bt/s work_pending
tst #_TIF_NEED_RESCHED, r0
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_ON
sti
- ! XXX setup arguments...
mov r15, r4
- mov #1, r5
- mov.l 4f, r0 ! do_syscall_trace
+ mov.l 8f, r0 ! do_syscall_trace_leave
jsr @r0
nop
bra resume_userspace
@@ -226,19 +185,18 @@ syscall_exit_work:
.align 2
syscall_trace_entry:
! Yes it is traced.
- ! XXX setup arguments...
mov r15, r4
- mov #0, r5
- mov.l 4f, r11 ! Call do_syscall_trace which notifies
+ mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies
jsr @r11 ! superior (will chomp R[0-7])
nop
+ mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
- ! used by the system call handler directly
- ! from the kernel stack anyway, so don't need
- ! to be reloaded here.) This allows the parent
- ! to rewrite system calls and args on the fly.
+ ! reloaded from the kernel stack by syscall_call
+ ! below, so don't need to be reloaded here.)
+ ! This allows the parent to rewrite system calls
+ ! and args on the fly.
mov.l @(OFF_R4,r15), r4 ! arg0
mov.l @(OFF_R5,r15), r5
mov.l @(OFF_R6,r15), r6
@@ -253,15 +211,29 @@ syscall_trace_entry:
mov.l r0, @(OFF_R0,r15) ! Return value
__restore_all:
- mov.l 1f, r0
+ mov #OFF_SR, r0
+ mov.l @(r0,r15), r0 ! get status register
+
+ shlr2 r0
+ and #0x3c, r0
+ cmp/eq #0x3c, r0
+ bt 1f
+ TRACE_IRQS_ON
+ bra 2f
+ nop
+1:
+ TRACE_IRQS_OFF
+2:
+ mov.l 3f, r0
jmp @r0
nop
.align 2
-1: .long restore_all
+3: .long restore_all
.align 2
syscall_badsys: ! Bad syscall number
+ get_current_thread_info r8, r0
mov #-ENOSYS, r0
bra resume_userspace
mov.l r0, @(OFF_R0,r15) ! Return value
@@ -281,8 +253,11 @@ debug_trap:
mov.l 1f, r8
add r0, r8
mov.l @r8, r8
- jmp @r8
+ jsr @r8
nop
+ bra __restore_all
+ nop
+ CFI_ENDPROC
.align 2
1: .long debug_trap_table
@@ -320,6 +295,19 @@ ret_from_fork:
mov r0, r4
bra syscall_exit
nop
+
+ .align 2
+ .globl ret_from_kernel_thread
+ret_from_kernel_thread:
+ mov.l 1f, r8
+ jsr @r8
+ mov r0, r4
+ mov.l @(OFF_R5,r15), r5 ! fn
+ jsr @r5
+ mov.l @(OFF_R4,r15), r4 ! arg
+ bra syscall_exit
+ nop
+
.align 2
1: .long schedule_tail
@@ -328,31 +316,36 @@ ret_from_fork:
* system calls and debug traps through their respective jump tables.
*/
ENTRY(system_call)
+ setup_frame_reg
#if !defined(CONFIG_CPU_SH2)
mov.l 1f, r9
mov.l @r9, r8 ! Read from TRA (Trap Address) Register
#endif
+
+ mov #OFF_TRA, r10
+ add r15, r10
+ mov.l r8, @r10 ! set TRA value to tra
+
/*
* Check the trap type
*/
mov #((0x20 << 2) - 1), r9
cmp/hi r9, r8
bt/s debug_trap ! it's a debug trap..
- mov #OFF_TRA, r9
- add r15, r9
- mov.l r8, @r9 ! set TRA value to tra
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 5f, r10
- jsr @r10
nop
-#endif
+
+ TRACE_IRQS_ON
sti
!
get_current_thread_info r8, r10
mov.l @(TI_FLAGS,r8), r8
- mov #(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT), r10
+ mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10
+ mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9
tst r10, r8
+ shll8 r9
+ bf syscall_trace_entry
+ tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
@@ -364,22 +357,29 @@ syscall_call:
mov.l 3f, r8 ! Load the address of sys_call_table
add r8, r3
mov.l @r3, r8
+ mov.l @(OFF_R2,r15), r2
+ mov.l @(OFF_R1,r15), r1
+ mov.l @(OFF_R0,r15), r0
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
jsr @r8 ! jump to specific syscall handler
nop
+ add #12, r15
mov.l @(OFF_R0,r15), r12 ! save r0
mov.l r0, @(OFF_R0,r15) ! save the return value
!
syscall_exit:
cli
-#ifdef CONFIG_TRACE_IRQFLAGS
- mov.l 6f, r0
- jsr @r0
- nop
-#endif
+ TRACE_IRQS_OFF
!
get_current_thread_info r8, r0
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
- tst #_TIF_ALLWORK_MASK, r0
+ tst #(_TIF_ALLWORK_MASK & 0xff), r0
+ mov #(_TIF_ALLWORK_MASK >> 8), r1
+ bf syscall_exit_work
+ shlr8 r0
+ tst r0, r1
bf syscall_exit_work
bra __restore_all
nop
@@ -389,8 +389,5 @@ syscall_exit:
#endif
2: .long NR_syscalls
3: .long sys_call_table
-4: .long do_syscall_trace
-#ifdef CONFIG_TRACE_IRQFLAGS
-5: .long trace_hardirqs_on
-6: .long trace_hardirqs_off
-#endif
+7: .long do_syscall_trace_enter
+8: .long do_syscall_trace_leave
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
new file mode 100644
index 00000000000..3c74f53db6d
--- /dev/null
+++ b/arch/sh/kernel/ftrace.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
+ * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
+ *
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes to Ingo Molnar, for suggesting the idea.
+ * Mathieu Desnoyers, for suggesting postponing the modifications.
+ * Arjan van de Ven, for keeping me straight, and explaining to me
+ * the dangers of modifying code on the run.
+ */
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <asm/ftrace.h>
+#include <asm/cacheflush.h>
+#include <asm/unistd.h>
+#include <trace/syscall.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
+
+static unsigned char ftrace_nop[4];
+/*
+ * If we're trying to nop out a call to a function, we instead
+ * place a call to the address after the memory table.
+ *
+ * 8c011060 <a>:
+ * 8c011060: 02 d1 mov.l 8c01106c <a+0xc>,r1
+ * 8c011062: 22 4f sts.l pr,@-r15
+ * 8c011064: 02 c7 mova 8c011070 <a+0x10>,r0
+ * 8c011066: 2b 41 jmp @r1
+ * 8c011068: 2a 40 lds r0,pr
+ * 8c01106a: 09 00 nop
+ * 8c01106c: 68 24 .word 0x2468 <--- ip
+ * 8c01106e: 1d 8c .word 0x8c1d
+ * 8c011070: 26 4f lds.l @r15+,pr <--- ip + MCOUNT_INSN_SIZE
+ *
+ * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
+ * past the _mcount call and continue executing code like normal.
+ */
+static unsigned char *ftrace_nop_replace(unsigned long ip)
+{
+ __raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
+ return ftrace_nop;
+}
+
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+ /* Place the address in the memory table. */
+ __raw_writel(addr, ftrace_replaced_code);
+
+ /*
+ * No locking needed, this must be called via kstop_machine
+ * which in essence is like running on a uniprocessor machine.
+ */
+ return ftrace_replaced_code;
+}
+
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUs from executing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ * and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ * we are modifying code; this is done in an atomic operation.
+ * 3) Write the code
+ * 4) clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
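The protocol above packs two things into the single atomic word nmi_running: the low bits count NMIs currently in flight, and bit 31 tells an incoming NMI that a text write is pending and that it should perform the write itself. A minimal user-space analogue of that flag-plus-counter encoding, using C11 atomics (names and output here are illustrative only, not kernel interfaces):

/*
 * Stand-alone illustration (not kernel code): one atomic word carrying
 * both an "NMIs in flight" count and a "write pending" flag, as the
 * comment above describes for nmi_running.
 */
#include <stdatomic.h>
#include <stdio.h>

#define WRITE_FLAG (1u << 31)

static atomic_uint nmi_running;

static void nmi_enter(void)
{
	/* Low bits count NMIs in flight; bit 31 asks the NMI to do the write. */
	if (atomic_fetch_add(&nmi_running, 1) & WRITE_FLAG)
		puts("NMI performs the pending text write");
}

static void nmi_exit(void)
{
	atomic_fetch_sub(&nmi_running, 1);
}

int main(void)
{
	unsigned int expected = 0;

	/* Writer side: wait until no NMI is running, then raise the flag. */
	while (!atomic_compare_exchange_weak(&nmi_running, &expected, WRITE_FLAG))
		expected = 0;

	nmi_enter();		/* an NMI arriving now sees the flag and helps */
	nmi_exit();

	atomic_fetch_and(&nmi_running, ~WRITE_FLAG);	/* writer clears the flag */
	return 0;
}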
+#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status; /* holds return value of text write */
+static void *mod_code_ip; /* holds the IP to write to */
+static void *mod_code_newcode; /* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+ int r;
+
+ r = snprintf(buf, size, "%u %u",
+ nmi_wait_count,
+ atomic_read(&nmi_update_count));
+ return r;
+}
+
+static void clear_mod_flag(void)
+{
+ int old = atomic_read(&nmi_running);
+
+ for (;;) {
+ int new = old & ~MOD_CODE_WRITE_FLAG;
+
+ if (old == new)
+ break;
+
+ old = atomic_cmpxchg(&nmi_running, old, new);
+ }
+}
+
+static void ftrace_mod_code(void)
+{
+ /*
+ * Yes, more than one CPU process can be writing to mod_code_status.
+ * (and the code itself)
+ * But if one were to fail, then they all should, and if one were
+ * to succeed, then they all should.
+ */
+ mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+ MCOUNT_INSN_SIZE);
+
+ /* if we fail, then kill any new writers */
+ if (mod_code_status)
+ clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+ if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+ smp_rmb();
+ ftrace_mod_code();
+ atomic_inc(&nmi_update_count);
+ }
+ /* Must have previous changes seen before executions */
+ smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+ /* Finish all executions before clearing nmi_running */
+ smp_mb();
+ atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+ if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+ nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+ if (!atomic_read(&nmi_running))
+ return;
+
+ do {
+ cpu_relax();
+ } while (atomic_read(&nmi_running));
+
+ nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+ mod_code_ip = (void *)ip;
+ mod_code_newcode = new_code;
+
+ /* The buffers need to be visible before we let NMIs write them */
+ smp_mb();
+
+ wait_for_nmi_and_set_mod_flag();
+
+ /* Make sure all running NMIs have finished before we write the code */
+ smp_mb();
+
+ ftrace_mod_code();
+
+ /* Make sure the write happens before clearing the bit */
+ smp_mb();
+
+ clear_mod_flag();
+ wait_for_nmi();
+
+ return mod_code_status;
+}
+
+static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+ unsigned char *new_code)
+{
+ unsigned char replaced[MCOUNT_INSN_SIZE];
+
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, so we need to protect against faulting
+ * as well as code changing. We do this by using the
+ * probe_kernel_* functions.
+ *
+ * No real locking needed, this code is run through
+ * kstop_machine, or before SMP starts.
+ */
+
+ /* read the text we want to modify */
+ if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure it is what we expect it to be */
+ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+ return -EINVAL;
+
+ /* replace the text with the new text */
+ if (do_ftrace_mod_code(ip, new_code))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+ return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+ memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop_replace(ip);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_nop_replace(ip);
+ new = ftrace_call_replace(ip, addr);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+ unsigned long new_addr)
+{
+ unsigned char code[MCOUNT_INSN_SIZE];
+
+ if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (old_addr != __raw_readl((unsigned long *)code))
+ return -EINVAL;
+
+ __raw_writel(new_addr, ip);
+ return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&skip_trace);
+ new_addr = (unsigned long)(&ftrace_graph_caller);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&ftrace_graph_caller);
+ new_addr = (unsigned long)(&skip_trace);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted, err;
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against fault, even if it shouldn't
+ * happen. This tool is too much intrusive to
+ * ignore such a protection.
+ */
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%2, %0 \n\t"
+ "2: \n\t"
+ "mov.l %3, @%2 \n\t"
+ "mov #0, %1 \n\t"
+ "3: \n\t"
+ ".section .fixup, \"ax\" \n\t"
+ "4: \n\t"
+ "mov.l 5f, %0 \n\t"
+ "jmp @%0 \n\t"
+ " mov #1, %1 \n\t"
+ ".balign 4 \n\t"
+ "5: .long 3b \n\t"
+ ".previous \n\t"
+ ".section __ex_table,\"a\" \n\t"
+ ".long 1b, 4b \n\t"
+ ".long 2b, 4b \n\t"
+ ".previous \n\t"
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
+ if (err == -EBUSY) {
+ __raw_writel(old, parent);
+ return;
+ }
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ __raw_writel(old, parent);
+ }
+}
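For readers who find the fault-protected inline assembly above hard to follow, the same control flow is sketched here in plain C. The helper name is hypothetical, and the sketch deliberately omits the exception-table handling that the real code needs for a possibly-faulting access to *parent:

/*
 * Illustrative rendering of prepare_ftrace_return() without the
 * fault protection; not a drop-in replacement.
 */
static void hook_return_address(unsigned long *parent, unsigned long self_addr)
{
	unsigned long real_ret = *parent;	/* caller's return address */
	struct ftrace_graph_ent trace;

	/* Divert the return path through the tracer's trampoline. */
	*parent = (unsigned long)&return_to_handler;

	if (ftrace_push_return_trace(real_ret, self_addr, &trace.depth, 0) == -EBUSY) {
		*parent = real_ret;		/* ret_stack is full, back out */
		return;
	}

	trace.func = self_addr;
	if (!ftrace_graph_entry(&trace)) {	/* caller opted out of tracing */
		current->curr_ret_stack--;
		*parent = real_ret;
	}
}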
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
index d67d7ed09f2..7db248936b6 100644
--- a/arch/sh/kernel/head_32.S
+++ b/arch/sh/kernel/head_32.S
@@ -3,6 +3,7 @@
* arch/sh/kernel/head.S
*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 2010 Matt Fleming
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -10,8 +11,11 @@
*
* Head.S contains the SH exception handlers and startup code.
*/
+#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
+#include <asm/mmu.h>
+#include <cpu/mmu_context.h>
#ifdef CONFIG_CPU_SH4A
#define SYNCO() synco
@@ -30,8 +34,8 @@ ENTRY(empty_zero_page)
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
- .long 0x00360000 /* INITRD_START */
- .long 0x000a0000 /* INITRD_SIZE */
+ .long 0x00000000 /* INITRD_START */
+ .long 0x00000000 /* INITRD_SIZE */
#ifdef CONFIG_32BIT
.long 0x53453f00 + 32 /* "SE?" = 32 bit */
#else
@@ -40,7 +44,7 @@ ENTRY(empty_zero_page)
1:
.skip PAGE_SIZE - empty_zero_page - 1b
- .section .text.head, "ax"
+ __HEAD
/*
* Condition at the entry of _stext:
@@ -80,8 +84,219 @@ ENTRY(_stext)
mov.l 7f, r0
ldc r0, r7_bank ! ... and initial thread_info
#endif
-
- ! Clear BSS area
+
+#ifdef CONFIG_PMB
+/*
+ * Reconfigure the initial PMB mappings set up by the hardware.
+ *
+ * When we boot in 32-bit MMU mode, there are 2 PMB entries already
+ * set up for us.
+ *
+ * Entry VPN PPN V SZ C UB WT
+ * ---------------------------------------------------------------
+ * 0 0x80000000 0x00000000 1 512MB 1 0 1
+ * 1 0xA0000000 0x00000000 1 512MB 0 0 0
+ *
+ * But we reprogram them here because we want complete control over
+ * our address space and the initial mappings may not map PAGE_OFFSET
+ * to __MEMORY_START (or even map all of our RAM).
+ *
+ * Once we've set up cached and uncached mappings, we clear the rest of the
+ * PMB entries. This clearing also deals with the fact that PMB entries
+ * can persist across reboots. The PMB could have been left in any state
+ * when the reboot occurred, so to be safe we clear all entries and start
+ * with a clean slate.
+ *
+ * The uncached mapping is constructed using the smallest possible
+ * mapping with a single unbufferable page. Only the kernel text needs to
+ * be covered via the uncached mapping so that certain functions can be
+ * run uncached.
+ *
+ * Drivers and the like that have previously abused the 1:1 identity
+ * mapping are unsupported in 32-bit mode and must specify their caching
+ * preference when page tables are constructed.
+ *
+ * This frees up the P2 space for more nefarious purposes.
+ *
+ * Register utilization is as follows:
+ *
+ * r0 = PMB_DATA data field
+ * r1 = PMB_DATA address field
+ * r2 = PMB_ADDR data field
+ * r3 = PMB_ADDR address field
+ * r4 = PMB_E_SHIFT
+ * r5 = remaining amount of RAM to map
+ * r6 = PMB mapping size we're trying to use
+ * r7 = cached_to_uncached
+ * r8 = scratch register
+ * r9 = scratch register
+ * r10 = number of PMB entries we've setup
+ * r11 = scratch register
+ */
+
+ mov.l .LMMUCR, r1 /* Flush the TLB */
+ mov.l @r1, r0
+ or #MMUCR_TI, r0
+ mov.l r0, @r1
+
+ mov.l .LMEMORY_SIZE, r5
+
+ mov #PMB_E_SHIFT, r0
+ mov #0x1, r4
+ shld r0, r4
+
+ mov.l .LFIRST_DATA_ENTRY, r0
+ mov.l .LPMB_DATA, r1
+ mov.l .LFIRST_ADDR_ENTRY, r2
+ mov.l .LPMB_ADDR, r3
+
+ /*
+ * First we need to walk the PMB and figure out if there are any
+ * existing mappings that match the initial mappings VPN/PPN.
+ * If these have already been established by the bootloader, we
+ * don't bother setting up new entries here, and let the late PMB
+ * initialization take care of things instead.
+ *
+ * Note that we may need to coalesce and merge entries in order
+ * to reclaim more available PMB slots, which is much more than
+ * we want to do at this early stage.
+ */
+ mov #0, r10
+ mov #NR_PMB_ENTRIES, r9
+
+ mov r1, r7 /* temporary PMB_DATA iter */
+
+.Lvalidate_existing_mappings:
+
+ mov.l .LPMB_DATA_MASK, r11
+ mov.l @r7, r8
+ and r11, r8
+ cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
+ bt .Lpmb_done
+
+ add #1, r10 /* Increment the loop counter */
+ cmp/eq r9, r10
+ bf/s .Lvalidate_existing_mappings
+ add r4, r7 /* Increment to the next PMB_DATA entry */
+
+ /*
+ * If we've fallen through, continue with setting up the initial
+ * mappings.
+ */
+
+ mov r5, r7 /* cached_to_uncached */
+ mov #0, r10
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /*
+ * Uncached mapping
+ */
+ mov #(PMB_SZ_16M >> 2), r9
+ shll2 r9
+
+ mov #(PMB_UB >> 8), r8
+ shll8 r8
+
+ or r0, r8
+ or r9, r8
+ mov.l r8, @r1
+ mov r2, r8
+ add r7, r8
+ mov.l r8, @r3
+
+ add r4, r1
+ add r4, r3
+ add #1, r10
+#endif
+
+/*
+ * Iterate over all of the available sizes from largest to
+ * smallest for constructing the cached mapping.
+ */
+#define __PMB_ITER_BY_SIZE(size) \
+.L##size: \
+ mov #(size >> 4), r6; \
+ shll16 r6; \
+ shll8 r6; \
+ \
+ cmp/hi r5, r6; \
+ bt 9999f; \
+ \
+ mov #(PMB_SZ_##size##M >> 2), r9; \
+ shll2 r9; \
+ \
+ /* \
+ * Cached mapping \
+ */ \
+ mov #PMB_C, r8; \
+ or r0, r8; \
+ or r9, r8; \
+ mov.l r8, @r1; \
+ mov.l r2, @r3; \
+ \
+ /* Increment to the next PMB_DATA entry */ \
+ add r4, r1; \
+ /* Increment to the next PMB_ADDR entry */ \
+ add r4, r3; \
+ /* Increment number of PMB entries */ \
+ add #1, r10; \
+ \
+ sub r6, r5; \
+ add r6, r0; \
+ add r6, r2; \
+ \
+ bra .L##size; \
+9999:
+
+ __PMB_ITER_BY_SIZE(512)
+ __PMB_ITER_BY_SIZE(128)
+ __PMB_ITER_BY_SIZE(64)
+ __PMB_ITER_BY_SIZE(16)
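The effect of the four invocations above is a largest-first fit: each size is used for as many entries as still fit in the remaining RAM before falling through to the next smaller size. A small stand-alone C program illustrating just that selection logic (the 80MB figure and the printout are made up; the real code programs PMB_ADDR/PMB_DATA directly from assembly, before any C runs):

/*
 * Illustration only: the largest-first size selection performed by
 * __PMB_ITER_BY_SIZE above.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long sizes_mb[] = { 512, 128, 64, 16 };
	unsigned long remaining_mb = 80;	/* pretend we have 80MB of RAM */
	unsigned long offset_mb = 0, entry = 0;
	int i;

	for (i = 0; i < 4; i++) {
		/* Use this size for as many entries as still fit. */
		while (remaining_mb >= sizes_mb[i]) {
			printf("PMB entry %lu: %luMB mapping at offset %luMB\n",
			       entry++, sizes_mb[i], offset_mb);
			offset_mb += sizes_mb[i];
			remaining_mb -= sizes_mb[i];
		}
	}
	return 0;
}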
+
+#ifdef CONFIG_UNCACHED_MAPPING
+ /*
+ * Now that we can access it, update cached_to_uncached and
+ * uncached_size.
+ */
+ mov.l .Lcached_to_uncached, r0
+ mov.l r7, @r0
+
+ mov.l .Luncached_size, r0
+ mov #1, r7
+ shll16 r7
+ shll8 r7
+ mov.l r7, @r0
+#endif
+
+ /*
+ * Clear the remaining PMB entries.
+ *
+ * r3 = entry to begin clearing from
+ * r10 = number of entries we've setup so far
+ */
+ mov #0, r1
+ mov #NR_PMB_ENTRIES, r0
+
+.Lagain:
+ mov.l r1, @r3 /* Clear PMB_ADDR entry */
+ add #1, r10 /* Increment the loop counter */
+ cmp/eq r0, r10
+ bf/s .Lagain
+ add r4, r3 /* Increment to the next PMB_ADDR entry */
+
+ mov.l 6f, r0
+ icbi @r0
+
+.Lpmb_done:
+#endif /* CONFIG_PMB */
+
+#ifndef CONFIG_SH_NO_BSS_INIT
+ /*
+ * Don't clear BSS if running on slow platforms such as an RTL simulation,
+ * remote memory via SHdebug link, etc. For these the memory can be guaranteed
+ * to be all zero on boot anyway.
+ */
+ ! Clear BSS area
#ifdef CONFIG_SMP
mov.l 3f, r0
cmp/eq #0, r0 ! skip clear if set to zero
@@ -97,6 +312,8 @@ ENTRY(_stext)
mov.l r0,@-r2
10:
+#endif
+
! Additional CPU initialization
mov.l 6f, r0
jsr @r0
@@ -113,12 +330,26 @@ ENTRY(_stext)
#if defined(CONFIG_CPU_SH2)
1: .long 0x000000F0 ! IMASK=0xF
#else
-1: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2: .long init_thread_union+THREAD_SIZE
3: .long __bss_start
4: .long _end
5: .long start_kernel
-6: .long sh_cpu_init
+6: .long cpu_init
7: .long init_thread_union
+
+#ifdef CONFIG_PMB
+.LPMB_ADDR: .long PMB_ADDR
+.LPMB_DATA: .long PMB_DATA
+.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V
+.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
+.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
+.LMMUCR: .long MMUCR
+.LMEMORY_SIZE: .long __MEMORY_SIZE
+#ifdef CONFIG_UNCACHED_MAPPING
+.Lcached_to_uncached: .long cached_to_uncached
+.Luncached_size: .long uncached_size
+#endif
+#endif
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
index f42d4c0feb7..defd851abef 100644
--- a/arch/sh/kernel/head_64.S
+++ b/arch/sh/kernel/head_64.S
@@ -8,11 +8,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+
+#include <linux/init.h>
+
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
-#include <asm/cpu/registers.h>
-#include <asm/cpu/mmu_context.h>
+#include <cpu/registers.h>
+#include <cpu/mmu_context.h>
#include <asm/thread_info.h>
/*
@@ -110,7 +113,7 @@ empty_bad_pte_table:
fpu_in_use: .quad 0
- .section .text.head, "ax"
+ __HEAD
.balign L1_CACHE_BYTES
/*
* Condition at the entry of __stext:
@@ -217,7 +220,6 @@ clear_DTLB:
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
-#ifdef CONFIG_EARLY_PRINTK
/*
* Setup a DTLB translation for SCIF phys.
*/
@@ -228,7 +230,6 @@ clear_DTLB:
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
-#endif
/*
* Set cache behaviours.
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
new file mode 100644
index 00000000000..2197fc58418
--- /dev/null
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -0,0 +1,421 @@
+/*
+ * arch/sh/kernel/hw_breakpoint.c
+ *
+ * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
+ *
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+
+/*
+ * Stores the breakpoints currently in use on each breakpoint address
+ * register for each cpus
+ */
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
+
+/*
+ * A dummy placeholder for early accesses until the CPUs get a chance to
+ * register their UBCs later in the boot process.
+ */
+static struct sh_ubc ubc_dummy = { .num_events = 0 };
+
+static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
+
+/*
+ * Install a perf counter breakpoint.
+ *
+ * We seek a free UBC channel and use it for this breakpoint.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
+
+ if (!*slot) {
+ *slot = bp;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+ return -EBUSY;
+
+ clk_enable(sh_ubc->clk);
+ sh_ubc->enable(info, i);
+
+ return 0;
+}
+
+/*
+ * Uninstall the breakpoint contained in the given counter.
+ *
+ * First we search the debug address register it uses and then we disable
+ * it.
+ *
+ * Atomic: we hold the counter->ctx->lock and we only handle variables
+ * and registers local to this cpu.
+ */
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ int i;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
+
+ if (*slot == bp) {
+ *slot = NULL;
+ break;
+ }
+ }
+
+ if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
+ return;
+
+ sh_ubc->disable(info, i);
+ clk_disable(sh_ubc->clk);
+}
+
+static int get_hbp_len(u16 hbp_len)
+{
+ unsigned int len_in_bytes = 0;
+
+ switch (hbp_len) {
+ case SH_BREAKPOINT_LEN_1:
+ len_in_bytes = 1;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ len_in_bytes = 2;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ len_in_bytes = 4;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ len_in_bytes = 8;
+ break;
+ }
+ return len_in_bytes;
+}
+
+/*
+ * Check for virtual address in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
+{
+ unsigned int len;
+ unsigned long va;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ va = info->address;
+ len = get_hbp_len(info->len);
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+int arch_bp_generic_fields(int sh_len, int sh_type,
+ int *gen_len, int *gen_type)
+{
+ /* Len */
+ switch (sh_len) {
+ case SH_BREAKPOINT_LEN_1:
+ *gen_len = HW_BREAKPOINT_LEN_1;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ *gen_len = HW_BREAKPOINT_LEN_2;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ *gen_len = HW_BREAKPOINT_LEN_4;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ *gen_len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Type */
+ switch (sh_type) {
+ case SH_BREAKPOINT_READ:
+ *gen_type = HW_BREAKPOINT_R;
+ break;
+ case SH_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case SH_BREAKPOINT_RW:
+ *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int arch_build_bp_info(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ info->address = bp->attr.bp_addr;
+
+ /* Len */
+ switch (bp->attr.bp_len) {
+ case HW_BREAKPOINT_LEN_1:
+ info->len = SH_BREAKPOINT_LEN_1;
+ break;
+ case HW_BREAKPOINT_LEN_2:
+ info->len = SH_BREAKPOINT_LEN_2;
+ break;
+ case HW_BREAKPOINT_LEN_4:
+ info->len = SH_BREAKPOINT_LEN_4;
+ break;
+ case HW_BREAKPOINT_LEN_8:
+ info->len = SH_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Type */
+ switch (bp->attr.bp_type) {
+ case HW_BREAKPOINT_R:
+ info->type = SH_BREAKPOINT_READ;
+ break;
+ case HW_BREAKPOINT_W:
+ info->type = SH_BREAKPOINT_WRITE;
+ break;
+ case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
+ info->type = SH_BREAKPOINT_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings
+ */
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
+{
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ unsigned int align;
+ int ret;
+
+ ret = arch_build_bp_info(bp);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ switch (info->len) {
+ case SH_BREAKPOINT_LEN_1:
+ align = 0;
+ break;
+ case SH_BREAKPOINT_LEN_2:
+ align = 1;
+ break;
+ case SH_BREAKPOINT_LEN_4:
+ align = 3;
+ break;
+ case SH_BREAKPOINT_LEN_8:
+ align = 7;
+ break;
+ default:
+ return ret;
+ }
+
+ /*
+ * For kernel-addresses, either the address or symbol name can be
+ * specified.
+ */
+ if (info->name)
+ info->address = (unsigned long)kallsyms_lookup_name(info->name);
+
+ /*
+ * Check that the low-order bits of the address are appropriate
+ * for the alignment implied by len.
+ */
+ if (info->address & align)
+ return -EINVAL;
+
+ return 0;
+}
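As a concrete instance of the check above: a 4-byte breakpoint uses align = 3, so an address is accepted only if its two low bits are clear. A trivial stand-alone illustration (the address is made up):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x8c011062;	/* hypothetical address */
	unsigned int align = 3;			/* SH_BREAKPOINT_LEN_4 */

	printf("0x%lx is %s for a 4-byte breakpoint\n",
	       addr, (addr & align) ? "rejected" : "accepted");
	return 0;
}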
+
+/*
+ * Release the user breakpoints used by ptrace
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ unregister_hw_breakpoint(t->ptrace_bps[i]);
+ t->ptrace_bps[i] = NULL;
+ }
+}
+
+static int __kprobes hw_breakpoint_handler(struct die_args *args)
+{
+ int cpu, i, rc = NOTIFY_STOP;
+ struct perf_event *bp;
+ unsigned int cmf, resume_mask;
+
+ /*
+ * Do an early return if none of the channels triggered.
+ */
+ cmf = sh_ubc->triggered_mask();
+ if (unlikely(!cmf))
+ return NOTIFY_DONE;
+
+ /*
+ * By default, resume all of the active channels.
+ */
+ resume_mask = sh_ubc->active_mask();
+
+ /*
+ * Disable breakpoints during exception handling.
+ */
+ sh_ubc->disable_all();
+
+ cpu = get_cpu();
+ for (i = 0; i < sh_ubc->num_events; i++) {
+ unsigned long event_mask = (1 << i);
+
+ if (likely(!(cmf & event_mask)))
+ continue;
+
+ /*
+ * The counter may be concurrently released but that can only
+ * occur from a call_rcu() path. We can then safely fetch
+ * the breakpoint, use its callback, touch its counter
+ * while we are in an rcu_read_lock() path.
+ */
+ rcu_read_lock();
+
+ bp = per_cpu(bp_per_reg[i], cpu);
+ if (bp)
+ rc = NOTIFY_DONE;
+
+ /*
+ * Reset the condition match flag to denote completion of
+ * exception handling.
+ */
+ sh_ubc->clear_triggered_mask(event_mask);
+
+ /*
+ * bp can be NULL due to concurrent perf counter
+ * removal.
+ */
+ if (!bp) {
+ rcu_read_unlock();
+ break;
+ }
+
+ /*
+ * Don't restore the channel if the breakpoint is from
+ * ptrace, as it always operates in one-shot mode.
+ */
+ if (bp->overflow_handler == ptrace_triggered)
+ resume_mask &= ~(1 << i);
+
+ perf_bp_event(bp, args->regs);
+
+ /* Deliver the signal to userspace */
+ if (!arch_check_bp_in_kernelspace(bp)) {
+ siginfo_t info;
+
+ info.si_signo = args->signr;
+ info.si_errno = notifier_to_errno(rc);
+ info.si_code = TRAP_HWBKPT;
+
+ force_sig_info(args->signr, &info, current);
+ }
+
+ rcu_read_unlock();
+ }
+
+ if (cmf == 0)
+ rc = NOTIFY_DONE;
+
+ sh_ubc->enable_all(resume_mask);
+
+ put_cpu();
+
+ return rc;
+}
+
+BUILD_TRAP_HANDLER(breakpoint)
+{
+ unsigned long ex = lookup_exception_vector();
+ TRAP_HANDLER_DECL;
+
+ notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
+}
+
+/*
+ * Handle debug exception notifications.
+ */
+int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (val != DIE_BREAKPOINT)
+ return NOTIFY_DONE;
+
+ /*
+ * If the breakpoint hasn't been triggered by the UBC, it's
+ * probably from a debugger, so don't do anything more here.
+ *
+ * This also permits the UBC interface clock to remain off for
+ * non-UBC breakpoints, as we don't need to check the triggered
+ * or active channel masks.
+ */
+ if (args->trapnr != sh_ubc->trap_nr)
+ return NOTIFY_DONE;
+
+ return hw_breakpoint_handler(data);
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+ /* TODO */
+}
+
+int register_sh_ubc(struct sh_ubc *ubc)
+{
+ /* Bail if it's already assigned */
+ if (sh_ubc != &ubc_dummy)
+ return -EBUSY;
+ sh_ubc = ubc;
+
+ pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
+
+ WARN_ON(ubc->num_events > HBP_NUM);
+
+ return 0;
+}
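Callers do not invoke register_sh_ubc() or the arch hooks above directly; breakpoints reach this code through the generic hw_breakpoint/perf layer. A hedged sketch of how such a request is typically described on the generic side (the function is hypothetical, and the final registration call, register_wide_hw_breakpoint() or register_user_hw_breakpoint(), takes different argument lists depending on kernel version, so it is only referenced in a comment):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Hypothetical example: describe a 4-byte write watchpoint on kaddr. */
static void example_describe_watchpoint(void *kaddr, struct perf_event_attr *attr)
{
	hw_breakpoint_init(attr);			/* fill in the perf boilerplate */
	attr->bp_addr = (unsigned long)kaddr;
	attr->bp_len  = HW_BREAKPOINT_LEN_4;		/* becomes SH_BREAKPOINT_LEN_4 above */
	attr->bp_type = HW_BREAKPOINT_W;		/* becomes SH_BREAKPOINT_WRITE above */

	/*
	 * The attr is then handed to register_wide_hw_breakpoint() or
	 * register_user_hw_breakpoint(); their argument lists vary between
	 * kernel versions, so the call is deliberately not spelled out here.
	 */
}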
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
new file mode 100644
index 00000000000..be616ee0cf8
--- /dev/null
+++ b/arch/sh/kernel/idle.c
@@ -0,0 +1,61 @@
+/*
+ * The idle loop for all SuperH platforms.
+ *
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/preempt.h>
+#include <linux/thread_info.h>
+#include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+#include <asm/pgalloc.h>
+#include <asm/smp.h>
+#include <asm/bl_bit.h>
+
+static void (*sh_idle)(void);
+
+void default_idle(void)
+{
+ set_bl_bit();
+ local_irq_enable();
+ /* Isn't this racy? */
+ cpu_sleep();
+ clear_bl_bit();
+}
+
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
+
+void arch_cpu_idle(void)
+{
+ sh_idle();
+}
+
+void __init select_idle_routine(void)
+{
+ /*
+ * If a platform has set its own idle routine, leave it alone.
+ */
+ if (!sh_idle)
+ sh_idle = default_idle;
+}
+
+void stop_this_cpu(void *unused)
+{
+ local_irq_disable();
+ set_cpu_online(smp_processor_id(), false);
+
+ for (;;)
+ cpu_sleep();
+}
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
deleted file mode 100644
index f9bcc606127..00000000000
--- a/arch/sh/kernel/init_task.c
+++ /dev/null
@@ -1,36 +0,0 @@
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-#include <linux/fs.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct pt_regs fake_swapper_regs;
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 8192-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 2b899122990..5c51b794ba2 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -1,49 +1,101 @@
/*
- * linux/arch/sh/kernel/io.c
+ * arch/sh/kernel/io.c - Machine independent I/O functions.
*
- * Copyright (C) 2000 Stuart Menefy
+ * Copyright (C) 2000 - 2009 Stuart Menefy
* Copyright (C) 2005 Paul Mundt
*
- * Provide real functions which expand to whatever the header file defined.
- * Also definitions of machine independent IO functions.
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <asm/machvec.h>
#include <asm/io.h>
/*
* Copy data from IO memory space to "real" memory space.
- * This needs to be optimized.
*/
-void memcpy_fromio(void *to, volatile void __iomem *from, unsigned long count)
+void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
{
- char *p = to;
- while (count) {
- count--;
- *p = readb((void __iomem *)from);
- p++;
- from++;
- }
+ /*
+ * Would it be worthwhile doing byte and long transfers first
+ * to try and get aligned?
+ */
+#ifdef CONFIG_CPU_SH4
+ if ((count >= 0x20) &&
+ (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
+ int tmp2, tmp3, tmp4, tmp5, tmp6;
+
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l @%7+, %2 \n\t"
+ "movca.l r0, @%0 \n\t"
+ "mov.l @%7+, %3 \n\t"
+ "mov.l @%7+, %4 \n\t"
+ "mov.l @%7+, %5 \n\t"
+ "mov.l @%7+, %6 \n\t"
+ "mov.l @%7+, r7 \n\t"
+ "mov.l @%7+, r0 \n\t"
+ "mov.l %2, @(0x04,%0) \n\t"
+ "mov #0x20, %2 \n\t"
+ "mov.l %3, @(0x08,%0) \n\t"
+ "sub %2, %1 \n\t"
+ "mov.l %4, @(0x0c,%0) \n\t"
+ "cmp/hi %1, %2 ! T if 32 > count \n\t"
+ "mov.l %5, @(0x10,%0) \n\t"
+ "mov.l %6, @(0x14,%0) \n\t"
+ "mov.l r7, @(0x18,%0) \n\t"
+ "mov.l r0, @(0x1c,%0) \n\t"
+ "bf.s 1b \n\t"
+ " add #0x20, %0 \n\t"
+ : "=&r" (to), "=&r" (count),
+ "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+ "=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
+ : "7"(from), "0" (to), "1" (count)
+ : "r0", "r7", "t", "memory");
+ }
+#endif
+
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for (; count > 3; count -= 4) {
+ *(u32 *)to = *(volatile u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(u8 *)to = *(volatile u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
- * This needs to be optimized.
*/
void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
{
- const char *p = from;
- while (count) {
- count--;
- writeb(*p, (void __iomem *)to);
- p++;
- to++;
- }
+ if ((((u32)to | (u32)from) & 0x3) == 0) {
+ for ( ; count > 3; count -= 4) {
+ *(volatile u32 *)to = *(u32 *)from;
+ to += 4;
+ from += 4;
+ }
+ }
+
+ for (; count > 0; count--) {
+ *(volatile u8 *)to = *(u8 *)from;
+ to++;
+ from++;
+ }
+
+ mb();
}
EXPORT_SYMBOL(memcpy_toio);
@@ -55,26 +107,8 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
{
while (count) {
count--;
- writeb(c, (void __iomem *)dst);
+ writeb(c, dst);
dst++;
}
}
EXPORT_SYMBOL(memset_io);
-
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
- void __iomem *ret;
-
- ret = __ioport_map_trapped(port, nr);
- if (ret)
- return ret;
-
- return __ioport_map(port, nr);
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
- sh_mv.mv_ioport_unmap(addr);
-}
-EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c
deleted file mode 100644
index db769449f5a..00000000000
--- a/arch/sh/kernel/io_generic.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * arch/sh/kernel/io_generic.c
- *
- * Copyright (C) 2000 Niibe Yutaka
- * Copyright (C) 2005 - 2007 Paul Mundt
- *
- * Generic I/O routine. These can be used where a machine specific version
- * is not required.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/module.h>
-#include <linux/io.h>
-#include <asm/machvec.h>
-
-#ifdef CONFIG_CPU_SH3
-/* SH3 has a PCMCIA bug that needs a dummy read from area 6 for a
- * workaround. */
-/* I'm not sure SH7709 has this kind of bug */
-#define dummy_read() ctrl_inb(0xba000000)
-#else
-#define dummy_read()
-#endif
-
-unsigned long generic_io_base;
-
-static inline void delay(void)
-{
- ctrl_inw(0xa0000000);
-}
-
-u8 generic_inb(unsigned long port)
-{
- return ctrl_inb((unsigned long __force)__ioport_map(port, 1));
-}
-
-u16 generic_inw(unsigned long port)
-{
- return ctrl_inw((unsigned long __force)__ioport_map(port, 2));
-}
-
-u32 generic_inl(unsigned long port)
-{
- return ctrl_inl((unsigned long __force)__ioport_map(port, 4));
-}
-
-u8 generic_inb_p(unsigned long port)
-{
- unsigned long v = generic_inb(port);
-
- delay();
- return v;
-}
-
-u16 generic_inw_p(unsigned long port)
-{
- unsigned long v = generic_inw(port);
-
- delay();
- return v;
-}
-
-u32 generic_inl_p(unsigned long port)
-{
- unsigned long v = generic_inl(port);
-
- delay();
- return v;
-}
-
-/*
- * insb/w/l all read a series of bytes/words/longs from a fixed port
- * address. However as the port address doesn't change we only need to
- * convert the port address to real address once.
- */
-
-void generic_insb(unsigned long port, void *dst, unsigned long count)
-{
- volatile u8 *port_addr;
- u8 *buf = dst;
-
- port_addr = (volatile u8 *)__ioport_map(port, 1);
- while (count--)
- *buf++ = *port_addr;
-}
-
-void generic_insw(unsigned long port, void *dst, unsigned long count)
-{
- volatile u16 *port_addr;
- u16 *buf = dst;
-
- port_addr = (volatile u16 *)__ioport_map(port, 2);
- while (count--)
- *buf++ = *port_addr;
-
- dummy_read();
-}
-
-void generic_insl(unsigned long port, void *dst, unsigned long count)
-{
- volatile u32 *port_addr;
- u32 *buf = dst;
-
- port_addr = (volatile u32 *)__ioport_map(port, 4);
- while (count--)
- *buf++ = *port_addr;
-
- dummy_read();
-}
-
-void generic_outb(u8 b, unsigned long port)
-{
- ctrl_outb(b, (unsigned long __force)__ioport_map(port, 1));
-}
-
-void generic_outw(u16 b, unsigned long port)
-{
- ctrl_outw(b, (unsigned long __force)__ioport_map(port, 2));
-}
-
-void generic_outl(u32 b, unsigned long port)
-{
- ctrl_outl(b, (unsigned long __force)__ioport_map(port, 4));
-}
-
-void generic_outb_p(u8 b, unsigned long port)
-{
- generic_outb(b, port);
- delay();
-}
-
-void generic_outw_p(u16 b, unsigned long port)
-{
- generic_outw(b, port);
- delay();
-}
-
-void generic_outl_p(u32 b, unsigned long port)
-{
- generic_outl(b, port);
- delay();
-}
-
-/*
- * outsb/w/l all write a series of bytes/words/longs to a fixed port
- * address. However as the port address doesn't change we only need to
- * convert the port address to real address once.
- */
-void generic_outsb(unsigned long port, const void *src, unsigned long count)
-{
- volatile u8 *port_addr;
- const u8 *buf = src;
-
- port_addr = (volatile u8 __force *)__ioport_map(port, 1);
-
- while (count--)
- *port_addr = *buf++;
-}
-
-void generic_outsw(unsigned long port, const void *src, unsigned long count)
-{
- volatile u16 *port_addr;
- const u16 *buf = src;
-
- port_addr = (volatile u16 __force *)__ioport_map(port, 2);
-
- while (count--)
- *port_addr = *buf++;
-
- dummy_read();
-}
-
-void generic_outsl(unsigned long port, const void *src, unsigned long count)
-{
- volatile u32 *port_addr;
- const u32 *buf = src;
-
- port_addr = (volatile u32 __force *)__ioport_map(port, 4);
- while (count--)
- *port_addr = *buf++;
-
- dummy_read();
-}
-
-u8 generic_readb(void __iomem *addr)
-{
- return ctrl_inb((unsigned long __force)addr);
-}
-
-u16 generic_readw(void __iomem *addr)
-{
- return ctrl_inw((unsigned long __force)addr);
-}
-
-u32 generic_readl(void __iomem *addr)
-{
- return ctrl_inl((unsigned long __force)addr);
-}
-
-void generic_writeb(u8 b, void __iomem *addr)
-{
- ctrl_outb(b, (unsigned long __force)addr);
-}
-
-void generic_writew(u16 b, void __iomem *addr)
-{
- ctrl_outw(b, (unsigned long __force)addr);
-}
-
-void generic_writel(u32 b, void __iomem *addr)
-{
- ctrl_outl(b, (unsigned long __force)addr);
-}
-
-void __iomem *generic_ioport_map(unsigned long addr, unsigned int size)
-{
- return (void __iomem *)(addr + generic_io_base);
-}
-
-void generic_ioport_unmap(void __iomem *addr)
-{
-}
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 39cd7f3aec7..f8ce36286ce 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -14,7 +14,7 @@
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
-#include <asm/system.h>
+#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -22,7 +22,7 @@
#define TRAPPED_PAGES_MAX 16
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
@@ -32,6 +32,15 @@ EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);
+static int trapped_io_disable __read_mostly;
+
+static int __init trapped_io_setup(char *__unused)
+{
+ trapped_io_disable = 1;
+ return 1;
+}
+__setup("noiotrap", trapped_io_setup);
+
int register_trapped_io(struct trapped_io *tiop)
{
struct resource *res;
@@ -39,13 +48,16 @@ int register_trapped_io(struct trapped_io *tiop)
struct page *pages[TRAPPED_PAGES_MAX];
int k, n;
+ if (unlikely(trapped_io_disable))
+ return 0;
+
/* structure must be page aligned */
if ((unsigned long)tiop & (PAGE_SIZE - 1))
goto bad;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
- len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+ len += roundup(resource_size(res), PAGE_SIZE);
flags |= res->flags;
}
@@ -72,16 +84,20 @@ int register_trapped_io(struct trapped_io *tiop)
(unsigned long)(tiop->virt_base + len),
res->flags & IORESOURCE_IO ? "io" : "mmio",
(unsigned long)res->start);
- len += roundup((res->end - res->start) + 1, PAGE_SIZE);
+ len += roundup(resource_size(res), PAGE_SIZE);
}
tiop->magic = IO_TRAPPED_MAGIC;
INIT_LIST_HEAD(&tiop->list);
spin_lock_irq(&trapped_lock);
+#ifdef CONFIG_HAS_IOPORT_MAP
if (flags & IORESOURCE_IO)
list_add(&tiop->list, &trapped_io);
+#endif
+#ifdef CONFIG_HAS_IOMEM
if (flags & IORESOURCE_MEM)
list_add(&tiop->list, &trapped_mem);
+#endif
spin_unlock_irq(&trapped_lock);
return 0;
@@ -99,22 +115,23 @@ void __iomem *match_trapped_io_handler(struct list_head *list,
struct trapped_io *tiop;
struct resource *res;
int k, len;
+ unsigned long flags;
- spin_lock_irq(&trapped_lock);
+ spin_lock_irqsave(&trapped_lock, flags);
list_for_each_entry(tiop, list, list) {
voffs = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
if (res->start == offset) {
- spin_unlock_irq(&trapped_lock);
+ spin_unlock_irqrestore(&trapped_lock, flags);
return tiop->virt_base + voffs;
}
- len = (res->end - res->start) + 1;
+ len = resource_size(res);
voffs += roundup(len, PAGE_SIZE);
}
}
- spin_unlock_irq(&trapped_lock);
+ spin_unlock_irqrestore(&trapped_lock, flags);
return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);
@@ -155,7 +172,7 @@ static unsigned long lookup_address(struct trapped_io *tiop,
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
- len = roundup((res->end - res->start) + 1, PAGE_SIZE);
+ len = roundup(resource_size(res), PAGE_SIZE);
if (address < (vaddr + len))
return res->start + (address - vaddr);
vaddr += len;
@@ -170,31 +187,31 @@ static unsigned long long copy_word(unsigned long src_addr, int src_len,
switch (src_len) {
case 1:
- tmp = ctrl_inb(src_addr);
+ tmp = __raw_readb(src_addr);
break;
case 2:
- tmp = ctrl_inw(src_addr);
+ tmp = __raw_readw(src_addr);
break;
case 4:
- tmp = ctrl_inl(src_addr);
+ tmp = __raw_readl(src_addr);
break;
case 8:
- tmp = ctrl_inq(src_addr);
+ tmp = __raw_readq(src_addr);
break;
}
switch (dst_len) {
case 1:
- ctrl_outb(tmp, dst_addr);
+ __raw_writeb(tmp, dst_addr);
break;
case 2:
- ctrl_outw(tmp, dst_addr);
+ __raw_writew(tmp, dst_addr);
break;
case 4:
- ctrl_outl(tmp, dst_addr);
+ __raw_writel(tmp, dst_addr);
break;
case 8:
- ctrl_outq(tmp, dst_addr);
+ __raw_writeq(tmp, dst_addr);
break;
}
@@ -254,9 +271,11 @@ static struct mem_access trapped_io_access = {
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
mm_segment_t oldfs;
- opcode_t instruction;
+ insn_size_t instruction;
int tmp;
+ if (trapped_io_disable)
+ return 0;
if (!lookup_tiop(address))
return 0;
@@ -270,7 +289,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address)
return 0;
}
- tmp = handle_unaligned_access(instruction, regs, &trapped_io_access);
+ tmp = handle_unaligned_access(instruction, regs,
+ &trapped_io_access, 1, address);
set_fs(oldfs);
return tmp == 0;
}
diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c
new file mode 100644
index 00000000000..2e8e8b9b9ce
--- /dev/null
+++ b/arch/sh/kernel/iomap.c
@@ -0,0 +1,165 @@
+/*
+ * arch/sh/kernel/iomap.c
+ *
+ * Copyright (C) 2000 Niibe Yutaka
+ * Copyright (C) 2005 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+
+unsigned int ioread8(void __iomem *addr)
+{
+ return readb(addr);
+}
+EXPORT_SYMBOL(ioread8);
+
+unsigned int ioread16(void __iomem *addr)
+{
+ return readw(addr);
+}
+EXPORT_SYMBOL(ioread16);
+
+unsigned int ioread16be(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw(addr));
+}
+EXPORT_SYMBOL(ioread16be);
+
+unsigned int ioread32(void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL(ioread32);
+
+unsigned int ioread32be(void __iomem *addr)
+{
+ return be32_to_cpu(__raw_readl(addr));
+}
+EXPORT_SYMBOL(ioread32be);
+
+void iowrite8(u8 val, void __iomem *addr)
+{
+ writeb(val, addr);
+}
+EXPORT_SYMBOL(iowrite8);
+
+void iowrite16(u16 val, void __iomem *addr)
+{
+ writew(val, addr);
+}
+EXPORT_SYMBOL(iowrite16);
+
+void iowrite16be(u16 val, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(val), addr);
+}
+EXPORT_SYMBOL(iowrite16be);
+
+void iowrite32(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+}
+EXPORT_SYMBOL(iowrite32);
+
+void iowrite32be(u32 val, void __iomem *addr)
+{
+ __raw_writel(cpu_to_be32(val), addr);
+}
+EXPORT_SYMBOL(iowrite32be);
+
+/*
+ * These are the "repeat MMIO read/write" functions.
+ * Note the "__raw" accesses, since we don't want to
+ * convert to CPU byte order. We write in "IO byte
+ * order" (we also don't have IO barriers).
+ */
+static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
+{
+ while (--count >= 0) {
+ u8 data = __raw_readb(addr);
+ *dst = data;
+ dst++;
+ }
+}
+
+static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
+{
+ while (--count >= 0) {
+ u16 data = __raw_readw(addr);
+ *dst = data;
+ dst++;
+ }
+}
+
+static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
+{
+ while (--count >= 0) {
+ u32 data = __raw_readl(addr);
+ *dst = data;
+ dst++;
+ }
+}
+
+static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writeb(*src, addr);
+ src++;
+ }
+}
+
+static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writew(*src, addr);
+ src++;
+ }
+}
+
+static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writel(*src, addr);
+ src++;
+ }
+}
+
+void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ mmio_insb(addr, dst, count);
+}
+EXPORT_SYMBOL(ioread8_rep);
+
+void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ mmio_insw(addr, dst, count);
+}
+EXPORT_SYMBOL(ioread16_rep);
+
+void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
+{
+ mmio_insl(addr, dst, count);
+}
+EXPORT_SYMBOL(ioread32_rep);
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ mmio_outsb(addr, src, count);
+}
+EXPORT_SYMBOL(iowrite8_rep);
+
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ mmio_outsw(addr, src, count);
+}
+EXPORT_SYMBOL(iowrite16_rep);
+
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ mmio_outsl(addr, src, count);
+}
+EXPORT_SYMBOL(iowrite32_rep);
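The new iomap.c routes ioread*/iowrite* through readX/writeX and keeps the _rep and big-endian variants on __raw_* so repeated transfers are not byte-swapped. A hedged usage sketch; the register offset and word count are made up for illustration:

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_FIFO_REG   0x10    /* hypothetical data-FIFO register offset */

static void demo_drain_fifo(void __iomem *base, u32 *buf, unsigned long words)
{
        /* Pull 'words' 32-bit values from a single FIFO register,
         * in bus byte order, via the ioread32_rep() added above. */
        ioread32_rep(base + DEMO_FIFO_REG, buf, words);
}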
diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c
new file mode 100644
index 00000000000..cca14ba84a3
--- /dev/null
+++ b/arch/sh/kernel/ioport.c
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/kernel/ioport.c
+ *
+ * Copyright (C) 2000 Niibe Yutaka
+ * Copyright (C) 2005 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+
+unsigned long sh_io_port_base __read_mostly = -1;
+EXPORT_SYMBOL(sh_io_port_base);
+
+void __iomem *__ioport_map(unsigned long addr, unsigned int size)
+{
+ if (sh_mv.mv_ioport_map)
+ return sh_mv.mv_ioport_map(addr, size);
+
+ return (void __iomem *)(addr + sh_io_port_base);
+}
+EXPORT_SYMBOL(__ioport_map);
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ void __iomem *ret;
+
+ ret = __ioport_map_trapped(port, nr);
+ if (ret)
+ return ret;
+
+ return __ioport_map(port, nr);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+ if (sh_mv.mv_ioport_unmap)
+ sh_mv.mv_ioport_unmap(addr);
+}
+EXPORT_SYMBOL(ioport_unmap);
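ioport_map() first lets the trapped-I/O layer claim the port range and otherwise falls back to the machvec hook or a flat sh_io_port_base offset. A minimal driver-style sketch; the port number and register meaning are purely illustrative:

#include <linux/errno.h>
#include <linux/io.h>

static int demo_probe_port(void)
{
        void __iomem *regs = ioport_map(0x300, 8);      /* hypothetical port */
        unsigned int id;

        if (!regs)
                return -ENOMEM;

        id = ioread8(regs);     /* dispatched through the iomap.c helpers above */
        ioport_unmap(regs);

        return id ? 0 : -ENODEV;
}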
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 9bf19b00696..65a1ecd77f9 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -11,11 +11,14 @@
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/ftrace.h>
+#include <linux/delay.h>
+#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
+#include <cpu/mmu_context.h>
atomic_t irq_err_count;
@@ -31,38 +34,19 @@ void ack_bad_irq(unsigned int irq)
}
#if defined(CONFIG_PROC_FS)
-int show_interrupts(struct seq_file *p, void *v)
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_puts(p, " ");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%d ",j);
- seq_putc(p, '\n');
- }
+ int j;
+
+ seq_printf(p, "%*s: ", prec, "NMI");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
+ seq_printf(p, " Non-maskable interrupts\n");
- if (i < sh_mv.mv_nr_irqs) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if (!action)
- goto unlock;
- seq_printf(p, "%3d: ",i);
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
- seq_printf(p, " %14s", irq_desc[i].chip->name);
- seq_printf(p, "-%-8s", irq_desc[i].name);
- seq_printf(p, " %s", action->name);
-
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
- seq_putc(p, '\n');
-unlock:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == sh_mv.mv_nr_irqs)
- seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
@@ -79,36 +63,14 @@ union irq_ctx {
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
-#endif
-asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
+static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+
+static inline void handle_one_irq(unsigned int irq)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-#ifdef CONFIG_IRQSTACKS
union irq_ctx *curctx, *irqctx;
-#endif
-
- irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: is there less than 1KB free? */
- {
- long sp;
- __asm__ __volatile__ ("and r15, %0" :
- "=r" (sp) : "0" (THREAD_SIZE - 1));
-
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
-
- irq = irq_demux(evt2irq(irq));
-
-#ifdef CONFIG_IRQSTACKS
curctx = (union irq_ctx *)current_thread_info();
irqctx = hardirq_ctx[smp_processor_id()];
@@ -147,22 +109,9 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
"r5", "r6", "r7", "r8", "t", "pr"
);
} else
-#endif
generic_handle_irq(irq);
-
- irq_exit();
-
- set_irq_regs(old_regs);
- return 1;
}
-#ifdef CONFIG_IRQSTACKS
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
- __attribute__((__section__(".bss.page_aligned")));
-
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
- __attribute__((__section__(".bss.page_aligned")));
-
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
@@ -200,51 +149,59 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
-extern asmlinkage void __do_softirq(void);
-
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
{
- unsigned long flags;
struct thread_info *curctx;
union irq_ctx *irqctx;
u32 *isp;
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
+ curctx = current_thread_info();
+ irqctx = softirq_ctx[smp_processor_id()];
+ irqctx->tinfo.task = curctx->task;
+ irqctx->tinfo.previous_sp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+ isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+ __asm__ __volatile__ (
+ "mov r15, r9 \n"
+ "jsr @%0 \n"
+ /* switch to the softirq stack */
+ " mov %1, r15 \n"
+ /* restore the thread stack */
+ "mov r9, r15 \n"
+ : /* no outputs */
+ : "r" (__do_softirq), "r" (isp)
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+ generic_handle_irq(irq);
+}
+#endif
- if (local_softirq_pending()) {
- curctx = current_thread_info();
- irqctx = softirq_ctx[smp_processor_id()];
- irqctx->tinfo.task = curctx->task;
- irqctx->tinfo.previous_sp = current_stack_pointer;
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
- /* build the stack frame on the softirq stack */
- isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+ irq_enter();
- __asm__ __volatile__ (
- "mov r15, r9 \n"
- "jsr @%0 \n"
- /* switch to the softirq stack */
- " mov %1, r15 \n"
- /* restore the thread stack */
- "mov r9, r15 \n"
- : /* no outputs */
- : "r" (__do_softirq), "r" (isp)
- : "memory", "r0", "r1", "r2", "r3", "r4",
- "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
- );
+ irq = irq_demux(irq_lookup(irq));
- /*
- * Shouldnt happen, we returned above if in_interrupt():
- */
- WARN_ON_ONCE(softirq_count());
+ if (irq != NO_IRQ_IGNORE) {
+ handle_one_irq(irq);
+ irq_finish(irq);
}
- local_irq_restore(flags);
+ irq_exit();
+
+ set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
}
-#endif
void __init init_IRQ(void)
{
@@ -254,5 +211,35 @@ void __init init_IRQ(void)
if (sh_mv.mv_init_irq)
sh_mv.mv_init_irq();
+ intc_finalize();
+
irq_ctx_init(smp_processor_id());
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ unsigned int irq, cpu = smp_processor_id();
+
+ for_each_active_irq(irq) {
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ if (data->node == cpu) {
+ unsigned int newcpu = cpumask_any_and(data->affinity,
+ cpu_online_mask);
+ if (newcpu >= nr_cpu_ids) {
+ pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ irq, cpu);
+
+ cpumask_setall(data->affinity);
+ }
+ irq_set_affinity(irq, data->affinity);
+ }
+ }
+}
+#endif
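migrate_irqs() above walks the active IRQs and re-targets anything bound to the CPU going down, falling back to "all CPUs" when the affinity mask no longer intersects the online mask. The per-IRQ decision as a stand-alone hedged sketch, using the same cpumask helpers as the patch; the wrapper name is hypothetical:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical wrapper around the per-IRQ fix-up done in migrate_irqs(). */
static void demo_retarget_irq(unsigned int irq, unsigned int dying_cpu)
{
        struct irq_data *data = irq_get_irq_data(irq);
        unsigned int newcpu;

        if (data->node != dying_cpu)
                return;                         /* not bound to the dying CPU */

        newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
        if (newcpu >= nr_cpu_ids)
                cpumask_setall(data->affinity); /* no allowed CPU left online */

        irq_set_affinity(irq, data->affinity);
}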
diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c
new file mode 100644
index 00000000000..e5a755be912
--- /dev/null
+++ b/arch/sh/kernel/irq_32.c
@@ -0,0 +1,57 @@
+/*
+ * SHcompact irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+
+void notrace arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long __dummy0, __dummy1;
+
+ if (flags == ARCH_IRQ_DISABLED) {
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "or #0xf0, %0\n\t"
+ "ldc %0, sr\n\t"
+ : "=&z" (__dummy0)
+ : /* no inputs */
+ : "memory"
+ );
+ } else {
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and %1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+ "stc r6_bank, %1\n\t"
+ "or %1, %0\n\t"
+#endif
+ "ldc %0, sr\n\t"
+ : "=&r" (__dummy0), "=r" (__dummy1)
+ : "1" (~ARCH_IRQ_DISABLED)
+ : "memory"
+ );
+ }
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+unsigned long notrace arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__ (
+ "stc sr, %0\n\t"
+ "and #0xf0, %0\n\t"
+ : "=&z" (flags)
+ : /* no inputs */
+ : "memory"
+ );
+
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_save_flags);
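arch_local_irq_restore() either raises IMASK to 0xf in SR (all levels masked) or restores the saved mask, and arch_local_save_flags() extracts just the IMASK field. Drivers never call these directly; they go through the generic linux/irqflags.h wrappers, e.g.:

#include <linux/irqflags.h>

static void demo_critical_section(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* ends up in arch_local_save_flags()
                                           plus a disabling restore */
        /* ... state that must not race with interrupt handlers ... */
        local_irq_restore(flags);       /* ends up in arch_local_irq_restore() */
}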
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
new file mode 100644
index 00000000000..8fc05b997b6
--- /dev/null
+++ b/arch/sh/kernel/irq_64.c
@@ -0,0 +1,51 @@
+/*
+ * SHmedia irqflags support
+ *
+ * Copyright (C) 2006 - 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <cpu/registers.h>
+
+void notrace arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long long __dummy;
+
+ if (flags == ARCH_IRQ_DISABLED) {
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "or %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (ARCH_IRQ_DISABLED)
+ );
+ } else {
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "and %0, %1, %0\n\t"
+ "putcon %0, " __SR "\n\t"
+ : "=&r" (__dummy)
+ : "r" (~ARCH_IRQ_DISABLED)
+ );
+ }
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+unsigned long notrace arch_local_save_flags(void)
+{
+ unsigned long flags;
+
+ __asm__ __volatile__ (
+ "getcon " __SR ", %0\n\t"
+ "and %0, %1, %0"
+ : "=&r" (flags)
+ : "r" (ARCH_IRQ_DISABLED)
+ );
+
+ return flags;
+}
+EXPORT_SYMBOL(arch_local_save_flags);
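Only these two primitives need per-ISA assembly; the rest of the irqflags API is expected to come from asm-generic/irqflags.h. A sketch of how that header typically composes them, shown for orientation and not part of this patch:

/* Sketch of the asm-generic composition over the two exported primitives. */
static inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();

        arch_local_irq_restore(ARCH_IRQ_DISABLED);      /* mask everything */
        return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return flags == ARCH_IRQ_DISABLED;
}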
diff --git a/arch/sh/kernel/kdebugfs.c b/arch/sh/kernel/kdebugfs.c
new file mode 100644
index 00000000000..e11c30bb100
--- /dev/null
+++ b/arch/sh/kernel/kdebugfs.c
@@ -0,0 +1,16 @@
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int __init arch_kdebugfs_init(void)
+{
+ arch_debugfs_dir = debugfs_create_dir("sh", NULL);
+ if (!arch_debugfs_dir)
+ return -ENOMEM;
+
+ return 0;
+}
+arch_initcall(arch_kdebugfs_init);
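arch_debugfs_dir gives SH-specific code a common /sys/kernel/debug/sh/ directory to hang files off. A hedged sketch of using it from arch code; the file name and counter are made up, and it assumes the debugfs API of this era where the create helpers return a dentry:

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/init.h>

static u32 demo_counter;        /* hypothetical value exported for debugging */

static int __init demo_debugfs_init(void)
{
        /* Creates /sys/kernel/debug/sh/demo_counter when debugfs is mounted. */
        if (!debugfs_create_u32("demo_counter", 0444,
                                arch_debugfs_dir, &demo_counter))
                return -ENOMEM;

        return 0;
}
late_initcall(demo_debugfs_init);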
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
new file mode 100644
index 00000000000..adad46e41a1
--- /dev/null
+++ b/arch/sh/kernel/kgdb.c
@@ -0,0 +1,390 @@
+/*
+ * SuperH KGDB support
+ *
+ * Copyright (C) 2008 - 2012 Paul Mundt
+ *
+ * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+/* Macros for single step instruction identification */
+#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
+#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
+#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
+ (((op) & 0x7f ) << 1))
+#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
+#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
+#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
+#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
+ (((op) & 0x7ff) << 1))
+#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
+#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
+#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
+#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
+ (((op) & 0x7ff) << 1))
+#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
+#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
+#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
+#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
+#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
+#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
+#define OPCODE_RTS(op) ((op) == 0xb)
+#define OPCODE_RTE(op) ((op) == 0x2b)
+
+#define SR_T_BIT_MASK 0x1
+#define STEP_OPCODE 0xc33d
+
+/* Calculate the new address for after a step */
+static short *get_step_address(struct pt_regs *linux_regs)
+{
+ insn_size_t op = __raw_readw(linux_regs->pc);
+ long addr;
+
+ /* BT */
+ if (OPCODE_BT(op)) {
+ if (linux_regs->sr & SR_T_BIT_MASK)
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 2;
+ }
+
+ /* BTS */
+ else if (OPCODE_BTS(op)) {
+ if (linux_regs->sr & SR_T_BIT_MASK)
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 4; /* Not in delay slot */
+ }
+
+ /* BF */
+ else if (OPCODE_BF(op)) {
+ if (!(linux_regs->sr & SR_T_BIT_MASK))
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 2;
+ }
+
+ /* BFS */
+ else if (OPCODE_BFS(op)) {
+ if (!(linux_regs->sr & SR_T_BIT_MASK))
+ addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
+ else
+ addr = linux_regs->pc + 4; /* Not in delay slot */
+ }
+
+ /* BRA */
+ else if (OPCODE_BRA(op))
+ addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);
+
+ /* BRAF */
+ else if (OPCODE_BRAF(op))
+ addr = linux_regs->pc + 4
+ + linux_regs->regs[OPCODE_BRAF_REG(op)];
+
+ /* BSR */
+ else if (OPCODE_BSR(op))
+ addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);
+
+ /* BSRF */
+ else if (OPCODE_BSRF(op))
+ addr = linux_regs->pc + 4
+ + linux_regs->regs[OPCODE_BSRF_REG(op)];
+
+ /* JMP */
+ else if (OPCODE_JMP(op))
+ addr = linux_regs->regs[OPCODE_JMP_REG(op)];
+
+ /* JSR */
+ else if (OPCODE_JSR(op))
+ addr = linux_regs->regs[OPCODE_JSR_REG(op)];
+
+ /* RTS */
+ else if (OPCODE_RTS(op))
+ addr = linux_regs->pr;
+
+ /* RTE */
+ else if (OPCODE_RTE(op))
+ addr = linux_regs->regs[15];
+
+ /* Other */
+ else
+ addr = linux_regs->pc + instruction_size(op);
+
+ flush_icache_range(addr, addr + instruction_size(op));
+ return (short *)addr;
+}
+
+/*
+ * Replace the instruction immediately after the current instruction
+ * (i.e. next in the expected flow of control) with a trap instruction,
+ * so that returning will cause only a single instruction to be executed.
+ * Note that this model is slightly broken for instructions with delay
+ * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
+ * instruction in the delay slot will be executed.
+ */
+
+static unsigned long stepped_address;
+static insn_size_t stepped_opcode;
+
+static void do_single_step(struct pt_regs *linux_regs)
+{
+ /* Determine where the target instruction will send us to */
+ unsigned short *addr = get_step_address(linux_regs);
+
+ stepped_address = (int)addr;
+
+ /* Replace it */
+ stepped_opcode = __raw_readw((long)addr);
+ *addr = STEP_OPCODE;
+
+ /* Flush and return */
+ flush_icache_range((long)addr, (long)addr +
+ instruction_size(stepped_opcode));
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *linux_regs)
+{
+ /* If we have stepped, put back the old instruction */
+ /* Use stepped_address in case we stopped elsewhere */
+ if (stepped_opcode != 0) {
+ __raw_writew(stepped_opcode, stepped_address);
+ flush_icache_range(stepped_address, stepped_address + 2);
+ }
+
+ stepped_opcode = 0;
+}
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
+ { "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
+ { "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
+ { "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
+ { "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
+ { "vbr", GDB_SIZEOF_REG, -1 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].size != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+
+ switch (regno) {
+ case GDB_VBR:
+ __asm__ __volatile__ ("stc vbr, %0" : "=r" (mem));
+ break;
+ }
+
+ return dbg_reg_def[regno].name;
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ struct pt_regs *thread_regs = task_pt_regs(p);
+ int reg;
+
+ /* Initialize to zero */
+ for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
+ gdb_regs[reg] = 0;
+
+ /*
+ * Copy out GP regs 8 to 14.
+ *
+ * switch_to() relies on SR.RB toggling, so regs 0->7 are banked
+ * and need privileged instructions to get to. The r15 value we
+ * fetch from the thread info directly.
+ */
+ for (reg = GDB_R8; reg < GDB_R15; reg++)
+ gdb_regs[reg] = thread_regs->regs[reg];
+
+ gdb_regs[GDB_R15] = p->thread.sp;
+ gdb_regs[GDB_PC] = p->thread.pc;
+
+ /*
+ * Additional registers we have context for
+ */
+ gdb_regs[GDB_PR] = thread_regs->pr;
+ gdb_regs[GDB_GBR] = thread_regs->gbr;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *linux_regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ /* Undo any stepping we may have done */
+ undo_single_step(linux_regs);
+
+ switch (remcomInBuffer[0]) {
+ case 'c':
+ case 's':
+ /* try to read optional parameter, pc unchanged if no parm */
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ linux_regs->pc = addr;
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(linux_regs);
+ kgdb_single_step = 1;
+
+ atomic_set(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+
+ return 0;
+ }
+
+ /* this means that we do not want to exit from the handler: */
+ return -1;
+}
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ if (exception == 60)
+ return instruction_pointer(regs) - 2;
+ return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+}
+
+/*
+ * The primary entry points for the kgdb debug trap table entries.
+ */
+BUILD_TRAP_HANDLER(singlestep)
+{
+ unsigned long flags;
+ TRAP_HANDLER_DECL;
+
+ local_irq_save(flags);
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
+ local_irq_restore(flags);
+}
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ int ret;
+
+ switch (cmd) {
+ case DIE_BREAKPOINT:
+ /*
+ * This means a user thread is single stepping
+ * a system call which should be ignored
+ */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ return NOTIFY_DONE;
+
+ ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
+ args->err, args->regs);
+ if (ret)
+ return NOTIFY_DONE;
+
+ break;
+ }
+
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+
+ /*
+ * Lowest-prio notifier priority, we want to be notified last:
+ */
+ .priority = -INT_MAX,
+};
+
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: trapa #0x3c */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ .gdb_bpt_instr = { 0x3c, 0xc3 },
+#else
+ .gdb_bpt_instr = { 0xc3, 0x3c },
+#endif
+};
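The OPCODE_*_DISP macros above sign-extend the 8- or 12-bit branch displacement field and shift it left by one, since SH displacements count 16-bit instruction slots. A quick host-side check of OPCODE_BRA_DISP, with the sample opcodes chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
                             (((op) & 0x7ff) << 1))

int main(void)
{
        /* bra with displacement field 0x7ff: maximum forward reach. */
        int32_t fwd  = (int32_t)OPCODE_BRA_DISP(0xa7ffu);       /* +4094 bytes */
        /* bra with displacement field 0x800: maximum backward reach. */
        int32_t back = (int32_t)OPCODE_BRA_DISP(0xa800u);       /* -4096 bytes */

        printf("fwd=%d back=%d\n", (int)fwd, (int)back);
        return 0;
}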
diff --git a/arch/sh/kernel/kgdb_jmp.S b/arch/sh/kernel/kgdb_jmp.S
deleted file mode 100644
index 339bb1d7ff0..00000000000
--- a/arch/sh/kernel/kgdb_jmp.S
+++ /dev/null
@@ -1,33 +0,0 @@
-#include <linux/linkage.h>
-
-ENTRY(setjmp)
- add #(9*4), r4
- sts.l pr, @-r4
- mov.l r15, @-r4
- mov.l r14, @-r4
- mov.l r13, @-r4
- mov.l r12, @-r4
- mov.l r11, @-r4
- mov.l r10, @-r4
- mov.l r9, @-r4
- mov.l r8, @-r4
- rts
- mov #0, r0
-
-ENTRY(longjmp)
- mov.l @r4+, r8
- mov.l @r4+, r9
- mov.l @r4+, r10
- mov.l @r4+, r11
- mov.l @r4+, r12
- mov.l @r4+, r13
- mov.l @r4+, r14
- mov.l @r4+, r15
- lds.l @r4+, pr
- mov r5, r0
- tst r0, r0
- bf 1f
- mov #1, r0 ! in case val==0
-1: rts
- nop
-
diff --git a/arch/sh/kernel/kgdb_stub.c b/arch/sh/kernel/kgdb_stub.c
deleted file mode 100644
index d453c3a1c79..00000000000
--- a/arch/sh/kernel/kgdb_stub.c
+++ /dev/null
@@ -1,1061 +0,0 @@
-/*
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Contains extracts from code by Glenn Engel, Jim Kingdon,
- * David Grothe <dave@gcom.com>, Tigran Aivazian <tigran@sco.com>,
- * Amit S. Kale <akale@veritas.com>, William Gatliff <bgat@open-widgets.com>,
- * Ben Lee, Steve Chamberlain and Benoit Miller <fulg@iname.com>.
- *
- * This version by Henry Bell <henry.bell@st.com>
- * Minor modifications by Jeremy Siegel <jsiegel@mvista.com>
- *
- * Contains low-level support for remote debug using GDB.
- *
- * To enable debugger support, two things need to happen. A call to
- * set_debug_traps() is necessary in order to allow any breakpoints
- * or error conditions to be properly intercepted and reported to gdb.
- * A breakpoint also needs to be generated to begin communication. This
- * is most easily accomplished by a call to breakpoint() which does
- * a trapa if the initialisation phase has been successfully completed.
- *
- * In this case, set_debug_traps() is not used to "take over" exceptions;
- * other kernel code is modified instead to enter the kgdb functions here
- * when appropriate (see entry.S for breakpoint traps and NMI interrupts,
- * see traps.c for kernel error exceptions).
- *
- * The following gdb commands are supported:
- *
- * Command Function Return value
- *
- * g return the value of the CPU registers hex data or ENN
- * G set the value of the CPU registers OK or ENN
- *
- * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
- * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
- * XAA..AA,LLLL: Same, but data is binary (not hex) OK or ENN
- *
- * c Resume at current address SNN ( signal NN)
- * cAA..AA Continue at address AA..AA SNN
- * CNN; Resume at current address with signal SNN
- * CNN;AA..AA Resume at address AA..AA with signal SNN
- *
- * s Step one instruction SNN
- * sAA..AA Step one instruction from AA..AA SNN
- * SNN; Step one instruction with signal SNN
- * SNNAA..AA Step one instruction from AA..AA w/NN SNN
- *
- * k kill (Detach GDB)
- *
- * d Toggle debug flag
- * D Detach GDB
- *
- * Hct Set thread t for operations, OK or ENN
- * c = 'c' (step, cont), c = 'g' (other
- * operations)
- *
- * qC Query current thread ID QCpid
- * qfThreadInfo Get list of current threads (first) m<id>
- * qsThreadInfo " " " " " (subsequent)
- * qOffsets Get section offsets Text=x;Data=y;Bss=z
- *
- * TXX Find if thread XX is alive OK or ENN
- * ? What was the last sigval ? SNN (signal NN)
- * O Output to GDB console
- *
- * Remote communication protocol.
- *
- * A debug packet whose contents are <data> is encapsulated for
- * transmission in the form:
- *
- * $ <data> # CSUM1 CSUM2
- *
- * <data> must be ASCII alphanumeric and cannot include characters
- * '$' or '#'. If <data> starts with two characters followed by
- * ':', then the existing stubs interpret this as a sequence number.
- *
- * CSUM1 and CSUM2 are ascii hex representation of an 8-bit
- * checksum of <data>, the most significant nibble is sent first.
- * the hex digits 0-9,a-f are used.
- *
- * Receiver responds with:
- *
- * + - if CSUM is correct and ready for next packet
- * - - if CSUM is incorrect
- *
- * Responses can be run-length encoded to save space. A '*' means that
- * the next character is an ASCII encoding giving a repeat count which
- * stands for that many repetitions of the character preceding the '*'.
- * The encoding is n+29, yielding a printable character where n >=3
- * (which is where RLE starts to win). Don't use an n > 126.
- *
- * So "0* " means the same as "0000".
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/cacheflush.h>
-#include <asm/current.h>
-#include <asm/signal.h>
-#include <asm/pgtable.h>
-#include <asm/ptrace.h>
-#include <asm/kgdb.h>
-#include <asm/io.h>
-
-/* Function pointers for linkage */
-kgdb_debug_hook_t *kgdb_debug_hook;
-kgdb_bus_error_hook_t *kgdb_bus_err_hook;
-
-int (*kgdb_getchar)(void);
-EXPORT_SYMBOL_GPL(kgdb_getchar);
-void (*kgdb_putchar)(int);
-EXPORT_SYMBOL_GPL(kgdb_putchar);
-
-static void put_debug_char(int c)
-{
- if (!kgdb_putchar)
- return;
- (*kgdb_putchar)(c);
-}
-static int get_debug_char(void)
-{
- if (!kgdb_getchar)
- return -1;
- return (*kgdb_getchar)();
-}
-
-/* Num chars in in/out bound buffers, register packets need NUMREGBYTES * 2 */
-#define BUFMAX 1024
-#define NUMREGBYTES (MAXREG*4)
-#define OUTBUFMAX (NUMREGBYTES*2+512)
-
-enum {
- R0 = 0, R1, R2, R3, R4, R5, R6, R7,
- R8, R9, R10, R11, R12, R13, R14, R15,
- PC, PR, GBR, VBR, MACH, MACL, SR,
- /* */
- MAXREG
-};
-
-static unsigned int registers[MAXREG];
-struct kgdb_regs trap_registers;
-
-char kgdb_in_gdb_mode;
-char in_nmi; /* Set during NMI to prevent reentry */
-int kgdb_nofault; /* Boolean to ignore bus errs (i.e. in GDB) */
-
-/* Default values for SCI (can override via kernel args in setup.c) */
-#ifndef CONFIG_KGDB_DEFPORT
-#define CONFIG_KGDB_DEFPORT 1
-#endif
-
-#ifndef CONFIG_KGDB_DEFBAUD
-#define CONFIG_KGDB_DEFBAUD 115200
-#endif
-
-#if defined(CONFIG_KGDB_DEFPARITY_E)
-#define CONFIG_KGDB_DEFPARITY 'E'
-#elif defined(CONFIG_KGDB_DEFPARITY_O)
-#define CONFIG_KGDB_DEFPARITY 'O'
-#else /* CONFIG_KGDB_DEFPARITY_N */
-#define CONFIG_KGDB_DEFPARITY 'N'
-#endif
-
-#ifdef CONFIG_KGDB_DEFBITS_7
-#define CONFIG_KGDB_DEFBITS '7'
-#else /* CONFIG_KGDB_DEFBITS_8 */
-#define CONFIG_KGDB_DEFBITS '8'
-#endif
-
-/* SCI/UART settings, used in kgdb_console_setup() */
-int kgdb_portnum = CONFIG_KGDB_DEFPORT;
-EXPORT_SYMBOL_GPL(kgdb_portnum);
-int kgdb_baud = CONFIG_KGDB_DEFBAUD;
-EXPORT_SYMBOL_GPL(kgdb_baud);
-char kgdb_parity = CONFIG_KGDB_DEFPARITY;
-EXPORT_SYMBOL_GPL(kgdb_parity);
-char kgdb_bits = CONFIG_KGDB_DEFBITS;
-EXPORT_SYMBOL_GPL(kgdb_bits);
-
-/* Jump buffer for setjmp/longjmp */
-static jmp_buf rem_com_env;
-
-/* TRA differs sh3/4 */
-#if defined(CONFIG_CPU_SH3)
-#define TRA 0xffffffd0
-#elif defined(CONFIG_CPU_SH4)
-#define TRA 0xff000020
-#endif
-
-/* Macros for single step instruction identification */
-#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
-#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
-#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
- (((op) & 0x7f ) << 1))
-#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
-#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
-#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
-#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
- (((op) & 0x7ff) << 1))
-#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
-#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
-#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
-#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
- (((op) & 0x7ff) << 1))
-#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
-#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
-#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
-#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
-#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
-#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
-#define OPCODE_RTS(op) ((op) == 0xb)
-#define OPCODE_RTE(op) ((op) == 0x2b)
-
-#define SR_T_BIT_MASK 0x1
-#define STEP_OPCODE 0xc320
-#define BIOS_CALL_TRAP 0x3f
-
-/* Exception codes as per SH-4 core manual */
-#define ADDRESS_ERROR_LOAD_VEC 7
-#define ADDRESS_ERROR_STORE_VEC 8
-#define TRAP_VEC 11
-#define INVALID_INSN_VEC 12
-#define INVALID_SLOT_VEC 13
-#define NMI_VEC 14
-#define USER_BREAK_VEC 15
-#define SERIAL_BREAK_VEC 58
-
-/* Misc static */
-static int stepped_address;
-static short stepped_opcode;
-static char in_buffer[BUFMAX];
-static char out_buffer[OUTBUFMAX];
-
-static void kgdb_to_gdb(const char *s);
-
-/* Convert ch to hex */
-static int hex(const char ch)
-{
- if ((ch >= 'a') && (ch <= 'f'))
- return (ch - 'a' + 10);
- if ((ch >= '0') && (ch <= '9'))
- return (ch - '0');
- if ((ch >= 'A') && (ch <= 'F'))
- return (ch - 'A' + 10);
- return (-1);
-}
-
-/* Convert the memory pointed to by mem into hex, placing result in buf.
- Returns a pointer to the last char put in buf (null) */
-static char *mem_to_hex(const char *mem, char *buf, const int count)
-{
- int i;
- int ch;
- unsigned short s_val;
- unsigned long l_val;
-
- /* Check for 16 or 32 */
- if (count == 2 && ((long) mem & 1) == 0) {
- s_val = *(unsigned short *) mem;
- mem = (char *) &s_val;
- } else if (count == 4 && ((long) mem & 3) == 0) {
- l_val = *(unsigned long *) mem;
- mem = (char *) &l_val;
- }
- for (i = 0; i < count; i++) {
- ch = *mem++;
- *buf++ = highhex(ch);
- *buf++ = lowhex(ch);
- }
- *buf = 0;
- return (buf);
-}
-
-/* Convert the hex array pointed to by buf into binary, to be placed in mem.
- Return a pointer to the character after the last byte written */
-static char *hex_to_mem(const char *buf, char *mem, const int count)
-{
- int i;
- unsigned char ch;
-
- for (i = 0; i < count; i++) {
- ch = hex(*buf++) << 4;
- ch = ch + hex(*buf++);
- *mem++ = ch;
- }
- return (mem);
-}
-
-/* While finding valid hex chars, convert to an integer, then return it */
-static int hex_to_int(char **ptr, int *int_value)
-{
- int num_chars = 0;
- int hex_value;
-
- *int_value = 0;
-
- while (**ptr) {
- hex_value = hex(**ptr);
- if (hex_value >= 0) {
- *int_value = (*int_value << 4) | hex_value;
- num_chars++;
- } else
- break;
- (*ptr)++;
- }
- return num_chars;
-}
-
-/* Copy the binary array pointed to by buf into mem. Fix $, #,
- and 0x7d escaped with 0x7d. Return a pointer to the character
- after the last byte written. */
-static char *ebin_to_mem(const char *buf, char *mem, int count)
-{
- for (; count > 0; count--, buf++) {
- if (*buf == 0x7d)
- *mem++ = *(++buf) ^ 0x20;
- else
- *mem++ = *buf;
- }
- return mem;
-}
-
-/* Pack a hex byte */
-static char *pack_hex_byte(char *pkt, int byte)
-{
- *pkt++ = hexchars[(byte >> 4) & 0xf];
- *pkt++ = hexchars[(byte & 0xf)];
- return pkt;
-}
-
-/* Scan for the start char '$', read the packet and check the checksum */
-static void get_packet(char *buffer, int buflen)
-{
- unsigned char checksum;
- unsigned char xmitcsum;
- int i;
- int count;
- char ch;
-
- do {
- /* Ignore everything until the start character */
- while ((ch = get_debug_char()) != '$');
-
- checksum = 0;
- xmitcsum = -1;
- count = 0;
-
- /* Now, read until a # or end of buffer is found */
- while (count < (buflen - 1)) {
- ch = get_debug_char();
-
- if (ch == '#')
- break;
-
- checksum = checksum + ch;
- buffer[count] = ch;
- count = count + 1;
- }
-
- buffer[count] = 0;
-
- /* Continue to read checksum following # */
- if (ch == '#') {
- xmitcsum = hex(get_debug_char()) << 4;
- xmitcsum += hex(get_debug_char());
-
- /* Checksum */
- if (checksum != xmitcsum)
- put_debug_char('-'); /* Failed checksum */
- else {
- /* Ack successful transfer */
- put_debug_char('+');
-
- /* If a sequence char is present, reply
- the sequence ID */
- if (buffer[2] == ':') {
- put_debug_char(buffer[0]);
- put_debug_char(buffer[1]);
-
- /* Remove sequence chars from buffer */
- count = strlen(buffer);
- for (i = 3; i <= count; i++)
- buffer[i - 3] = buffer[i];
- }
- }
- }
- }
- while (checksum != xmitcsum); /* Keep trying while we fail */
-}
-
-/* Send the packet in the buffer with run-length encoding */
-static void put_packet(char *buffer)
-{
- int checksum;
- char *src;
- int runlen;
- int encode;
-
- do {
- src = buffer;
- put_debug_char('$');
- checksum = 0;
-
- /* Continue while we still have chars left */
- while (*src) {
- /* Check for runs up to 99 chars long */
- for (runlen = 1; runlen < 99; runlen++) {
- if (src[0] != src[runlen])
- break;
- }
-
- if (runlen > 3) {
- /* Got a useful amount, send encoding */
- encode = runlen + ' ' - 4;
- put_debug_char(*src); checksum += *src;
- put_debug_char('*'); checksum += '*';
- put_debug_char(encode); checksum += encode;
- src += runlen;
- } else {
- /* Otherwise just send the current char */
- put_debug_char(*src); checksum += *src;
- src += 1;
- }
- }
-
- /* '#' Separator, put high and low components of checksum */
- put_debug_char('#');
- put_debug_char(highhex(checksum));
- put_debug_char(lowhex(checksum));
- }
- while ((get_debug_char()) != '+'); /* While no ack */
-}
-
-/* A bus error has occurred - perform a longjmp to return execution and
- allow handling of the error */
-static void kgdb_handle_bus_error(void)
-{
- longjmp(rem_com_env, 1);
-}
-
-/* Translate SH-3/4 exception numbers to unix-like signal values */
-static int compute_signal(const int excep_code)
-{
- int sigval;
-
- switch (excep_code) {
-
- case INVALID_INSN_VEC:
- case INVALID_SLOT_VEC:
- sigval = SIGILL;
- break;
- case ADDRESS_ERROR_LOAD_VEC:
- case ADDRESS_ERROR_STORE_VEC:
- sigval = SIGSEGV;
- break;
-
- case SERIAL_BREAK_VEC:
- case NMI_VEC:
- sigval = SIGINT;
- break;
-
- case USER_BREAK_VEC:
- case TRAP_VEC:
- sigval = SIGTRAP;
- break;
-
- default:
- sigval = SIGBUS; /* "software generated" */
- break;
- }
-
- return (sigval);
-}
-
-/* Make a local copy of the registers passed into the handler (bletch) */
-static void kgdb_regs_to_gdb_regs(const struct kgdb_regs *regs,
- int *gdb_regs)
-{
- gdb_regs[R0] = regs->regs[R0];
- gdb_regs[R1] = regs->regs[R1];
- gdb_regs[R2] = regs->regs[R2];
- gdb_regs[R3] = regs->regs[R3];
- gdb_regs[R4] = regs->regs[R4];
- gdb_regs[R5] = regs->regs[R5];
- gdb_regs[R6] = regs->regs[R6];
- gdb_regs[R7] = regs->regs[R7];
- gdb_regs[R8] = regs->regs[R8];
- gdb_regs[R9] = regs->regs[R9];
- gdb_regs[R10] = regs->regs[R10];
- gdb_regs[R11] = regs->regs[R11];
- gdb_regs[R12] = regs->regs[R12];
- gdb_regs[R13] = regs->regs[R13];
- gdb_regs[R14] = regs->regs[R14];
- gdb_regs[R15] = regs->regs[R15];
- gdb_regs[PC] = regs->pc;
- gdb_regs[PR] = regs->pr;
- gdb_regs[GBR] = regs->gbr;
- gdb_regs[MACH] = regs->mach;
- gdb_regs[MACL] = regs->macl;
- gdb_regs[SR] = regs->sr;
- gdb_regs[VBR] = regs->vbr;
-}
-
-/* Copy local gdb registers back to kgdb regs, for later copy to kernel */
-static void gdb_regs_to_kgdb_regs(const int *gdb_regs,
- struct kgdb_regs *regs)
-{
- regs->regs[R0] = gdb_regs[R0];
- regs->regs[R1] = gdb_regs[R1];
- regs->regs[R2] = gdb_regs[R2];
- regs->regs[R3] = gdb_regs[R3];
- regs->regs[R4] = gdb_regs[R4];
- regs->regs[R5] = gdb_regs[R5];
- regs->regs[R6] = gdb_regs[R6];
- regs->regs[R7] = gdb_regs[R7];
- regs->regs[R8] = gdb_regs[R8];
- regs->regs[R9] = gdb_regs[R9];
- regs->regs[R10] = gdb_regs[R10];
- regs->regs[R11] = gdb_regs[R11];
- regs->regs[R12] = gdb_regs[R12];
- regs->regs[R13] = gdb_regs[R13];
- regs->regs[R14] = gdb_regs[R14];
- regs->regs[R15] = gdb_regs[R15];
- regs->pc = gdb_regs[PC];
- regs->pr = gdb_regs[PR];
- regs->gbr = gdb_regs[GBR];
- regs->mach = gdb_regs[MACH];
- regs->macl = gdb_regs[MACL];
- regs->sr = gdb_regs[SR];
- regs->vbr = gdb_regs[VBR];
-}
-
-/* Calculate the new address for after a step */
-static short *get_step_address(void)
-{
- short op = *(short *) trap_registers.pc;
- long addr;
-
- /* BT */
- if (OPCODE_BT(op)) {
- if (trap_registers.sr & SR_T_BIT_MASK)
- addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
- else
- addr = trap_registers.pc + 2;
- }
-
- /* BTS */
- else if (OPCODE_BTS(op)) {
- if (trap_registers.sr & SR_T_BIT_MASK)
- addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
- else
- addr = trap_registers.pc + 4; /* Not in delay slot */
- }
-
- /* BF */
- else if (OPCODE_BF(op)) {
- if (!(trap_registers.sr & SR_T_BIT_MASK))
- addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
- else
- addr = trap_registers.pc + 2;
- }
-
- /* BFS */
- else if (OPCODE_BFS(op)) {
- if (!(trap_registers.sr & SR_T_BIT_MASK))
- addr = trap_registers.pc + 4 + OPCODE_BTF_DISP(op);
- else
- addr = trap_registers.pc + 4; /* Not in delay slot */
- }
-
- /* BRA */
- else if (OPCODE_BRA(op))
- addr = trap_registers.pc + 4 + OPCODE_BRA_DISP(op);
-
- /* BRAF */
- else if (OPCODE_BRAF(op))
- addr = trap_registers.pc + 4
- + trap_registers.regs[OPCODE_BRAF_REG(op)];
-
- /* BSR */
- else if (OPCODE_BSR(op))
- addr = trap_registers.pc + 4 + OPCODE_BSR_DISP(op);
-
- /* BSRF */
- else if (OPCODE_BSRF(op))
- addr = trap_registers.pc + 4
- + trap_registers.regs[OPCODE_BSRF_REG(op)];
-
- /* JMP */
- else if (OPCODE_JMP(op))
- addr = trap_registers.regs[OPCODE_JMP_REG(op)];
-
- /* JSR */
- else if (OPCODE_JSR(op))
- addr = trap_registers.regs[OPCODE_JSR_REG(op)];
-
- /* RTS */
- else if (OPCODE_RTS(op))
- addr = trap_registers.pr;
-
- /* RTE */
- else if (OPCODE_RTE(op))
- addr = trap_registers.regs[15];
-
- /* Other */
- else
- addr = trap_registers.pc + 2;
-
- flush_icache_range(addr, addr + 2);
- return (short *) addr;
-}
-
-/* Set up a single-step. Replace the instruction immediately after the
- current instruction (i.e. next in the expected flow of control) with a
- trap instruction, so that returning will cause only a single instruction
- to be executed. Note that this model is slightly broken for instructions
- with delay slots (e.g. B[TF]S, BSR, BRA etc), where both the branch
- and the instruction in the delay slot will be executed. */
-static void do_single_step(void)
-{
- unsigned short *addr = 0;
-
- /* Determine where the target instruction will send us to */
- addr = get_step_address();
- stepped_address = (int)addr;
-
- /* Replace it */
- stepped_opcode = *(short *)addr;
- *addr = STEP_OPCODE;
-
- /* Flush and return */
- flush_icache_range((long) addr, (long) addr + 2);
-}
-
-/* Undo a single step */
-static void undo_single_step(void)
-{
- /* If we have stepped, put back the old instruction */
- /* Use stepped_address in case we stopped elsewhere */
- if (stepped_opcode != 0) {
- *(short*)stepped_address = stepped_opcode;
- flush_icache_range(stepped_address, stepped_address + 2);
- }
- stepped_opcode = 0;
-}
-
-/* Send a signal message */
-static void send_signal_msg(const int signum)
-{
- out_buffer[0] = 'S';
- out_buffer[1] = highhex(signum);
- out_buffer[2] = lowhex(signum);
- out_buffer[3] = 0;
- put_packet(out_buffer);
-}
-
-/* Reply that all was well */
-static void send_ok_msg(void)
-{
- strcpy(out_buffer, "OK");
- put_packet(out_buffer);
-}
-
-/* Reply that an error occurred */
-static void send_err_msg(void)
-{
- strcpy(out_buffer, "E01");
- put_packet(out_buffer);
-}
-
-/* Empty message indicates unrecognised command */
-static void send_empty_msg(void)
-{
- put_packet("");
-}
-
-/* Read memory due to 'm' message */
-static void read_mem_msg(void)
-{
- char *ptr;
- int addr;
- int length;
-
- /* Jmp, disable bus error handler */
- if (setjmp(rem_com_env) == 0) {
-
- kgdb_nofault = 1;
-
- /* Walk through, have m<addr>,<length> */
- ptr = &in_buffer[1];
- if (hex_to_int(&ptr, &addr) && (*ptr++ == ','))
- if (hex_to_int(&ptr, &length)) {
- ptr = 0;
- if (length * 2 > OUTBUFMAX)
- length = OUTBUFMAX / 2;
- mem_to_hex((char *) addr, out_buffer, length);
- }
- if (ptr)
- send_err_msg();
- else
- put_packet(out_buffer);
- } else
- send_err_msg();
-
- /* Restore bus error handler */
- kgdb_nofault = 0;
-}
-
-/* Write memory due to 'M' or 'X' message */
-static void write_mem_msg(int binary)
-{
- char *ptr;
- int addr;
- int length;
-
- if (setjmp(rem_com_env) == 0) {
-
- kgdb_nofault = 1;
-
- /* Walk through, have M<addr>,<length>:<data> */
- ptr = &in_buffer[1];
- if (hex_to_int(&ptr, &addr) && (*ptr++ == ','))
- if (hex_to_int(&ptr, &length) && (*ptr++ == ':')) {
- if (binary)
- ebin_to_mem(ptr, (char*)addr, length);
- else
- hex_to_mem(ptr, (char*)addr, length);
- flush_icache_range(addr, addr + length);
- ptr = 0;
- send_ok_msg();
- }
- if (ptr)
- send_err_msg();
- } else
- send_err_msg();
-
- /* Restore bus error handler */
- kgdb_nofault = 0;
-}
-
-/* Continue message */
-static void continue_msg(void)
-{
- /* Try to read optional parameter, PC unchanged if none */
- char *ptr = &in_buffer[1];
- int addr;
-
- if (hex_to_int(&ptr, &addr))
- trap_registers.pc = addr;
-}
-
-/* Continue message with signal */
-static void continue_with_sig_msg(void)
-{
- int signal;
- char *ptr = &in_buffer[1];
- int addr;
-
- /* Report limitation */
- kgdb_to_gdb("Cannot force signal in kgdb, continuing anyway.\n");
-
- /* Signal */
- hex_to_int(&ptr, &signal);
- if (*ptr == ';')
- ptr++;
-
- /* Optional address */
- if (hex_to_int(&ptr, &addr))
- trap_registers.pc = addr;
-}
-
-/* Step message */
-static void step_msg(void)
-{
- continue_msg();
- do_single_step();
-}
-
-/* Step message with signal */
-static void step_with_sig_msg(void)
-{
- continue_with_sig_msg();
- do_single_step();
-}
-
-/* Send register contents */
-static void send_regs_msg(void)
-{
- kgdb_regs_to_gdb_regs(&trap_registers, registers);
- mem_to_hex((char *) registers, out_buffer, NUMREGBYTES);
- put_packet(out_buffer);
-}
-
-/* Set register contents - currently can't set other thread's registers */
-static void set_regs_msg(void)
-{
- kgdb_regs_to_gdb_regs(&trap_registers, registers);
- hex_to_mem(&in_buffer[1], (char *) registers, NUMREGBYTES);
- gdb_regs_to_kgdb_regs(registers, &trap_registers);
- send_ok_msg();
-}
-
-#ifdef CONFIG_SH_KGDB_CONSOLE
-/*
- * Bring up the ports..
- */
-static int __init kgdb_serial_setup(void)
-{
- struct console dummy;
- return kgdb_console_setup(&dummy, 0);
-}
-#else
-#define kgdb_serial_setup() 0
-#endif
-
-/* The command loop, read and act on requests */
-static void kgdb_command_loop(const int excep_code, const int trapa_value)
-{
- int sigval;
-
- /* Enter GDB mode (e.g. after detach) */
- if (!kgdb_in_gdb_mode) {
- /* Do serial setup, notify user, issue preemptive ack */
- printk(KERN_NOTICE "KGDB: Waiting for GDB\n");
- kgdb_in_gdb_mode = 1;
- put_debug_char('+');
- }
-
- /* Reply to host that an exception has occurred */
- sigval = compute_signal(excep_code);
- send_signal_msg(sigval);
-
- /* TRAP_VEC exception indicates a software trap inserted in place of
- code by GDB so back up PC by one instruction, as this instruction
- will later be replaced by its original one. Do NOT do this for
- trap 0xff, since that indicates a compiled-in breakpoint which
- will not be replaced (and we would retake the trap forever) */
- if ((excep_code == TRAP_VEC) && (trapa_value != (0x3c << 2)))
- trap_registers.pc -= 2;
-
- /* Undo any stepping we may have done */
- undo_single_step();
-
- while (1) {
- out_buffer[0] = 0;
- get_packet(in_buffer, BUFMAX);
-
- /* Examine first char of buffer to see what we need to do */
- switch (in_buffer[0]) {
- case '?': /* Send which signal we've received */
- send_signal_msg(sigval);
- break;
-
- case 'g': /* Return the values of the CPU registers */
- send_regs_msg();
- break;
-
- case 'G': /* Set the value of the CPU registers */
- set_regs_msg();
- break;
-
- case 'm': /* Read LLLL bytes address AA..AA */
- read_mem_msg();
- break;
-
- case 'M': /* Write LLLL bytes address AA..AA, ret OK */
- write_mem_msg(0); /* 0 = data in hex */
- break;
-
- case 'X': /* Write LLLL bytes esc bin address AA..AA */
- if (kgdb_bits == '8')
- write_mem_msg(1); /* 1 = data in binary */
- else
- send_empty_msg();
- break;
-
- case 'C': /* Continue, signum included, we ignore it */
- continue_with_sig_msg();
- return;
-
- case 'c': /* Continue at address AA..AA (optional) */
- continue_msg();
- return;
-
- case 'S': /* Step, signum included, we ignore it */
- step_with_sig_msg();
- return;
-
- case 's': /* Step one instruction from AA..AA */
- step_msg();
- return;
-
- case 'k': /* 'Kill the program' with a kernel ? */
- break;
-
- case 'D': /* Detach from program, send reply OK */
- kgdb_in_gdb_mode = 0;
- send_ok_msg();
- get_debug_char();
- return;
-
- default:
- send_empty_msg();
- break;
- }
- }
-}
-
-/* There has been an exception, most likely a breakpoint. */
-static void handle_exception(struct pt_regs *regs)
-{
- int excep_code, vbr_val;
- int count;
- int trapa_value = ctrl_inl(TRA);
-
- /* Copy kernel regs (from stack) */
- for (count = 0; count < 16; count++)
- trap_registers.regs[count] = regs->regs[count];
- trap_registers.pc = regs->pc;
- trap_registers.pr = regs->pr;
- trap_registers.sr = regs->sr;
- trap_registers.gbr = regs->gbr;
- trap_registers.mach = regs->mach;
- trap_registers.macl = regs->macl;
-
- asm("stc vbr, %0":"=r"(vbr_val));
- trap_registers.vbr = vbr_val;
-
- /* Get excode for command loop call, user access */
- asm("stc r2_bank, %0":"=r"(excep_code));
-
- /* Act on the exception */
- kgdb_command_loop(excep_code, trapa_value);
-
- /* Copy back the (maybe modified) registers */
- for (count = 0; count < 16; count++)
- regs->regs[count] = trap_registers.regs[count];
- regs->pc = trap_registers.pc;
- regs->pr = trap_registers.pr;
- regs->sr = trap_registers.sr;
- regs->gbr = trap_registers.gbr;
- regs->mach = trap_registers.mach;
- regs->macl = trap_registers.macl;
-
- vbr_val = trap_registers.vbr;
- asm("ldc %0, vbr": :"r"(vbr_val));
-}
-
-asmlinkage void kgdb_handle_exception(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- handle_exception(regs);
-}
-
-/* Initialise the KGDB data structures and serial configuration */
-int __init kgdb_init(void)
-{
- in_nmi = 0;
- kgdb_nofault = 0;
- stepped_opcode = 0;
- kgdb_in_gdb_mode = 0;
-
- if (kgdb_serial_setup() != 0) {
- printk(KERN_NOTICE "KGDB: serial setup error\n");
- return -1;
- }
-
- /* Init ptr to exception handler */
- kgdb_debug_hook = handle_exception;
- kgdb_bus_err_hook = kgdb_handle_bus_error;
-
- /* Enter kgdb now if requested, or just report init done */
- printk(KERN_NOTICE "KGDB: stub is initialized.\n");
-
- return 0;
-}
-
-/* Make function available for "user messages"; console will use it too. */
-
-char gdbmsgbuf[BUFMAX];
-#define MAXOUT ((BUFMAX-2)/2)
-
-static void kgdb_msg_write(const char *s, unsigned count)
-{
- int i;
- int wcount;
- char *bufptr;
-
- /* 'O'utput */
- gdbmsgbuf[0] = 'O';
-
- /* Fill and send buffers... */
- while (count > 0) {
- bufptr = gdbmsgbuf + 1;
-
- /* Calculate how many this time */
- wcount = (count > MAXOUT) ? MAXOUT : count;
-
- /* Pack in hex chars */
- for (i = 0; i < wcount; i++)
- bufptr = pack_hex_byte(bufptr, s[i]);
- *bufptr = '\0';
-
- /* Move up */
- s += wcount;
- count -= wcount;
-
- /* Write packet */
- put_packet(gdbmsgbuf);
- }
-}
-
-static void kgdb_to_gdb(const char *s)
-{
- kgdb_msg_write(s, strlen(s));
-}
-
-#ifdef CONFIG_SH_KGDB_CONSOLE
-void kgdb_console_write(struct console *co, const char *s, unsigned count)
-{
- /* Bail if we're not talking to GDB */
- if (!kgdb_in_gdb_mode)
- return;
-
- kgdb_msg_write(s, count);
-}
-#endif
-
-#ifdef CONFIG_KGDB_SYSRQ
-static void sysrq_handle_gdb(int key, struct tty_struct *tty)
-{
- printk("Entering GDB stub\n");
- breakpoint();
-}
-
-static struct sysrq_key_op sysrq_gdb_op = {
- .handler = sysrq_handle_gdb,
- .help_msg = "Gdb",
- .action_msg = "GDB",
-};
-
-static int gdb_register_sysrq(void)
-{
- printk("Registering GDB sysrq handler\n");
- register_sysrq_key('g', &sysrq_gdb_op);
- return 0;
-}
-module_init(gdb_register_sysrq);
-#endif
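The removed stub carried its own copy of the GDB remote-serial framing documented in its header comment: packets go out as $<data>#<checksum>, where the checksum is the modulo-256 sum of the data bytes in two hex digits. A small host-side sketch of that framing; this is the standard protocol, independent of the deleted code:

#include <stdio.h>

/* Frame a GDB remote-protocol packet: $<data>#<two-digit hex checksum>. */
static void frame_packet(const char *data, char *out, size_t outlen)
{
        unsigned char sum = 0;
        const char *p;

        for (p = data; *p; p++)
                sum += (unsigned char)*p;

        snprintf(out, outlen, "$%s#%02x", data, sum);
}

int main(void)
{
        char pkt[64];

        frame_packet("OK", pkt, sizeof(pkt));
        puts(pkt);              /* prints "$OK#9a" */
        return 0;
}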
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
new file mode 100644
index 00000000000..83acbf3f6de
--- /dev/null
+++ b/arch/sh/kernel/kprobes.c
@@ -0,0 +1,585 @@
+/*
+ * Kernel probes (kprobes) for SuperH
+ *
+ * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
+ * Copyright (C) 2006 Lineo Solutions, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/uaccess.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
+static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
+
+#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
+#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
+#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
+#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023)
+#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000)
+#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003)
+
+#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00)
+#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00)
+
+#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00)
+#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900)
+
+#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b)
+#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b)
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
+
+ if (OPCODE_RTE(opcode))
+ return -EFAULT; /* Bad breakpoint */
+
+ p->opcode = opcode;
+
+ return 0;
+}
+
+void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = BREAKPOINT_INSTRUCTION;
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (*p->addr == BREAKPOINT_INSTRUCTION)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * If an illegal slot instruction exception occurs for an address
+ * containing a kprobe, remove the probe.
+ *
+ * Returns 0 if the exception was handled successfully, 1 otherwise.
+ */
+int __kprobes kprobe_handle_illslot(unsigned long pc)
+{
+ struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
+
+ if (p != NULL) {
+ printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
+ (unsigned int)pc + 2);
+ unregister_kprobe(p);
+ return 0;
+ }
+
+ return 1;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ struct kprobe *saved = this_cpu_ptr(&saved_next_opcode);
+
+ if (saved->addr) {
+ arch_disarm_kprobe(p);
+ arch_disarm_kprobe(saved);
+
+ saved->addr = NULL;
+ saved->opcode = 0;
+
+ saved = this_cpu_ptr(&saved_next_opcode2);
+ if (saved->addr) {
+ arch_disarm_kprobe(saved);
+
+ saved->addr = NULL;
+ saved->opcode = 0;
+ }
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+/*
+ * Singlestep is implemented by disabling the current kprobe and setting one
+ * on the next instruction, following branches. Two probes are set if the
+ * branch is conditional.
+ */
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ __this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc);
+
+ if (p != NULL) {
+ struct kprobe *op1, *op2;
+
+ arch_disarm_kprobe(p);
+
+ op1 = this_cpu_ptr(&saved_next_opcode);
+ op2 = this_cpu_ptr(&saved_next_opcode2);
+
+ if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
+ unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
+ op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
+ } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
+ unsigned long disp = (p->opcode & 0x0FFF);
+ op1->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+
+ } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
+ unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
+ op1->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 +
+ regs->regs[reg_nr]);
+
+ } else if (OPCODE_RTS(p->opcode)) {
+ op1->addr = (kprobe_opcode_t *) regs->pr;
+
+ } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
+ unsigned long disp = (p->opcode & 0x00FF);
+ /* case 1 */
+ op1->addr = p->addr + 1;
+ /* case 2 */
+ op2->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+ op2->opcode = *(op2->addr);
+ arch_arm_kprobe(op2);
+
+ } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
+ unsigned long disp = (p->opcode & 0x00FF);
+ /* case 1 */
+ op1->addr = p->addr + 2;
+ /* case 2 */
+ op2->addr =
+ (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
+ op2->opcode = *(op2->addr);
+ arch_arm_kprobe(op2);
+
+ } else {
+ op1->addr = p->addr + 1;
+ }
+
+ op1->opcode = *(op1->addr);
+ arch_arm_kprobe(op1);
+ }
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->pr;
+
+ /* Replace the return addr with trampoline addr */
+ regs->pr = (unsigned long)kretprobe_trampoline;
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr = NULL;
+ struct kprobe_ctlblk *kcb;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ addr = (kprobe_opcode_t *) (regs->pc);
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+ goto no_kprobe;
+ }
+ /* We have re-entered kprobe_handler() because
+ * another probe was hit while within the handler.
+ * Save the original kprobe's variables and just
+ * single-step the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else {
+ p = __this_cpu_read(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ /* Not one of ours: let kernel handle it */
+ if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+
+ss_probe:
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+/*
+ * For function-return probes, arch_init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile (".globl kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n\t"
+ "nop\n");
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ regs->pc = orig_ret_address;
+ kretprobe_hash_unlock(current, &flags);
+
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ return orig_ret_address;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ kprobe_opcode_t *addr = NULL;
+ struct kprobe *p = NULL;
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ p = this_cpu_ptr(&saved_next_opcode);
+ if (p->addr) {
+ arch_disarm_kprobe(p);
+ p->addr = NULL;
+ p->opcode = 0;
+
+ addr = __this_cpu_read(saved_current_opcode.addr);
+ __this_cpu_write(saved_current_opcode.addr, NULL);
+
+ p = get_kprobe(addr);
+ arch_arm_kprobe(p);
+
+ p = this_cpu_ptr(&saved_next_opcode2);
+ if (p->addr) {
+ arch_disarm_kprobe(p);
+ p->addr = NULL;
+ p->opcode = 0;
+ }
+ }
+
+ /* Restore the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ const struct exception_table_entry *entry;
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe, point the pc back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->pc = (unsigned long)cur->addr;
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We increment the nmissed count for accounting;
+ * the npre-/npostfault counts could also be used to
+ * account for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page fault. This could happen
+ * if the handler tried to access user space via
+ * copy_from_user(), get_user(), etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ if ((entry = search_exception_tables(regs->pc)) != NULL) {
+ regs->pc = entry->fixup;
+ return 1;
+ }
+
+ /*
+ * The exception table fixup could not handle it,
+ * so let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct kprobe *p = NULL;
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+ kprobe_opcode_t *addr = NULL;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ addr = (kprobe_opcode_t *) (args->regs->pc);
+ if (val == DIE_TRAP) {
+ if (!kprobe_running()) {
+ if (kprobe_handler(args->regs)) {
+ ret = NOTIFY_STOP;
+ } else {
+ /* Not a kprobe trap */
+ ret = NOTIFY_DONE;
+ }
+ } else {
+ p = get_kprobe(addr);
+ if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
+ (kcb->kprobe_status == KPROBE_REENTER)) {
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ } else {
+ if (kprobe_handler(args->regs)) {
+ ret = NOTIFY_STOP;
+ } else {
+ p = __this_cpu_read(current_kprobe);
+ if (p->break_handler &&
+ p->break_handler(p, args->regs))
+ ret = NOTIFY_STOP;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_r15 = regs->regs[15];
+ addr = kcb->jprobe_saved_r15;
+
+ /*
+ * TBD: As Linus pointed out, gcc assumes that the callee
+ * owns the argument space and could overwrite it, e.g.
+ * tailcall optimization. So, to be absolutely safe
+ * we also save and restore enough stack bytes to cover
+ * the argument area.
+ */
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+ MIN_STACK_SIZE(addr));
+
+ regs->pc = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long stack_addr = kcb->jprobe_saved_r15;
+ u8 *addr = (u8 *)regs->pc;
+
+ if ((addr >= (u8 *)jprobe_return) &&
+ (addr <= (u8 *)jprobe_return_end)) {
+ *regs = kcb->jprobe_saved_regs;
+
+ memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
+ MIN_STACK_SIZE(stack_addr));
+
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ preempt_enable_no_resched();
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)&kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
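
For illustration, a minimal kretprobe client that would exercise the trampoline machinery above might look like the sketch below. This is not part of the patch: the probed symbol, the module boilerplate and the message text are assumptions chosen for the example; only register_kretprobe()/unregister_kretprobe() and the r0 return-value convention follow from the code above.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs via trampoline_probe_handler() when the probed function returns. */
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* On SH, the return value of the probed function is in r0. */
	pr_info("probed function returned %lu\n", regs->regs[0]);
	return 0;
}

static struct kretprobe example_krp = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumed target symbol */
	.maxactive	= 4,		/* arbitrary instance count */
};

static int __init example_init(void)
{
	return register_kretprobe(&example_krp);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&example_krp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
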
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
new file mode 100644
index 00000000000..b880a7e2ace
--- /dev/null
+++ b/arch/sh/kernel/localtimer.c
@@ -0,0 +1,66 @@
+/*
+ * Dummy local timer
+ *
+ * Copyright (C) 2008 Paul Mundt
+ *
+ * cloned from:
+ *
+ * linux/arch/arm/mach-realview/localtimer.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/hardirq.h>
+#include <linux/irq.h>
+
+static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
+
+/*
+ * Used on SMP for either the local timer or SMP_MSG_TIMER
+ */
+void local_timer_interrupt(void)
+{
+ struct clock_event_device *clk = this_cpu_ptr(&local_clockevent);
+
+ irq_enter();
+ clk->event_handler(clk);
+ irq_exit();
+}
+
+static void dummy_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+}
+
+void local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
+
+ clk->name = "dummy_timer";
+ clk->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DUMMY;
+ clk->rating = 400;
+ clk->mult = 1;
+ clk->set_mode = dummy_timer_set_mode;
+ clk->broadcast = smp_timer_broadcast;
+ clk->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(clk);
+}
+
+void local_timer_stop(unsigned int cpu)
+{
+}
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 5c17de51987..9fea49f6e66 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -8,33 +8,33 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
+#include <linux/numa.h>
+#include <linux/ftrace.h>
+#include <linux/suspend.h>
+#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
+#include <asm/sh_bios.h>
+#include <asm/reboot.h>
-typedef NORET_TYPE void (*relocate_new_kernel_t)(
- unsigned long indirection_page,
- unsigned long reboot_code_buffer,
- unsigned long start_address,
- unsigned long vbr_reg) ATTRIB_NORET;
+typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
+ unsigned long reboot_code_buffer,
+ unsigned long start_address);
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
-extern void *gdb_vbr_vector;
-
-void machine_shutdown(void)
-{
-}
+extern void *vbr_base;
-void machine_crash_shutdown(struct pt_regs *regs)
+void native_machine_crash_shutdown(struct pt_regs *regs)
{
+ /* Nothing to do for UP, but definitely broken for SMP.. */
}
/*
@@ -70,19 +70,35 @@ static void kexec_info(struct kimage *image)
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
-NORET_TYPE void machine_kexec(struct kimage *image)
+void machine_kexec(struct kimage *image)
{
-
unsigned long page_list;
unsigned long reboot_code_buffer;
- unsigned long vbr_reg;
relocate_new_kernel_t rnk;
+ unsigned long entry;
+ unsigned long *ptr;
+ int save_ftrace_enabled;
+
+ /*
+ * Nicked from the mips version of machine_kexec():
+ * The generic kexec code builds a page list with physical
+ * addresses. Use phys_to_virt() to convert them to virtual.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = (unsigned long) phys_to_virt(*ptr);
+ }
-#if defined(CONFIG_SH_STANDARD_BIOS)
- vbr_reg = ((unsigned long )gdb_vbr_vector) - 0x100;
-#else
- vbr_reg = 0x80000000; // dummy
+#ifdef CONFIG_KEXEC_JUMP
+ if (image->preserve_context)
+ save_processor_state();
#endif
+
+ save_ftrace_enabled = __ftrace_enabled_save();
+
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
@@ -96,11 +112,96 @@ NORET_TYPE void machine_kexec(struct kimage *image)
memcpy((void *)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
- kexec_info(image);
+ kexec_info(image);
flush_cache_all();
+ sh_bios_vbr_reload();
+
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
- (*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg);
+ (*rnk)(page_list, reboot_code_buffer,
+ (unsigned long)phys_to_virt(image->start));
+
+#ifdef CONFIG_KEXEC_JUMP
+ asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
+
+ if (image->preserve_context)
+ restore_processor_state();
+
+ /* Convert page list back to physical addresses, what a mess. */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (*ptr & IND_INDIRECTION) ?
+ phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = virt_to_phys(*ptr);
+ }
+#endif
+
+ __ftrace_enabled_restore(save_ftrace_enabled);
+}
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_NUMA
+ VMCOREINFO_SYMBOL(node_data);
+ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
+#ifdef CONFIG_X2TLB
+ VMCOREINFO_CONFIG(X2TLB);
+#endif
}
+void __init reserve_crashkernel(void)
+{
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ &crash_size, &crash_base);
+ if (ret == 0 && crash_size > 0) {
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ }
+
+ if (crashk_res.end == crashk_res.start)
+ goto disable;
+
+ crash_size = PAGE_ALIGN(resource_size(&crashk_res));
+ if (!crashk_res.start) {
+ unsigned long max = memblock_end_of_DRAM() - memory_limit;
+ crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
+ if (!crashk_res.start) {
+ pr_err("crashkernel allocation failed\n");
+ goto disable;
+ }
+ } else {
+ ret = memblock_reserve(crashk_res.start, crash_size);
+ if (unlikely(ret < 0)) {
+ pr_err("crashkernel reservation failed - "
+ "memory is in use\n");
+ goto disable;
+ }
+ }
+
+ crashk_res.end = crashk_res.start + crash_size - 1;
+
+ /*
+ * Crash kernel trumps memory limit
+ */
+ if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+ memory_limit = 0;
+ pr_info("Disabled memory limit for crashkernel\n");
+ }
+
+ pr_info("Reserving %ldMB of memory at 0x%08lx "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crashk_res.start),
+ (unsigned long)(memblock_phys_mem_size() >> 20));
+
+ return;
+
+disable:
+ crashk_res.start = crashk_res.end = 0;
+}
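
For reference, reserve_crashkernel() above consumes the standard crashkernel= boot parameter parsed by parse_crashkernel(). An illustrative (made-up) value:

	crashkernel=64M@32M

requests a 64MB region at physical offset 32MB; when the offset is omitted, the memblock allocation path above picks a suitable base itself.
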
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index 129b2cfd18a..ec05f491c34 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -14,6 +14,8 @@
#include <linux/string.h>
#include <asm/machvec.h>
#include <asm/sections.h>
+#include <asm/addrspace.h>
+#include <asm/setup.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -116,22 +118,7 @@ void __init sh_mv_setup(void)
sh_mv.mv_##elem = generic_##elem; \
} while (0)
- mv_set(inb); mv_set(inw); mv_set(inl);
- mv_set(outb); mv_set(outw); mv_set(outl);
-
- mv_set(inb_p); mv_set(inw_p); mv_set(inl_p);
- mv_set(outb_p); mv_set(outw_p); mv_set(outl_p);
-
- mv_set(insb); mv_set(insw); mv_set(insl);
- mv_set(outsb); mv_set(outsw); mv_set(outsl);
-
- mv_set(readb); mv_set(readw); mv_set(readl);
- mv_set(writeb); mv_set(writew); mv_set(writel);
-
- mv_set(ioport_map);
- mv_set(ioport_unmap);
mv_set(irq_demux);
-
- if (!sh_mv.mv_nr_irqs)
- sh_mv.mv_nr_irqs = NR_IRQS;
+ mv_set(mode_pins);
+ mv_set(mem_init);
}
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index b3d0a03b4c7..1b525dedd29 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -27,62 +27,12 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
+#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
-
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-/* We don't need anything special. */
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-#ifdef CONFIG_SUPERH32
-#define COPY_UNALIGNED_WORD(sw, tw, align) \
-{ \
- void *__s = &(sw), *__t = &(tw); \
- unsigned short *__s2 = __s, *__t2 = __t; \
- unsigned char *__s1 = __s, *__t1 = __t; \
- switch ((align)) \
- { \
- case 0: \
- *(unsigned long *) __t = *(unsigned long *) __s; \
- break; \
- case 2: \
- *__t2++ = *__s2++; \
- *__t2 = *__s2; \
- break; \
- default: \
- *__t1++ = *__s1++; \
- *__t1++ = *__s1++; \
- *__t1++ = *__s1++; \
- *__t1 = *__s1; \
- break; \
- } \
-}
-#else
-/* One thing SHmedia doesn't screw up! */
-#define COPY_UNALIGNED_WORD(sw, tw, align) { (tw) = (sw); }
-#endif
+#include <asm/unaligned.h>
+#include <asm/dwarf.h>
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
@@ -96,7 +46,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
Elf32_Addr relocation;
uint32_t *location;
uint32_t value;
- int align;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
@@ -109,7 +58,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
- align = (int)location & 3;
#ifdef CONFIG_SUPERH64
/* For text addresses, bit2 of the st_other field indicates
@@ -117,20 +65,22 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
* SHmedia, the LSB of the symbol needs to be asserted
* for the CPU to be in SHmedia mode when it starts executing
* the branch target. */
- relocation |= (sym->st_other & 4);
+ relocation |= !!(sym->st_other & 4);
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_SH_NONE:
+ break;
case R_SH_DIR32:
- COPY_UNALIGNED_WORD (*location, value, align);
+ value = get_unaligned(location);
value += relocation;
- COPY_UNALIGNED_WORD (value, *location, align);
+ put_unaligned(value, location);
break;
case R_SH_REL32:
relocation = (relocation - (Elf32_Addr) location);
- COPY_UNALIGNED_WORD (*location, value, align);
+ value = get_unaligned(location);
value += relocation;
- COPY_UNALIGNED_WORD (value, *location, align);
+ put_unaligned(value, location);
break;
case R_SH_IMM_LOW16:
*location = (*location & ~0x3fffc00) |
@@ -159,24 +109,18 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- printk(KERN_ERR "module %s: REL RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- return 0;
+ int ret = 0;
+
+ ret |= module_dwarf_finalize(hdr, sechdrs, me);
+
+ return ret;
}
void module_arch_cleanup(struct module *mod)
{
+ module_dwarf_cleanup(mod);
}
diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
new file mode 100644
index 00000000000..ff0abbd1e65
--- /dev/null
+++ b/arch/sh/kernel/nmi_debug.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+
+enum nmi_action {
+ NMI_SHOW_STATE = 1 << 0,
+ NMI_SHOW_REGS = 1 << 1,
+ NMI_DIE = 1 << 2,
+ NMI_DEBOUNCE = 1 << 3,
+};
+
+static unsigned long nmi_actions;
+
+static int nmi_debug_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+
+ if (likely(val != DIE_NMI))
+ return NOTIFY_DONE;
+
+ if (nmi_actions & NMI_SHOW_STATE)
+ show_state();
+ if (nmi_actions & NMI_SHOW_REGS)
+ show_regs(args->regs);
+ if (nmi_actions & NMI_DEBOUNCE)
+ mdelay(10);
+ if (nmi_actions & NMI_DIE)
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nmi_debug_nb = {
+ .notifier_call = nmi_debug_notify,
+};
+
+static int __init nmi_debug_setup(char *str)
+{
+ char *p, *sep;
+
+ register_die_notifier(&nmi_debug_nb);
+
+ if (*str != '=')
+ return 0;
+
+ for (p = str + 1; *p; p = sep + 1) {
+ sep = strchr(p, ',');
+ if (sep)
+ *sep = 0;
+ if (strcmp(p, "state") == 0)
+ nmi_actions |= NMI_SHOW_STATE;
+ else if (strcmp(p, "regs") == 0)
+ nmi_actions |= NMI_SHOW_REGS;
+ else if (strcmp(p, "debounce") == 0)
+ nmi_actions |= NMI_DEBOUNCE;
+ else if (strcmp(p, "die") == 0)
+ nmi_actions |= NMI_DIE;
+ else
+ printk(KERN_WARNING "NMI: Unrecognized action `%s'\n",
+ p);
+ if (!sep)
+ break;
+ }
+
+ return 0;
+}
+__setup("nmi_debug", nmi_debug_setup);
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
new file mode 100644
index 00000000000..cc80b614b5f
--- /dev/null
+++ b/arch/sh/kernel/perf_callchain.c
@@ -0,0 +1,41 @@
+/*
+ * Performance event callchain support - SuperH architecture code
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <asm/unwinder.h>
+#include <asm/ptrace.h>
+
+static int callchain_stack(void *data, char *name)
+{
+ return 0;
+}
+
+static void callchain_address(void *data, unsigned long addr, int reliable)
+{
+ struct perf_callchain_entry *entry = data;
+
+ if (reliable)
+ perf_callchain_store(entry, addr);
+}
+
+static const struct stacktrace_ops callchain_ops = {
+ .stack = callchain_stack,
+ .address = callchain_address,
+};
+
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ perf_callchain_store(entry, regs->pc);
+
+ unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
+}
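
As a usage note, the kernel-side hook above is what userspace call-graph profiling ends up exercising, e.g. "perf record -g <command>": the sampled PC is stored first and then only the addresses the unwinder reports as reliable are appended to the callchain entry.
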
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
new file mode 100644
index 00000000000..02331672b6d
--- /dev/null
+++ b/arch/sh/kernel/perf_event.c
@@ -0,0 +1,400 @@
+/*
+ * Performance event support framework for SuperH hardware counters.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * Heavily based on the x86 and PowerPC implementations.
+ *
+ * x86:
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ *
+ * ppc:
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <linux/export.h>
+#include <asm/processor.h>
+
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+};
+
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+static struct sh_pmu *sh_pmu __read_mostly;
+
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Stub these out for now, do something more profound later.
+ */
+int reserve_pmc_hardware(void)
+{
+ return 0;
+}
+
+void release_pmc_hardware(void)
+{
+}
+
+static inline int sh_pmu_initialized(void)
+{
+ return !!sh_pmu;
+}
+
+const char *perf_pmu_name(void)
+{
+ if (!sh_pmu)
+ return NULL;
+
+ return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ if (!sh_pmu)
+ return 0;
+
+ return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+/*
+ * Release the PMU if this is the last perf_event.
+ */
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (!atomic_add_unless(&num_events, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_events) == 0)
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
+static int hw_perf_cache_event(int config, int *evp)
+{
+ unsigned long type, op, result;
+ int ev;
+
+ if (!sh_pmu->cache_events)
+ return -EINVAL;
+
+ /* unpack config */
+ type = config & 0xff;
+ op = (config >> 8) & 0xff;
+ result = (config >> 16) & 0xff;
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX ||
+ op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ev = (*sh_pmu->cache_events)[type][op][result];
+ if (ev == 0)
+ return -EOPNOTSUPP;
+ if (ev == -1)
+ return -EINVAL;
+ *evp = ev;
+ return 0;
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ int config = -1;
+ int err;
+
+ if (!sh_pmu_initialized())
+ return -ENODEV;
+
+ /*
+ * All of the on-chip counters are "limited", in that they have
+ * no interrupts, and are therefore unable to do sampling without
+ * further work and timer assistance.
+ */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * See if we need to reserve the counter.
+ *
+ * If no events are currently in use, then we have to take a
+ * mutex to ensure that we don't race with another task doing
+ * reserve_pmc_hardware or release_pmc_hardware.
+ */
+ err = 0;
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_events) == 0 &&
+ reserve_pmc_hardware())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ event->destroy = hw_perf_event_destroy;
+
+ switch (attr->type) {
+ case PERF_TYPE_RAW:
+ config = attr->config & sh_pmu->raw_event_mask;
+ break;
+ case PERF_TYPE_HW_CACHE:
+ err = hw_perf_cache_event(attr->config, &config);
+ if (err)
+ return err;
+ break;
+ case PERF_TYPE_HARDWARE:
+ if (attr->config >= sh_pmu->max_events)
+ return -EINVAL;
+
+ config = sh_pmu->event_map(attr->config);
+ break;
+ }
+
+ if (config == -1)
+ return -EINVAL;
+
+ hwc->config |= config;
+
+ return 0;
+}
+
+static void sh_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ s64 delta;
+ int shift = 0;
+
+ /*
+ * Depending on the counter configuration, they may or may not
+ * be chained, in which case the previous counter value can be
+ * updated underneath us if the lower-half overflows.
+ *
+ * Our tactic to handle this is to first atomically read and
+ * exchange a new raw count - then add that new-prev delta
+ * count to the generic counter atomically.
+ *
+ * As there is no interrupt associated with the overflow events,
+ * this is the simplest approach for maintaining consistency.
+ */
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = sh_pmu->read(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (counter-)time and add that to the generic counter.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count.
+ */
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
+
+static void sh_pmu_stop(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!(event->hw.state & PERF_HES_STOPPED)) {
+ sh_pmu->disable(hwc, idx);
+ cpuc->events[idx] = NULL;
+ event->hw.state |= PERF_HES_STOPPED;
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+ sh_perf_event_update(event, &event->hw, idx);
+ event->hw.state |= PERF_HES_UPTODATE;
+ }
+}
+
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (WARN_ON_ONCE(idx == -1))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ cpuc->events[idx] = event;
+ event->hw.state = 0;
+ sh_pmu->enable(hwc, idx);
+}
+
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ sh_pmu_stop(event, PERF_EF_UPDATE);
+ __clear_bit(event->hw.idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static int sh_pmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ int ret = -EAGAIN;
+
+ perf_pmu_disable(event->pmu);
+
+ if (__test_and_set_bit(idx, cpuc->used_mask)) {
+ idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
+ if (idx == sh_pmu->num_events)
+ goto out;
+
+ __set_bit(idx, cpuc->used_mask);
+ hwc->idx = idx;
+ }
+
+ sh_pmu->disable(hwc, idx);
+
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (flags & PERF_EF_START)
+ sh_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ ret = 0;
+out:
+ perf_pmu_enable(event->pmu);
+ return ret;
+}
+
+static void sh_pmu_read(struct perf_event *event)
+{
+ sh_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int sh_pmu_event_init(struct perf_event *event)
+{
+ int err;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HW_CACHE:
+ case PERF_TYPE_HARDWARE:
+ err = __hw_perf_event_init(event);
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (unlikely(err)) {
+ if (event->destroy)
+ event->destroy(event);
+ }
+
+ return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+ if (!sh_pmu_initialized())
+ return;
+
+ sh_pmu->disable_all();
+}
+
+static struct pmu pmu = {
+ .pmu_enable = sh_pmu_enable,
+ .pmu_disable = sh_pmu_disable,
+ .event_init = sh_pmu_event_init,
+ .add = sh_pmu_add,
+ .del = sh_pmu_del,
+ .start = sh_pmu_start,
+ .stop = sh_pmu_stop,
+ .read = sh_pmu_read,
+};
+
+static void sh_pmu_setup(int cpu)
+{
+ struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+ memset(cpuhw, 0, sizeof(struct cpu_hw_events));
+}
+
+static int
+sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ sh_pmu_setup(cpu);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+int register_sh_pmu(struct sh_pmu *_pmu)
+{
+ if (sh_pmu)
+ return -EBUSY;
+ sh_pmu = _pmu;
+
+ pr_info("Performance Events: %s support registered\n", _pmu->name);
+
+ WARN_ON(_pmu->num_events > MAX_HWEVENTS);
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+ perf_cpu_notifier(sh_pmu_notifier);
+ return 0;
+}
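
To show the shape of the interface register_sh_pmu() expects, here is a hedged sketch of a CPU backend. The field names and callback signatures follow the accesses made by the framework above; the event codes, counter count and the initcall used for registration are placeholders, not a description of any real SoC.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/processor.h>

/* Placeholder hardware event codes -- illustrative only. */
static const int example_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x0023,
	[PERF_COUNT_HW_INSTRUCTIONS]	= 0x0024,
};

static int example_event_map(int event)
{
	return example_general_events[event];
}

static u64 example_read(int idx)
{
	return 0;	/* would read the hardware counter 'idx' here */
}

static void example_enable(struct hw_perf_event *hwc, int idx)
{
	/* would program hwc->config into counter 'idx' and start it */
}

static void example_disable(struct hw_perf_event *hwc, int idx)
{
	/* would stop counter 'idx' */
}

static void example_enable_all(void) { }
static void example_disable_all(void) { }

static struct sh_pmu example_pmu = {
	.name		= "example",
	.num_events	= 2,
	.event_map	= example_event_map,
	.max_events	= ARRAY_SIZE(example_general_events),
	.raw_event_mask	= 0x00ff,
	.read		= example_read,
	.enable		= example_enable,
	.disable	= example_disable,
	.enable_all	= example_enable_all,
	.disable_all	= example_disable_all,
};

static int __init example_pmu_init(void)
{
	return register_sh_pmu(&example_pmu);
}
early_initcall(example_pmu_init);
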
diff --git a/arch/sh/kernel/pm.c b/arch/sh/kernel/pm.c
deleted file mode 100644
index 10ab62c9aed..00000000000
--- a/arch/sh/kernel/pm.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Generic Power Management Routine
- *
- * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License.
- */
-#include <linux/suspend.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <asm/freq.h>
-#include <asm/io.h>
-#include <asm/watchdog.h>
-#include <asm/pm.h>
-
-#define INTR_OFFSET 0x600
-
-#define STBCR 0xffffff82
-#define STBCR2 0xffffff88
-
-#define STBCR_STBY 0x80
-#define STBCR_MSTP2 0x04
-
-#define MCR 0xffffff68
-#define RTCNT 0xffffff70
-
-#define MCR_RMODE 2
-#define MCR_RFSH 4
-
-void pm_enter(void)
-{
- u8 stbcr, csr;
- u16 frqcr, mcr;
- u32 vbr_new, vbr_old;
-
- set_bl_bit();
-
- /* set wdt */
- csr = sh_wdt_read_csr();
- csr &= ~WTCSR_TME;
- csr |= WTCSR_CKS_4096;
- sh_wdt_write_csr(csr);
- csr = sh_wdt_read_csr();
- sh_wdt_write_cnt(0);
-
- /* disable PLL1 */
- frqcr = ctrl_inw(FRQCR);
- frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY);
- ctrl_outw(frqcr, FRQCR);
-
- /* enable standby */
- stbcr = ctrl_inb(STBCR);
- ctrl_outb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR);
-
- /* set self-refresh */
- mcr = ctrl_inw(MCR);
- ctrl_outw(mcr & ~MCR_RFSH, MCR);
-
- /* set interrupt handler */
- asm volatile("stc vbr, %0" : "=r" (vbr_old));
- vbr_new = get_zeroed_page(GFP_ATOMIC);
- udelay(50);
- memcpy((void*)(vbr_new + INTR_OFFSET),
- &wakeup_start, &wakeup_end - &wakeup_start);
- asm volatile("ldc %0, vbr" : : "r" (vbr_new));
-
- ctrl_outw(0, RTCNT);
- ctrl_outw(mcr | MCR_RFSH | MCR_RMODE, MCR);
-
- cpu_sleep();
-
- asm volatile("ldc %0, vbr" : : "r" (vbr_old));
-
- free_page(vbr_new);
-
- /* enable PLL1 */
- frqcr = ctrl_inw(FRQCR);
- frqcr |= FRQCR_PSTBY;
- ctrl_outw(frqcr, FRQCR);
- udelay(50);
- frqcr |= FRQCR_PLLEN;
- ctrl_outw(frqcr, FRQCR);
-
- ctrl_outb(stbcr, STBCR);
-
- clear_bl_bit();
-}
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 00000000000..53bc6c4c84e
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,76 @@
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/stackprotector.h>
+#include <asm/fpu.h>
+
+struct kmem_cache *task_xstate_cachep = NULL;
+unsigned int xstate_size;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+/*
+ * this gets called so that we can store lazy state into memory and copy the
+ * current task into the new thread.
+ */
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+#ifdef CONFIG_SUPERH32
+ unlazy_fpu(src, task_pt_regs(src));
+#endif
+ *dst = *src;
+
+ if (src->thread.xstate) {
+ dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+ GFP_KERNEL);
+ if (!dst->thread.xstate)
+ return -ENOMEM;
+ memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+ }
+
+ return 0;
+}
+
+void free_thread_xstate(struct task_struct *tsk)
+{
+ if (tsk->thread.xstate) {
+ kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
+ tsk->thread.xstate = NULL;
+ }
+}
+
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ free_thread_xstate(tsk);
+}
+
+void arch_task_cache_init(void)
+{
+ if (!xstate_size)
+ return;
+
+ task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
+ __alignof__(union thread_xstate),
+ SLAB_PANIC | SLAB_NOTRACK, NULL);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+# define HAVE_SOFTFP 1
+#else
+# define HAVE_SOFTFP 0
+#endif
+
+void init_thread_xstate(void)
+{
+ if (boot_cpu_data.flags & CPU_HAS_FPU)
+ xstate_size = sizeof(struct sh_fpu_hard_struct);
+ else if (HAVE_SOFTFP)
+ xstate_size = sizeof(struct sh_fpu_soft_struct);
+ else
+ xstate_size = 0;
+}
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index b98e37a1f54..2885fc9d9dc 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -7,131 +7,43 @@
*
* SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2008 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/elfcore.h>
-#include <linux/pm.h>
#include <linux/kallsyms.h>
-#include <linux/kexec.h>
-#include <linux/kdebug.h>
-#include <linux/tick.h>
-#include <linux/reboot.h>
#include <linux/fs.h>
-#include <linux/preempt.h>
+#include <linux/ftrace.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/prefetch.h>
+#include <linux/stackprotector.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-#include <asm/system.h>
-#include <asm/ubc.h>
#include <asm/fpu.h>
-
-static int hlt_counter;
-int ubc_usercnt = 0;
-
-void (*pm_idle)(void);
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static int __init nohlt_setup(char *__unused)
-{
- hlt_counter = 1;
- return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
- hlt_counter = 0;
- return 1;
-}
-__setup("hlt", hlt_setup);
-
-void default_idle(void)
-{
- if (!hlt_counter) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
- set_bl_bit();
- while (!need_resched())
- cpu_sleep();
- clear_bl_bit();
- set_thread_flag(TIF_POLLING_NRFLAG);
- } else
- while (!need_resched())
- cpu_relax();
-}
-
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- /* endless idle loop with no priority at all */
- while (1) {
- void (*idle)(void) = pm_idle;
-
- if (!idle)
- idle = default_idle;
-
- tick_nohz_stop_sched_tick();
- while (!need_resched())
- idle();
- tick_nohz_restart_sched_tick();
-
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- check_pgt_cache();
- }
-}
-
-void machine_restart(char * __unused)
-{
- /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
- asm volatile("ldc %0, sr\n\t"
- "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
-}
-
-void machine_halt(void)
-{
- local_irq_disable();
-
- while (1)
- cpu_sleep();
-}
-
-void machine_power_off(void)
-{
- if (pm_power_off)
- pm_power_off();
-}
+#include <asm/syscalls.h>
+#include <asm/switch_to.h>
void show_regs(struct pt_regs * regs)
{
printk("\n");
- printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
+ show_regs_print_info(KERN_DEFAULT);
+
print_symbol("PC is at %s\n", instruction_pointer(regs));
+ print_symbol("PR is at %s\n", regs->pr);
+
printk("PC : %08lx SP : %08lx SR : %08lx ",
regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
- printk("TEA : %08x ", ctrl_inl(MMU_TEA));
+ printk("TEA : %08x\n", __raw_readl(MMU_TEA));
#else
- printk(" ");
+ printk("\n");
#endif
- printk("%s\n", print_tainted());
printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
regs->regs[0],regs->regs[1],
@@ -149,59 +61,35 @@ void show_regs(struct pt_regs * regs)
regs->mach, regs->macl, regs->gbr, regs->pr);
show_trace(NULL, (unsigned long *)regs->regs[15], regs);
+ show_code(regs);
}
-/*
- * Create a kernel thread
- */
-
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- */
-extern void kernel_thread_helper(void);
-__asm__(".align 5\n"
- "kernel_thread_helper:\n\t"
- "jsr @r5\n\t"
- " nop\n\t"
- "mov.l 1f, r1\n\t"
- "jsr @r1\n\t"
- " mov r0, r4\n\t"
- ".align 2\n\t"
- "1:.long do_exit");
-
-/* Don't use this in BL=1(cli). Or else, CPU resets! */
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+void start_thread(struct pt_regs *regs, unsigned long new_pc,
+ unsigned long new_sp)
{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
- regs.regs[4] = (unsigned long)arg;
- regs.regs[5] = (unsigned long)fn;
-
- regs.pc = (unsigned long)kernel_thread_helper;
- regs.sr = (1 << 30);
+ regs->pr = 0;
+ regs->sr = SR_FD;
+ regs->pc = new_pc;
+ regs->regs[15] = new_sp;
- /* Ok, create the new process.. */
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
- &regs, 0, NULL, NULL);
+ free_thread_xstate(current);
}
+EXPORT_SYMBOL(start_thread);
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
- if (current->thread.ubc_pc) {
- current->thread.ubc_pc = 0;
- ubc_usercnt -= 1;
- }
}
void flush_thread(void)
{
-#if defined(CONFIG_SH_FPU)
struct task_struct *tsk = current;
+
+ flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
/* Forget lazy FPU state */
clear_fpu(tsk, task_pt_regs(tsk));
clear_used_math();
@@ -222,107 +110,88 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
struct task_struct *tsk = current;
fpvalid = !!tsk_used_math(tsk);
- if (fpvalid) {
- unlazy_fpu(tsk, regs);
- memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
- }
+ if (fpvalid)
+ fpvalid = !fpregs_get(tsk, NULL, 0,
+ sizeof(struct user_fpu_struct),
+ fpu, NULL);
#endif
return fpvalid;
}
+EXPORT_SYMBOL(dump_fpu);
asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct *p, struct pt_regs *regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU)
+
+#if defined(CONFIG_SH_DSP)
struct task_struct *tsk = current;
- unlazy_fpu(tsk, regs);
- p->thread.fpu = tsk->thread.fpu;
- copy_to_stopped_child_used_math(p);
+ if (is_dsp_enabled(tsk)) {
+ /* We can use the __save_dsp or just copy the struct:
+ * __save_dsp(p);
+ * p->thread.dsp_status.status |= SR_DSP
+ */
+ p->thread.dsp_status = tsk->thread.dsp_status;
+ }
#endif
- childregs = task_pt_regs(p);
- *childregs = *regs;
+ memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
- if (user_mode(regs)) {
- childregs->regs[15] = usp;
- ti->addr_limit = USER_DS;
- } else {
- childregs->regs[15] = (unsigned long)childregs;
+ childregs = task_pt_regs(p);
+ p->thread.sp = (unsigned long) childregs;
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ p->thread.pc = (unsigned long) ret_from_kernel_thread;
+ childregs->regs[4] = arg;
+ childregs->regs[5] = usp;
+ childregs->sr = SR_MD;
+#if defined(CONFIG_SH_FPU)
+ childregs->sr |= SR_FD;
+#endif
ti->addr_limit = KERNEL_DS;
+ ti->status &= ~TS_USEDFPU;
+ p->thread.fpu_counter = 0;
+ return 0;
}
+ *childregs = *current_pt_regs();
+
+ if (usp)
+ childregs->regs[15] = usp;
+ ti->addr_limit = USER_DS;
if (clone_flags & CLONE_SETTLS)
childregs->gbr = childregs->regs[0];
childregs->regs[0] = 0; /* Set return value for child */
-
- p->thread.sp = (unsigned long) childregs;
p->thread.pc = (unsigned long) ret_from_fork;
-
- p->thread.ubc_pc = 0;
-
return 0;
}
-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
- unsigned long val;
-
- val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
- val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
- ctrl_outl(val, UBC_CBR0);
- ctrl_outl(pc, UBC_CAR0);
- ctrl_outl(0x0, UBC_CAMR0);
- ctrl_outl(0x0, UBC_CBCR);
-
- val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
- ctrl_outl(val, UBC_CRR0);
-
- /* Read UBC register that we wrote last, for checking update */
- val = ctrl_inl(UBC_CRR0);
-
-#else /* CONFIG_CPU_SH4A */
- ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
- ctrl_outb(asid, UBC_BASRA);
-#endif
-
- ctrl_outl(0, UBC_BAMRA);
-
- if (current_cpu_data.type == CPU_SH7729 ||
- current_cpu_data.type == CPU_SH7710 ||
- current_cpu_data.type == CPU_SH7712) {
- ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
- ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
- } else {
- ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
- ctrl_outw(BRCR_PCBA, UBC_BRCR);
- }
-#endif /* CONFIG_CPU_SH4A */
-}
-
/*
* switch_to(x,y) should switch tasks from x to y.
*
*/
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next)
+__notrace_funcgraph struct task_struct *
+__switch_to(struct task_struct *prev, struct task_struct *next)
{
-#if defined(CONFIG_SH_FPU)
- unlazy_fpu(prev, task_pt_regs(prev));
+ struct thread_struct *next_t = &next->thread;
+
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ __stack_chk_guard = next->stack_canary;
#endif
+ unlazy_fpu(prev, task_pt_regs(prev));
+
+ /* we're going to use this soon, after a few expensive things */
+ if (next->thread.fpu_counter > 5)
+ prefetch(next_t->xstate);
+
#ifdef CONFIG_MMU
/*
* Restore the kernel mode register
@@ -333,100 +202,17 @@ struct task_struct *__switch_to(struct task_struct *prev,
: "r" (task_thread_info(next)));
#endif
- /* If no tasks are using the UBC, we're done */
- if (ubc_usercnt == 0)
- /* If no tasks are using the UBC, we're done */;
- else if (next->thread.ubc_pc && next->mm) {
- int asid = 0;
-#ifdef CONFIG_MMU
- asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
- ubc_set_tracing(asid, next->thread.ubc_pc);
- } else {
-#if defined(CONFIG_CPU_SH4A)
- ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
- ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
- ctrl_outw(0, UBC_BBRA);
- ctrl_outw(0, UBC_BBRB);
-#endif
- }
+ /*
+ * If the task has used the FPU in the last 5 timeslices, just do a
+ * full restore of the math state immediately to avoid the trap; the
+ * chances of needing the FPU again soon are obviously high now.
+ */
+ if (next->thread.fpu_counter > 5)
+ __fpu_state_restore();
return prev;
}
-asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
-#ifdef CONFIG_MMU
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
-#else
- /* fork almost works, enough to trick you into looking elsewhere :-( */
- return -EINVAL;
-#endif
-}
-
-asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
- unsigned long parent_tidptr,
- unsigned long child_tidptr,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- if (!newsp)
- newsp = regs->regs[15];
- return do_fork(clone_flags, newsp, regs, 0,
- (int __user *)parent_tidptr,
- (int __user *)child_tidptr);
-}
-
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
- 0, NULL, NULL);
-}
-
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
- char __user * __user *uenvp, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- int error;
- char *filename;
-
- filename = getname(ufilename);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
-
- error = do_execve(filename, uargv, uenvp, regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
- putname(filename);
-out:
- return error;
-}
-
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
@@ -448,19 +234,3 @@ unsigned long get_wchan(struct task_struct *p)
return pc;
}
-
-asmlinkage void break_point_trap(void)
-{
- /* Clear tracing. */
-#if defined(CONFIG_CPU_SH4A)
- ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
- ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
- ctrl_outw(0, UBC_BBRA);
- ctrl_outw(0, UBC_BBRB);
-#endif
- current->thread.ubc_pc = 0;
- ubc_usercnt -= 1;
-
- force_sig(SIGTRAP, current);
-}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 046999b1d1a..e2062e64334 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -21,114 +21,26 @@
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
#include <linux/io.h>
+#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
+#include <asm/fpu.h>
+#include <asm/switch_to.h>
struct task_struct *last_task_used_math = NULL;
+struct pt_regs fake_swapper_regs = { 0, };
-static int hlt_counter = 1;
-
-#define HARD_IDLE_TIMEOUT (HZ / 3)
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
-static int __init nohlt_setup(char *__unused)
-{
- hlt_counter = 1;
- return 1;
-}
-
-static int __init hlt_setup(char *__unused)
-{
- hlt_counter = 0;
- return 1;
-}
-
-__setup("nohlt", nohlt_setup);
-__setup("hlt", hlt_setup);
-
-static inline void hlt(void)
-{
- __asm__ __volatile__ ("sleep" : : : "memory");
-}
-
-/*
- * The idle loop on a uniprocessor SH..
- */
-void cpu_idle(void)
-{
- /* endless idle loop with no priority at all */
- while (1) {
- if (hlt_counter) {
- while (!need_resched())
- cpu_relax();
- } else {
- local_irq_disable();
- while (!need_resched()) {
- local_irq_enable();
- hlt();
- local_irq_disable();
- }
- local_irq_enable();
- }
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
-
-}
-
-void machine_restart(char * __unused)
-{
- extern void phys_stext(void);
-
- phys_stext();
-}
-
-void machine_halt(void)
-{
- for (;;);
-}
-
-void machine_power_off(void)
-{
-#if 0
- /* Disable watchdog timer */
- ctrl_outl(0xa5000000, WTCSR);
- /* Configure deep standby on sleep */
- ctrl_outl(0x03, STBCR);
-#endif
-
- __asm__ __volatile__ (
- "sleep\n\t"
- "synci\n\t"
- "nop;nop;nop;nop\n\t"
- );
-
- panic("Unexpected wakeup!\n");
-}
-
-void (*pm_power_off)(void) = machine_power_off;
-EXPORT_SYMBOL(pm_power_off);
-
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
{
unsigned long long ah, al, bh, bl, ch, cl;
printk("\n");
+ show_regs_print_info(KERN_DEFAULT);
ah = (regs->pc) >> 32;
al = (regs->pc) & 0xffffffff;
@@ -373,49 +285,6 @@ void show_regs(struct pt_regs * regs)
}
}
-struct task_struct * alloc_task_struct(void)
-{
- /* Get task descriptor pages */
- return (struct task_struct *)
- __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
-}
-
-void free_task_struct(struct task_struct *p)
-{
- free_pages((unsigned long) p, get_order(THREAD_SIZE));
-}
-
-/*
- * Create a kernel thread
- */
-ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
-{
- do_exit(fn(arg));
-}
-
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- * NOTE! Only a kernel-only process(ie the swapper or direct descendants
- * who haven't done an "execve()") should use this: it will work within
- * a system call from a "real" process, but the process memory space will
- * not be freed until both the parent and the child have exited.
- */
-int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
-{
- struct pt_regs regs;
-
- memset(&regs, 0, sizeof(regs));
- regs.regs[2] = (unsigned long)arg;
- regs.regs[3] = (unsigned long)fn;
-
- regs.pc = (unsigned long)kernel_thread_helper;
- regs.sr = (1 << 30);
-
- return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
- &regs, 0, NULL, NULL);
-}
-
/*
* Free current thread data structures etc..
*/
@@ -447,7 +316,7 @@ void exit_thread(void)
void flush_thread(void)
{
- /* Called by fs/exec.c (flush_old_exec) to remove traces of a
+ /* Called by fs/exec.c (setup_new_exec) to remove traces of a
* previously running executable. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
@@ -483,13 +352,13 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
- save_fpu(tsk, regs);
+ save_fpu(tsk);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
- memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
}
return fpvalid;
@@ -497,132 +366,59 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
return 0; /* Task didn't use the fpu at all. */
#endif
}
+EXPORT_SYMBOL(dump_fpu);
asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
-int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct *p, struct pt_regs *regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+ unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs;
- unsigned long long se; /* Sign extension */
#ifdef CONFIG_SH_FPU
- if(last_task_used_math == current) {
+ /* can't happen for a kernel thread */
+ if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
- regs->sr |= SR_FD;
+ current_pt_regs()->sr |= SR_FD;
}
#endif
/* Copy from sh version */
childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
+ p->thread.sp = (unsigned long) childregs;
- *childregs = *regs;
-
- if (user_mode(regs)) {
- childregs->regs[15] = usp;
- p->thread.uregs = childregs;
- } else {
- childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->regs[2] = (unsigned long)arg;
+ childregs->regs[3] = (unsigned long)usp;
+ childregs->sr = (1 << 30); /* not user_mode */
+ childregs->sr |= SR_FD; /* Invalidate FPU flag */
+ p->thread.pc = (unsigned long) ret_from_kernel_thread;
+ return 0;
}
-
- childregs->regs[9] = 0; /* Set return value for child */
- childregs->sr |= SR_FD; /* Invalidate FPU flag */
-
- p->thread.sp = (unsigned long) childregs;
- p->thread.pc = (unsigned long) ret_from_fork;
+ *childregs = *current_pt_regs();
/*
* Sign extend the edited stack.
- * Note that thread.pc and thread.pc will stay
+ * Note that thread.pc and thread.sp will stay
* 32-bit wide and context switch must take care
* of NEFF sign extension.
*/
+ if (usp)
+ childregs->regs[15] = neff_sign_extend(usp);
+ p->thread.uregs = childregs;
- se = childregs->regs[15];
- se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
- childregs->regs[15] = se;
-
- return 0;
-}
-
-asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs *pregs)
-{
- return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
-}
-
-asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs *pregs)
-{
- if (!newsp)
- newsp = pregs->regs[15];
- return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
-}
+ childregs->regs[9] = 0; /* Set return value for child */
+ childregs->sr |= SR_FD; /* Invalidate FPU flag */
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs *pregs)
-{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
-}
+ p->thread.pc = (unsigned long) ret_from_fork;
-/*
- * sys_execve() executes a new program.
- */
-asmlinkage int sys_execve(char *ufilename, char **uargv,
- char **uenvp, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs *pregs)
-{
- int error;
- char *filename;
-
- lock_kernel();
- filename = getname((char __user *)ufilename);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
-
- error = do_execve(filename,
- (char __user * __user *)uargv,
- (char __user * __user *)uenvp,
- pregs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
- putname(filename);
-out:
- unlock_kernel();
- return error;
+ return 0;
}
-/*
- * These bracket the sleeping functions..
- */
-extern void interruptible_sleep_on(wait_queue_head_t *q);
-
-#define mid_sched ((unsigned long) interruptible_sleep_on)
-
#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
@@ -664,41 +460,3 @@ unsigned long get_wchan(struct task_struct *p)
#endif
return pc;
}
-
-/* Provide a /proc/asids file that lists out the
- ASIDs currently associated with the processes. (If the DM.PC register is
- examined through the debug link, this shows ASID + PC. To make use of this,
- the PID->ASID relationship needs to be known. This is primarily for
- debugging.)
- */
-
-#if defined(CONFIG_SH64_PROC_ASIDS)
-static int
-asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
-{
- int len=0;
- struct task_struct *p;
- read_lock(&tasklist_lock);
- for_each_process(p) {
- int pid = p->pid;
-
- if (!pid)
- continue;
- if (p->mm)
- len += sprintf(buf+len, "%5d : %02lx\n", pid,
- asid_cache(smp_processor_id()));
- else
- len += sprintf(buf+len, "%5d : (none)\n", pid);
- }
- read_unlock(&tasklist_lock);
- *eof = 1;
- return len;
-}
-
-static int __init register_proc_asids(void)
-{
- create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
- return 0;
-}
-__initcall(register_proc_asids);
-#endif
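
The open-coded NEFF sign extension removed from copy_thread() above is presumably what the new neff_sign_extend() helper wraps. A minimal userspace sketch of that pattern, with NEFF, NEFF_SIGN and NEFF_MASK chosen here purely for illustration (not the kernel's definitions):

/* Hedged sketch: mirrors the open-coded NEFF sign extension that the
 * patch replaces with neff_sign_extend().  The macro values below are
 * assumptions for the demo only. */
#include <stdio.h>
#include <stdint.h>

#define NEFF       32
#define NEFF_SIGN  (1ULL << (NEFF - 1))
#define NEFF_MASK  (~0ULL << NEFF)

static uint64_t neff_sign_extend(uint64_t val)
{
	/* If the top effective-address bit is set, fill the upper bits. */
	return (val & NEFF_SIGN) ? (val | NEFF_MASK) : val;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)neff_sign_extend(0x7fffff00ULL));
	printf("%#llx\n", (unsigned long long)neff_sign_extend(0xbfffff00ULL));
	return 0;
}
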
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 00000000000..0a05983633c
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,33 @@
+#include <linux/ptrace.h>
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
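
The two helpers added above resolve register names against a per-architecture regoffset_table built from REG_OFFSET_NAME() entries (see the tables added to ptrace_32.c and ptrace_64.c below). A self-contained sketch of the same lookup pattern against a stand-in pt_regs layout, illustration only:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct mock_pt_regs {            /* stand-in, not the real SH pt_regs */
	unsigned long regs[16];
	unsigned long pc;
	unsigned long sr;
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) { .name = #r, .offset = offsetof(struct mock_pt_regs, r) }
#define REG_OFFSET_END     { .name = NULL, .offset = 0 }

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_END,
};

static int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -1;               /* the kernel version returns -EINVAL */
}

static const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == (int)offset)
			return roff->name;
	return NULL;
}

int main(void)
{
	int off = regs_query_register_offset("sr");

	printf("sr lives at offset %d (%s)\n", off, regs_query_register_name(off));
	return 0;
}
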
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index fddb547f3c2..668c81631c0 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -1,12 +1,14 @@
/*
- * linux/arch/sh/kernel/ptrace.c
+ * SuperH process tracing
*
- * Original x86 implementation:
- * By Ross Biro 1/23/92
- * edited by Linus Torvalds
+ * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
+ * Copyright (C) 2002 - 2009 Paul Mundt
*
- * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- * Audit support: Yuichi Nakamura <ynakam@hitachisoft.jp>
+ * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -15,21 +17,24 @@
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
-#include <linux/slab.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
+#include <asm/syscalls.h>
+#include <asm/fpu.h>
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
/*
* This routine will get a word off of the process kernel stack.
@@ -57,21 +62,67 @@ static inline int put_stack_long(struct task_struct *task, int offset,
return 0;
}
-static void ptrace_disable_singlestep(struct task_struct *child)
+void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs)
{
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+ struct perf_event_attr attr;
/*
- * Ensure the UBC is not programmed at the next context switch.
- *
- * Normally this is not needed but there are sequences such as
- * singlestep, signal delivery, and continue that leave the
- * ubc_pc non-zero leading to spurious SIGTRAPs.
+ * Disable the breakpoint request here since ptrace has defined a
+ * one-shot behaviour for breakpoint exceptions.
*/
- if (child->thread.ubc_pc != 0) {
- ubc_usercnt -= 1;
- child->thread.ubc_pc = 0;
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+ struct thread_struct *thread = &tsk->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ bp = thread->ptrace_bps[0];
+ if (!bp) {
+ ptrace_breakpoint_init(&attr);
+
+ attr.bp_addr = addr;
+ attr.bp_len = HW_BREAKPOINT_LEN_2;
+ attr.bp_type = HW_BREAKPOINT_R;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
+ NULL, tsk);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ thread->ptrace_bps[0] = bp;
+ } else {
+ int err;
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+ /* reenable breakpoint */
+ attr.disabled = false;
+ err = modify_user_hw_breakpoint(bp, &attr);
+ if (unlikely(err))
+ return err;
}
+
+ return 0;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+
+ set_single_step(child, pc);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
@@ -81,21 +132,248 @@ static void ptrace_disable_singlestep(struct task_struct *child)
*/
void ptrace_disable(struct task_struct *child)
{
- ptrace_disable_singlestep(child);
+ user_disable_single_step(child);
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- struct user * dummy = NULL;
+ const struct pt_regs *regs = task_pt_regs(target);
int ret;
- switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- ret = generic_ptrace_peekdata(child, addr, data);
- break;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ 0, 16 * sizeof(unsigned long));
+ if (!ret)
+ /* PC, PR, SR, GBR, MACH, MACL, TRA */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ offsetof(struct pt_regs, pc),
+ sizeof(struct pt_regs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ 0, 16 * sizeof(unsigned long));
+ if (!ret && count > 0)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ offsetof(struct pt_regs, pc),
+ sizeof(struct pt_regs));
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+#ifdef CONFIG_SH_FPU
+int fpregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ if ((boot_cpu_data.flags & CPU_HAS_FPU))
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->softfpu, 0, -1);
+}
+
+static int fpregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ set_stopped_child_used_math(target);
+
+ if ((boot_cpu_data.flags & CPU_HAS_FPU))
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->softfpu, 0, -1);
+}
+
+static int fpregs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return tsk_used_math(target) ? regset->n : 0;
+}
+#endif
+
+#ifdef CONFIG_SH_DSP
+static int dspregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_dspregs *regs =
+ (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
+ int ret;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
+ 0, sizeof(struct pt_dspregs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_dspregs), -1);
+ return ret;
+}
+
+static int dspregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_dspregs *regs =
+ (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
+ 0, sizeof(struct pt_dspregs));
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_dspregs), -1);
+
+ return ret;
+}
+
+static int dspregs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ return regs->sr & SR_DSP ? regset->n : 0;
+}
+#endif
+
+const struct pt_regs_offset regoffset_table[] = {
+ REGS_OFFSET_NAME(0),
+ REGS_OFFSET_NAME(1),
+ REGS_OFFSET_NAME(2),
+ REGS_OFFSET_NAME(3),
+ REGS_OFFSET_NAME(4),
+ REGS_OFFSET_NAME(5),
+ REGS_OFFSET_NAME(6),
+ REGS_OFFSET_NAME(7),
+ REGS_OFFSET_NAME(8),
+ REGS_OFFSET_NAME(9),
+ REGS_OFFSET_NAME(10),
+ REGS_OFFSET_NAME(11),
+ REGS_OFFSET_NAME(12),
+ REGS_OFFSET_NAME(13),
+ REGS_OFFSET_NAME(14),
+ REGS_OFFSET_NAME(15),
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(pr),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(gbr),
+ REG_OFFSET_NAME(mach),
+ REG_OFFSET_NAME(macl),
+ REG_OFFSET_NAME(tra),
+ REG_OFFSET_END,
+};
+
+/*
+ * These are our native regset flavours.
+ */
+enum sh_regset {
+ REGSET_GENERAL,
+#ifdef CONFIG_SH_FPU
+ REGSET_FPU,
+#endif
+#ifdef CONFIG_SH_DSP
+ REGSET_DSP,
+#endif
+};
+
+static const struct user_regset sh_regsets[] = {
+ /*
+ * Format is:
+ * R0 --> R15
+ * PC, PR, SR, GBR, MACH, MACL, TRA
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+
+#ifdef CONFIG_SH_FPU
+ [REGSET_FPU] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_fpu_struct) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = fpregs_get,
+ .set = fpregs_set,
+ .active = fpregs_active,
+ },
+#endif
+
+#ifdef CONFIG_SH_DSP
+ [REGSET_DSP] = {
+ .n = sizeof(struct pt_dspregs) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = dspregs_get,
+ .set = dspregs_set,
+ .active = dspregs_active,
+ },
+#endif
+};
+
+static const struct user_regset_view user_sh_native_view = {
+ .name = "sh",
+ .e_machine = EM_SH,
+ .regsets = sh_regsets,
+ .n = ARRAY_SIZE(sh_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_sh_native_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ unsigned long __user *datap = (unsigned long __user *)data;
+ int ret;
+
+ switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
@@ -107,30 +385,38 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
- if (addr == (long)&dummy->fpu.fpscr)
+ if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
- } else
- tmp = ((long *)&child->thread.fpu)
- [(addr - (long)&dummy->fpu) >> 2];
- } else if (addr == (long) &dummy->u_fpvalid)
+ } else {
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ tmp = ((unsigned long *)child->thread.xstate)
+ [index >> 2];
+ }
+ } else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
+ else if (addr == PT_TEXT_ADDR)
+ tmp = child->mm->start_code;
+ else if (addr == PT_DATA_ADDR)
+ tmp = child->mm->start_data;
+ else if (addr == PT_TEXT_END_ADDR)
+ tmp = child->mm->end_code;
+ else if (addr == PT_TEXT_LEN)
+ tmp = child->mm->end_code - child->mm->start_code;
else
tmp = 0;
- ret = put_user(tmp, (unsigned long __user *)data);
+ ret = put_user(tmp, datap);
break;
}
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = generic_ptrace_pokedata(child, addr, data);
- break;
-
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 ||
@@ -139,149 +425,111 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
- else if (addr >= (long) &dummy->fpu &&
- addr < (long) &dummy->u_fpvalid) {
+ else if (addr >= offsetof(struct user, fpu) &&
+ addr < offsetof(struct user, u_fpvalid)) {
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
- ((long *)&child->thread.fpu)
- [(addr - (long)&dummy->fpu) >> 2] = data;
+ ((unsigned long *)child->thread.xstate)
+ [index >> 2] = data;
ret = 0;
- } else if (addr == (long) &dummy->u_fpvalid) {
+ } else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
break;
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
- ptrace_disable_singlestep(child);
-
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
- }
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- ptrace_disable_singlestep(child);
- child->exit_code = SIGKILL;
- wake_up_process(child);
+ case PTRACE_GETREGS:
+ return copy_regset_to_user(child, &user_sh_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+ case PTRACE_SETREGS:
+ return copy_regset_from_user(child, &user_sh_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+#ifdef CONFIG_SH_FPU
+ case PTRACE_GETFPREGS:
+ return copy_regset_to_user(child, &user_sh_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+ case PTRACE_SETFPREGS:
+ return copy_regset_from_user(child, &user_sh_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+#endif
+#ifdef CONFIG_SH_DSP
+ case PTRACE_GETDSPREGS:
+ return copy_regset_to_user(child, &user_sh_native_view,
+ REGSET_DSP,
+ 0, sizeof(struct pt_dspregs),
+ datap);
+ case PTRACE_SETDSPREGS:
+ return copy_regset_from_user(child, &user_sh_native_view,
+ REGSET_DSP,
+ 0, sizeof(struct pt_dspregs),
+ datap);
+#endif
+ default:
+ ret = ptrace_request(child, request, addr, data);
break;
}
- case PTRACE_SINGLESTEP: { /* set the trap flag. */
- long pc;
- struct pt_regs *regs = NULL;
+ return ret;
+}
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- if ((child->ptrace & PT_DTRACE) == 0) {
- /* Spurious delayed TF traps may occur */
- child->ptrace |= PT_DTRACE;
- }
+static inline int audit_arch(void)
+{
+ int arch = EM_SH;
- pc = get_stack_long(child, (long)&regs->pc);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ arch |= __AUDIT_ARCH_LE;
+#endif
- /* Next scheduling will set up UBC */
- if (child->thread.ubc_pc == 0)
- ubc_usercnt += 1;
- child->thread.ubc_pc = pc;
+ return arch;
+}
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
- child->exit_code = data;
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
- break;
- }
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long ret = 0;
-#ifdef CONFIG_SH_DSP
- case PTRACE_GETDSPREGS: {
- unsigned long dp;
+ secure_computing_strict(regs->regs[0]);
- ret = -EIO;
- dp = ((unsigned long) child) + THREAD_SIZE -
- sizeof(struct pt_dspregs);
- if (*((int *) (dp - 4)) == SR_FD) {
- copy_to_user((void *)addr, (void *) dp,
- sizeof(struct pt_dspregs));
- ret = 0;
- }
- break;
- }
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->regs[0].
+ */
+ ret = -1L;
- case PTRACE_SETDSPREGS: {
- unsigned long dp;
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[0]);
- ret = -EIO;
- dp = ((unsigned long) child) + THREAD_SIZE -
- sizeof(struct pt_dspregs);
- if (*((int *) (dp - 4)) == SR_FD) {
- copy_from_user((void *) dp, (void *)addr,
- sizeof(struct pt_dspregs));
- ret = 0;
- }
- break;
- }
-#endif
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
+ audit_syscall_entry(audit_arch(), regs->regs[3],
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
- return ret;
+ return ret ?: regs->regs[0];
}
-asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
- struct task_struct *tsk = current;
-
- if (unlikely(current->audit_context) && entryexit)
- audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
- regs->regs[0]);
-
- if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
- !test_thread_flag(TIF_SINGLESTEP))
- goto out;
- if (!(tsk->ptrace & PT_PTRACED))
- goto out;
+ int step;
- /* the 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
- !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (tsk->exit_code) {
- send_sig(tsk->exit_code, tsk, 1);
- tsk->exit_code = 0;
- }
+ audit_syscall_exit(regs);
-out:
- if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[3],
- regs->regs[4], regs->regs[5],
- regs->regs[6], regs->regs[7]);
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[0]);
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
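
With the conversion above, PTRACE_SYSCALL stops are serviced by do_syscall_trace_enter()/do_syscall_trace_leave() rather than the old combined do_syscall_trace(). From the tracer's point of view nothing changes: each traced syscall still produces one entry stop and one exit stop. A minimal, architecture-neutral tracer that just observes those stops (a sketch, not part of the patch; error handling abbreviated):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	int status;
	long stops = 0;

	waitpid(child, &status, 0);            /* initial exec stop */
	while (1) {
		ptrace(PTRACE_SYSCALL, child, NULL, NULL);
		waitpid(child, &status, 0);
		if (WIFEXITED(status))
			break;
		stops++;                       /* syscall entry or exit stop */
	}
	printf("syscall stops observed: %ld (~%ld syscalls)\n", stops, stops / 2);
	return 0;
}
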
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index f6fbdfa6876..af90339dadc 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -2,7 +2,7 @@
* arch/sh/kernel/ptrace_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
+ * Copyright (C) 2003 - 2008 Paul Mundt
*
* Started from SH3/4 version:
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
@@ -20,19 +20,28 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
+#include <asm/syscalls.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
/* This mask defines the bits of the SR which the user is not allowed to
change, which are everything except S, Q, M, PR, SZ, FR. */
@@ -73,13 +82,13 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
if (last_task_used_math == task) {
enable_fpu();
- save_fpu(task, regs);
+ save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
- tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+ tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
return tmp;
}
@@ -105,32 +114,282 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
- fpinit(&task->thread.fpu.hard);
- set_stopped_child_used_math(task);
+ init_fpu(task);
} else if (last_task_used_math == task) {
enable_fpu();
- save_fpu(task, regs);
+ save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
- ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+ ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
return 0;
}
+void user_enable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = child->thread.uregs;
+
+ regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = child->thread.uregs;
+
+ regs->sr &= ~SR_SSTEP;
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
+ const struct pt_regs *regs = task_pt_regs(target);
int ret;
- switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- ret = generic_ptrace_peekdata(child, addr, data);
- break;
+ /* PC, SR, SYSCALL */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 0, 3 * sizeof(unsigned long long));
+
+ /* R1 -> R63 */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ offsetof(struct pt_regs, regs[0]),
+ 63 * sizeof(unsigned long long));
+ /* TR0 -> TR7 */
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->tregs,
+ offsetof(struct pt_regs, tregs[0]),
+ 8 * sizeof(unsigned long long));
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* PC, SR, SYSCALL */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 0, 3 * sizeof(unsigned long long));
+
+ /* R1 -> R63 */
+ if (!ret && count > 0)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->regs,
+ offsetof(struct pt_regs, regs[0]),
+ 63 * sizeof(unsigned long long));
+
+ /* TR0 -> TR7 */
+ if (!ret && count > 0)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->tregs,
+ offsetof(struct pt_regs, tregs[0]),
+ 8 * sizeof(unsigned long long));
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+#ifdef CONFIG_SH_FPU
+int fpregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+}
+
+static int fpregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ ret = init_fpu(target);
+ if (ret)
+ return ret;
+
+ set_stopped_child_used_math(target);
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.xstate->hardfpu, 0, -1);
+}
+
+static int fpregs_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return tsk_used_math(target) ? regset->n : 0;
+}
+#endif
+
+const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(pc),
+ REG_OFFSET_NAME(sr),
+ REG_OFFSET_NAME(syscall_nr),
+ REGS_OFFSET_NAME(0),
+ REGS_OFFSET_NAME(1),
+ REGS_OFFSET_NAME(2),
+ REGS_OFFSET_NAME(3),
+ REGS_OFFSET_NAME(4),
+ REGS_OFFSET_NAME(5),
+ REGS_OFFSET_NAME(6),
+ REGS_OFFSET_NAME(7),
+ REGS_OFFSET_NAME(8),
+ REGS_OFFSET_NAME(9),
+ REGS_OFFSET_NAME(10),
+ REGS_OFFSET_NAME(11),
+ REGS_OFFSET_NAME(12),
+ REGS_OFFSET_NAME(13),
+ REGS_OFFSET_NAME(14),
+ REGS_OFFSET_NAME(15),
+ REGS_OFFSET_NAME(16),
+ REGS_OFFSET_NAME(17),
+ REGS_OFFSET_NAME(18),
+ REGS_OFFSET_NAME(19),
+ REGS_OFFSET_NAME(20),
+ REGS_OFFSET_NAME(21),
+ REGS_OFFSET_NAME(22),
+ REGS_OFFSET_NAME(23),
+ REGS_OFFSET_NAME(24),
+ REGS_OFFSET_NAME(25),
+ REGS_OFFSET_NAME(26),
+ REGS_OFFSET_NAME(27),
+ REGS_OFFSET_NAME(28),
+ REGS_OFFSET_NAME(29),
+ REGS_OFFSET_NAME(30),
+ REGS_OFFSET_NAME(31),
+ REGS_OFFSET_NAME(32),
+ REGS_OFFSET_NAME(33),
+ REGS_OFFSET_NAME(34),
+ REGS_OFFSET_NAME(35),
+ REGS_OFFSET_NAME(36),
+ REGS_OFFSET_NAME(37),
+ REGS_OFFSET_NAME(38),
+ REGS_OFFSET_NAME(39),
+ REGS_OFFSET_NAME(40),
+ REGS_OFFSET_NAME(41),
+ REGS_OFFSET_NAME(42),
+ REGS_OFFSET_NAME(43),
+ REGS_OFFSET_NAME(44),
+ REGS_OFFSET_NAME(45),
+ REGS_OFFSET_NAME(46),
+ REGS_OFFSET_NAME(47),
+ REGS_OFFSET_NAME(48),
+ REGS_OFFSET_NAME(49),
+ REGS_OFFSET_NAME(50),
+ REGS_OFFSET_NAME(51),
+ REGS_OFFSET_NAME(52),
+ REGS_OFFSET_NAME(53),
+ REGS_OFFSET_NAME(54),
+ REGS_OFFSET_NAME(55),
+ REGS_OFFSET_NAME(56),
+ REGS_OFFSET_NAME(57),
+ REGS_OFFSET_NAME(58),
+ REGS_OFFSET_NAME(59),
+ REGS_OFFSET_NAME(60),
+ REGS_OFFSET_NAME(61),
+ REGS_OFFSET_NAME(62),
+ REGS_OFFSET_NAME(63),
+ TREGS_OFFSET_NAME(0),
+ TREGS_OFFSET_NAME(1),
+ TREGS_OFFSET_NAME(2),
+ TREGS_OFFSET_NAME(3),
+ TREGS_OFFSET_NAME(4),
+ TREGS_OFFSET_NAME(5),
+ TREGS_OFFSET_NAME(6),
+ TREGS_OFFSET_NAME(7),
+ REG_OFFSET_END,
+};
+
+/*
+ * These are our native regset flavours.
+ */
+enum sh_regset {
+ REGSET_GENERAL,
+#ifdef CONFIG_SH_FPU
+ REGSET_FPU,
+#endif
+};
+
+static const struct user_regset sh_regsets[] = {
+ /*
+ * Format is:
+ * PC, SR, SYSCALL,
+ * R1 --> R63,
+ * TR0 --> TR7,
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(long long),
+ .align = sizeof(long long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+
+#ifdef CONFIG_SH_FPU
+ [REGSET_FPU] = {
+ .core_note_type = NT_PRFPREG,
+ .n = sizeof(struct user_fpu_struct) /
+ sizeof(long long),
+ .size = sizeof(long long),
+ .align = sizeof(long long),
+ .get = fpregs_get,
+ .set = fpregs_set,
+ .active = fpregs_active,
+ },
+#endif
+};
+
+static const struct user_regset_view user_sh64_native_view = {
+ .name = "sh64",
+ .e_machine = EM_SH,
+ .regsets = sh_regsets,
+ .n = ARRAY_SIZE(sh_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_sh64_native_view;
+}
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
+
+ switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
@@ -143,22 +402,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- tmp = get_fpu_long(child, addr - offsetof(struct user, fpu));
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
- ret = put_user(tmp, (unsigned long *)data);
+ ret = put_user(tmp, datap);
break;
}
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = generic_ptrace_pokedata(child, addr, data);
- break;
-
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area. We must
disallow any changes to certain SR bits or u_fpvalid, since
@@ -186,76 +444,52 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
- ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data);
- }
- break;
-
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
- }
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
- }
-
- case PTRACE_SINGLESTEP: { /* set the trap flag. */
- struct pt_regs *regs;
-
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- if ((child->ptrace & PT_DTRACE) == 0) {
- /* Spurious delayed TF traps may occur */
- child->ptrace |= PT_DTRACE;
+ unsigned long index;
+ ret = init_fpu(child);
+ if (ret)
+ break;
+ index = addr - offsetof(struct user, fpu);
+ ret = put_fpu_long(child, index, data);
}
-
- regs = child->thread.uregs;
-
- regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
-
- child->exit_code = data;
- /* give it a chance to run. */
- wake_up_process(child);
- ret = 0;
break;
- }
+ case PTRACE_GETREGS:
+ return copy_regset_to_user(child, &user_sh64_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+ case PTRACE_SETREGS:
+ return copy_regset_from_user(child, &user_sh64_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ datap);
+#ifdef CONFIG_SH_FPU
+ case PTRACE_GETFPREGS:
+ return copy_regset_to_user(child, &user_sh64_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+ case PTRACE_SETFPREGS:
+ return copy_regset_from_user(child, &user_sh64_native_view,
+ REGSET_FPU,
+ 0, sizeof(struct user_fpu_struct),
+ datap);
+#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
+
return ret;
}
-asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+asmlinkage int sh64_ptrace(long request, long pid,
+ unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
- static int first_call = 1;
+ static unsigned long first_call;
- lock_kernel();
- if (first_call) {
+ if (!test_and_set_bit(0, &first_call)) {
/* Set WPC.DBRMODE to 0. This makes all debug events get
* delivered through RESVEC, i.e. into the handlers in entry.S.
* (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
@@ -265,45 +499,62 @@ asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
* the remote gdb.) */
printk("DBRMODE set to 0 to permit native debugging\n");
poke_real_address_q(WPC_DBRMODE, 0);
- first_call = 0;
}
- unlock_kernel();
return sys_ptrace(request, pid, addr, data);
}
-asmlinkage void syscall_trace(struct pt_regs *regs, int entryexit)
+static inline int audit_arch(void)
{
- struct task_struct *tsk = current;
+ int arch = EM_SH;
- if (unlikely(current->audit_context) && entryexit)
- audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
- regs->regs[9]);
+#ifdef CONFIG_64BIT
+ arch |= __AUDIT_ARCH_64BIT;
+#endif
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ arch |= __AUDIT_ARCH_LE;
+#endif
- if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
- !test_thread_flag(TIF_SINGLESTEP))
- goto out;
- if (!(tsk->ptrace & PT_PTRACED))
- goto out;
+ return arch;
+}
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) &&
- !test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
+asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ long long ret = 0;
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (tsk->exit_code) {
- send_sig(tsk->exit_code, tsk, 1);
- tsk->exit_code = 0;
- }
+ secure_computing_strict(regs->regs[9]);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+ * error, but leave the original number in regs->regs[0].
+ */
+ ret = -1LL;
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[9]);
-out:
- if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(AUDIT_ARCH_SH, regs->regs[1],
- regs->regs[2], regs->regs[3],
- regs->regs[4], regs->regs[5]);
+ audit_syscall_entry(audit_arch(), regs->regs[1],
+ regs->regs[2], regs->regs[3],
+ regs->regs[4], regs->regs[5]);
+
+ return ret ?: regs->regs[9];
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[9]);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */
@@ -320,9 +571,10 @@ asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
}
/* Called with interrupts disabled */
-asmlinkage void do_software_break_point(unsigned long long vec,
- struct pt_regs *regs)
+BUILD_TRAP_HANDLER(breakpoint)
{
+ TRAP_HANDLER_DECL;
+
/* We need to forward step the PC, to counteract the backstep done
in signal.c. */
local_irq_enable();
@@ -337,5 +589,5 @@ asmlinkage void do_software_break_point(unsigned long long vec,
*/
void ptrace_disable(struct task_struct *child)
{
- /* nothing to do.. */
+ user_disable_single_step(child);
}
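
sh64_ptrace() above now performs its one-time WPC.DBRMODE setup with test_and_set_bit() instead of a lock_kernel()-protected "first_call" flag. The same run-once idiom, sketched with portable C11 atomics rather than the kernel primitive:

#include <stdio.h>
#include <stdatomic.h>

static atomic_flag dbrmode_done = ATOMIC_FLAG_INIT;

static void maybe_init_dbrmode(void)
{
	/* Only the first caller to win the flag performs the setup. */
	if (!atomic_flag_test_and_set(&dbrmode_done))
		printf("DBRMODE set to 0 to permit native debugging\n");
}

int main(void)
{
	maybe_init_dbrmode();
	maybe_init_dbrmode();   /* second call is a no-op */
	return 0;
}
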
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
new file mode 100644
index 00000000000..04afe5b2066
--- /dev/null
+++ b/arch/sh/kernel/reboot.c
@@ -0,0 +1,102 @@
+#include <linux/pm.h>
+#include <linux/kexec.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#ifdef CONFIG_SUPERH32
+#include <asm/watchdog.h>
+#endif
+#include <asm/addrspace.h>
+#include <asm/reboot.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+#ifdef CONFIG_SUPERH32
+static void watchdog_trigger_immediate(void)
+{
+ sh_wdt_write_cnt(0xFF);
+ sh_wdt_write_csr(0xC2);
+}
+#endif
+
+static void native_machine_restart(char * __unused)
+{
+ local_irq_disable();
+
+ /* Destroy all of the TLBs in preparation for reset by MMU */
+ __flush_tlb_global();
+
+ /* Address error with SR.BL=1 first. */
+ trigger_address_error();
+
+#ifdef CONFIG_SUPERH32
+ /* If that fails or is unsupported, go for the watchdog next. */
+ watchdog_trigger_immediate();
+#endif
+
+ /*
+ * Give up and sleep.
+ */
+ while (1)
+ cpu_sleep();
+}
+
+static void native_machine_shutdown(void)
+{
+ smp_send_stop();
+}
+
+static void native_machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+}
+
+static void native_machine_halt(void)
+{
+ /* stop other cpus */
+ machine_shutdown();
+
+ /* stop this cpu */
+ stop_this_cpu(NULL);
+}
+
+struct machine_ops machine_ops = {
+ .power_off = native_machine_power_off,
+ .shutdown = native_machine_shutdown,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+ .crash_shutdown = native_machine_crash_shutdown,
+#endif
+};
+
+void machine_power_off(void)
+{
+ machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+ machine_ops.shutdown();
+}
+
+void machine_restart(char *cmd)
+{
+ machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+ machine_ops.halt();
+}
+
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ machine_ops.crash_shutdown(regs);
+}
+#endif
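
The new reboot.c funnels machine_restart(), machine_halt() and machine_power_off() through a machine_ops table so individual entries can be overridden. A userspace-sized sketch of that ops-table pattern (names mirror the kernel code, but this is illustration only):

#include <stdio.h>

struct machine_ops {
	void (*restart)(char *cmd);
	void (*halt)(void);
	void (*power_off)(void);
};

static void native_restart(char *cmd) { printf("restart (%s)\n", cmd ? cmd : "default"); }
static void native_halt(void)         { printf("halt\n"); }
static void native_power_off(void)    { printf("power off\n"); }

/* Default table; a platform could patch individual entries. */
static struct machine_ops machine_ops = {
	.restart   = native_restart,
	.halt      = native_halt,
	.power_off = native_power_off,
};

static void machine_restart(char *cmd) { machine_ops.restart(cmd); }
static void machine_halt(void)         { machine_ops.halt(); }
static void machine_power_off(void)    { machine_ops.power_off(); }

int main(void)
{
	machine_restart(NULL);
	machine_halt();
	machine_power_off();
	return 0;
}
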
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
index c66cb3209db..fcc9934fb97 100644
--- a/arch/sh/kernel/relocate_kernel.S
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -4,6 +4,8 @@
*
* LANDISK/sh4 is supported. Maybe, SH archtecture works well.
*
+ * 2009-03-18 Magnus Damm - Added Kexec Jump support
+ *
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
@@ -16,23 +18,141 @@ relocate_new_kernel:
/* r4 = indirection_page */
/* r5 = reboot_code_buffer */
/* r6 = start_address */
- /* r7 = vbr_reg */
- mov.l 10f,r8 /* PAGE_SIZE */
- mov.l 11f,r9 /* P2SEG */
+ mov.l 10f, r0 /* PAGE_SIZE */
+ add r5, r0 /* setup new stack at end of control page */
+
+ /* save r15->r8 to new stack */
+ mov.l r15, @-r0
+ mov r0, r15
+ mov.l r14, @-r15
+ mov.l r13, @-r15
+ mov.l r12, @-r15
+ mov.l r11, @-r15
+ mov.l r10, @-r15
+ mov.l r9, @-r15
+ mov.l r8, @-r15
+
+ /* save other random registers */
+ sts.l macl, @-r15
+ sts.l mach, @-r15
+ stc.l gbr, @-r15
+ stc.l ssr, @-r15
+ stc.l sr, @-r15
+ sts.l pr, @-r15
+ stc.l spc, @-r15
+
+ /* switch to bank1 and save r7->r0 */
+ mov.l 12f, r9
+ stc sr, r8
+ or r9, r8
+ ldc r8, sr
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
+
+ /* switch to bank0 and save r7->r0 */
+ mov.l 12f, r9
+ not r9, r9
+ stc sr, r8
+ and r9, r8
+ ldc r8, sr
+ mov.l r7, @-r15
+ mov.l r6, @-r15
+ mov.l r5, @-r15
+ mov.l r4, @-r15
+ mov.l r3, @-r15
+ mov.l r2, @-r15
+ mov.l r1, @-r15
+ mov.l r0, @-r15
+
+ mov.l r4, @-r15 /* save indirection page again */
+
+ bsr swap_pages /* swap pages before jumping to new kernel */
+ nop
+
+ mova 11f, r0
+ mov.l r15, @r0 /* save pointer to stack */
+
+ jsr @r6 /* hand over control to new kernel */
+ nop
+
+ mov.l 11f, r15 /* get pointer to stack */
+ mov.l @r15+, r4 /* restore r4 to get indirection page */
- /* stack setting */
- add r8,r5
- mov r5,r15
+ bsr swap_pages /* swap pages back to previous state */
+ nop
+ /* make sure bank0 is active and restore r0->r7 */
+ mov.l 12f, r9
+ not r9, r9
+ stc sr, r8
+ and r9, r8
+ ldc r8, sr
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+
+ /* switch to bank1 and restore r0->r7 */
+ mov.l 12f, r9
+ stc sr, r8
+ or r9, r8
+ ldc r8, sr
+ mov.l @r15+, r0
+ mov.l @r15+, r1
+ mov.l @r15+, r2
+ mov.l @r15+, r3
+ mov.l @r15+, r4
+ mov.l @r15+, r5
+ mov.l @r15+, r6
+ mov.l @r15+, r7
+
+ /* switch back to bank0 */
+ mov.l 12f, r9
+ not r9, r9
+ stc sr, r8
+ and r9, r8
+ ldc r8, sr
+
+ /* restore other random registers */
+ ldc.l @r15+, spc
+ lds.l @r15+, pr
+ ldc.l @r15+, sr
+ ldc.l @r15+, ssr
+ ldc.l @r15+, gbr
+ lds.l @r15+, mach
+ lds.l @r15+, macl
+
+ /* restore r8->r15 */
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ mov.l @r15+, r12
+ mov.l @r15+, r13
+ mov.l @r15+, r14
+ mov.l @r15+, r15
+ rts
+ nop
+
+swap_pages:
bra 1f
- mov r4,r0 /* cmd = indirection_page */
+ mov r4,r0 /* cmd = indirection_page */
0:
mov.l @r4+,r0 /* cmd = *ind++ */
-1: /* addr = (cmd | P2SEG) & 0xfffffff0 */
+1: /* addr = cmd & 0xfffffff0 */
mov r0,r2
- or r9,r2
mov #-16,r1
and r1,r2
@@ -40,57 +160,70 @@ relocate_new_kernel:
tst #1,r0
bt 2f
bra 0b
- mov r2,r5
+ mov r2,r5
2: /* else if(cmd & IND_INDIRECTION) ind = addr */
tst #2,r0
bt 3f
bra 0b
- mov r2,r4
+ mov r2,r4
-3: /* else if(cmd & IND_DONE) goto 6 */
+3: /* else if(cmd & IND_DONE) return */
tst #4,r0
bt 4f
- bra 6f
- nop
+ rts
+ nop
4: /* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */
tst #8,r0
bt 0b
- mov r8,r3
+ mov.l 10f,r3 /* PAGE_SIZE */
shlr2 r3
shlr2 r3
5:
dt r3
- mov.l @r2+,r1 /* 16n+0 */
- mov.l r1,@r5
- add #4,r5
- mov.l @r2+,r1 /* 16n+4 */
- mov.l r1,@r5
- add #4,r5
- mov.l @r2+,r1 /* 16n+8 */
- mov.l r1,@r5
- add #4,r5
- mov.l @r2+,r1 /* 16n+12 */
- mov.l r1,@r5
- add #4,r5
+
+ /* regular kexec just overwrites the destination page
+ * with the contents of the source page.
+ * for the kexec jump case we need to swap the contents
+ * of the pages.
+ * to keep it simple swap the contents for both cases.
+ */
+ mov.l @(0, r2), r8
+ mov.l @(0, r5), r1
+ mov.l r8, @(0, r5)
+ mov.l r1, @(0, r2)
+
+ mov.l @(4, r2), r8
+ mov.l @(4, r5), r1
+ mov.l r8, @(4, r5)
+ mov.l r1, @(4, r2)
+
+ mov.l @(8, r2), r8
+ mov.l @(8, r5), r1
+ mov.l r8, @(8, r5)
+ mov.l r1, @(8, r2)
+
+ mov.l @(12, r2), r8
+ mov.l @(12, r5), r1
+ mov.l r8, @(12, r5)
+ mov.l r1, @(12, r2)
+
+ add #16,r5
+ add #16,r2
bf 5b
bra 0b
- nop
-6:
-#ifdef CONFIG_SH_STANDARD_BIOS
- ldc r7, vbr
-#endif
- jmp @r6
- nop
+ nop
.align 2
10:
.long PAGE_SIZE
11:
- .long P2SEG
+ .long 0
+12:
+ .long 0x20000000 ! RB=1
relocate_new_kernel_end:
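
As the comment in swap_pages notes, the page loop now exchanges the source and destination contents 16 bytes (four longwords) at a time instead of copying one page over the other, so a kexec-jump return can restore the previous kernel image. Roughly the same operation in C, assuming a 4 KiB page size for the demo:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

static void swap_page(uint32_t *dst, uint32_t *src)
{
	for (size_t i = 0; i < PAGE_SIZE / sizeof(uint32_t); i += 4) {
		for (size_t j = 0; j < 4; j++) {   /* 4 longwords == 16 bytes */
			uint32_t tmp = dst[i + j];
			dst[i + j] = src[i + j];
			src[i + j] = tmp;
		}
	}
}

int main(void)
{
	static uint32_t a[PAGE_SIZE / sizeof(uint32_t)];
	static uint32_t b[PAGE_SIZE / sizeof(uint32_t)];

	memset(a, 0xaa, sizeof(a));
	memset(b, 0x55, sizeof(b));
	swap_page(a, b);
	printf("a[0]=%#x b[0]=%#x\n", a[0], b[0]);
	return 0;
}
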
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
new file mode 100644
index 00000000000..5124aeb28c3
--- /dev/null
+++ b/arch/sh/kernel/return_address.c
@@ -0,0 +1,59 @@
+/*
+ * arch/sh/kernel/return_address.c
+ *
+ * Copyright (C) 2009 Matt Fleming
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/dwarf.h>
+
+#ifdef CONFIG_DWARF_UNWINDER
+
+void *return_address(unsigned int depth)
+{
+ struct dwarf_frame *frame;
+ unsigned long ra;
+ int i;
+
+ for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
+ struct dwarf_frame *tmp;
+
+ tmp = dwarf_unwind_stack(ra, frame);
+ if (!tmp)
+ return NULL;
+
+ if (frame)
+ dwarf_free_frame(frame);
+
+ frame = tmp;
+
+ if (!frame || !frame->return_addr)
+ break;
+
+ ra = frame->return_addr;
+ }
+
+ /* Failed to unwind the stack to the specified depth. */
+ WARN_ON(i != depth + 1);
+
+ if (frame)
+ dwarf_free_frame(frame);
+
+ return (void *)ra;
+}
+
+#else
+
+void *return_address(unsigned int depth)
+{
+ return NULL;
+}
+
+#endif
+
+EXPORT_SYMBOL_GPL(return_address);
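
return_address(depth) above walks DWARF frames inside the kernel; a rough userspace analogue of the "walk n frames and report the caller" idea can be built on glibc's backtrace(3). Not the same mechanism, and best compiled without heavy inlining, but it shows the depth convention:

#include <stdio.h>
#include <execinfo.h>

static void *return_address(unsigned int depth)
{
	void *frames[16];
	int n = backtrace(frames, 16);

	/* frames[0] is this function; depth counts callers above it. */
	return (depth + 1 < (unsigned int)n) ? frames[depth + 1] : NULL;
}

static void leaf(void)
{
	printf("caller of leaf(): %p\n", return_address(1));
}

int main(void)
{
	leaf();
	return 0;
}
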
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
deleted file mode 100644
index 184119eeae5..00000000000
--- a/arch/sh/kernel/semaphore.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Just taken from alpha implementation.
- * This can't work well, perhaps.
- */
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/semaphore-helper.h>
-
-DEFINE_SPINLOCK(semaphore_wake_lock);
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit. ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore. The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
- wake_one_more(sem);
- wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible. This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return. If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR \
- struct task_struct *tsk = current; \
- wait_queue_t wait; \
- init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state) \
- \
- \
- tsk->state = (task_state); \
- add_wait_queue(&sem->wait, &wait); \
- \
- /* \
- * Ok, we're set up. sem->count is known to be less than zero \
- * so we must wait. \
- * \
- * We can let go the lock for purposes of waiting. \
- * We re-acquire it after awaking so as to protect \
- * all semaphore operations. \
- * \
- * If "up()" is called before we call waking_non_zero() then \
- * we will catch it right away. If it is called later then \
- * we will have to go through a wakeup cycle to catch it. \
- * \
- * Multiple waiters contend for the semaphore lock to see \
- * who gets to gate through and who has to wait some more. \
- */ \
- for (;;) {
-
-#define DOWN_TAIL(task_state) \
- tsk->state = (task_state); \
- } \
- tsk->state = TASK_RUNNING; \
- remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
- DOWN_VAR
- DOWN_HEAD(TASK_UNINTERRUPTIBLE)
- if (waking_non_zero(sem))
- break;
- schedule();
- DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
- int ret = 0;
- DOWN_VAR
- DOWN_HEAD(TASK_INTERRUPTIBLE)
-
- ret = waking_non_zero_interruptible(sem, tsk);
- if (ret)
- {
- if (ret == 1)
- /* ret != 0 only if we get interrupted -arca */
- ret = 0;
- break;
- }
- schedule();
- DOWN_TAIL(TASK_INTERRUPTIBLE)
- return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
- return waking_non_zero_trylock(sem);
-}
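
The deleted file's comments describe the classic counting-semaphore scheme: a count of available slots, sleepers blocking in down() when the count hits zero, and up() waking one of them. The generic replacement now lives in common kernel code; the sketch below only illustrates that idea with pthreads (link with -lpthread):

#include <stdio.h>
#include <pthread.h>

struct sem {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             count;
};

static void sem_down(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->count == 0)
		pthread_cond_wait(&s->wait, &s->lock);   /* sleep until up() */
	s->count--;
	pthread_mutex_unlock(&s->lock);
}

static void sem_up(struct sem *s)
{
	pthread_mutex_lock(&s->lock);
	s->count++;
	pthread_cond_signal(&s->wait);                   /* wake one sleeper */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct sem s = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1 };

	sem_down(&s);
	sem_up(&s);
	printf("count back to %d\n", s.count);
	return 0;
}
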
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index ff4f54a47c0..de19cfa768f 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -4,7 +4,7 @@
* This file handles the architecture-dependent parts of initialization
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
*/
#include <linux/screen_info.h>
#include <linux/ioport.h>
@@ -12,7 +12,6 @@
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
-#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
@@ -23,6 +22,13 @@
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
+#include <linux/err.h>
+#include <linux/crash_dump.h>
+#include <linux/mmzone.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/memblock.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -31,7 +37,10 @@
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
+#include <asm/smp.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
+#include <asm/sparsemem.h>
/*
* Initialize loops_per_jiffy as 10000000 (1000MIPS).
@@ -41,7 +50,9 @@
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
[0] = {
.type = CPU_SH_NONE,
+ .family = CPU_FAMILY_UNKNOWN,
.loops_per_jiffy = 10000000,
+ .phys_bits = MAX_PHYSMEM_BITS,
},
};
EXPORT_SYMBOL(cpu_data);
@@ -51,6 +62,7 @@ EXPORT_SYMBOL(cpu_data);
* sh_mv= on the command line, prior to .machvec.init teardown.
*/
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
+EXPORT_SYMBOL(sh_mv);
#ifdef CONFIG_VT
struct screen_info screen_info;
@@ -74,173 +86,157 @@ static struct resource data_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
+static struct resource bss_resource = {
+ .name = "Kernel bss",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+};
+
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);
+unsigned long memory_limit = 0;
+
+static struct resource mem_resources[MAX_NUMNODES];
int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
static int __init early_parse_mem(char *p)
{
- unsigned long size;
-
- memory_start = (unsigned long)__va(__MEMORY_START);
- size = memparse(p, &p);
-
- if (size > __MEMORY_SIZE) {
- static char msg[] __initdata = KERN_ERR
- "Using mem= to increase the size of kernel memory "
- "is not allowed.\n"
- " Recompile the kernel with the correct value for "
- "CONFIG_MEMORY_SIZE.\n";
- printk(msg);
- return 0;
- }
+ if (!p)
+ return 1;
- memory_end = memory_start + size;
+ memory_limit = PAGE_ALIGN(memparse(p, &p));
+
+ pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
return 0;
}
early_param("mem", early_parse_mem);
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(void)
+void __init check_for_initrd(void)
{
- unsigned long curr_pfn, last_pfn, pages;
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long start, end;
+
+ /*
+ * Check for the rare cases where boot loaders adhere to the boot
+ * ABI.
+ */
+ if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
+ goto disable;
+
+ start = INITRD_START + __MEMORY_START;
+ end = start + INITRD_SIZE;
+
+ if (unlikely(end <= start))
+ goto disable;
+ if (unlikely(start & ~PAGE_MASK)) {
+ pr_err("initrd must be page aligned\n");
+ goto disable;
+ }
+
+ if (unlikely(start < __MEMORY_START)) {
+ pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n",
+ start, __MEMORY_START);
+ goto disable;
+ }
+
+ if (unlikely(end > memblock_end_of_DRAM())) {
+ pr_err("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ end, (unsigned long)memblock_end_of_DRAM());
+ goto disable;
+ }
/*
- * We are rounding up the start address of usable memory:
+ * If we got this far in spite of the boot loader's best efforts
+ * to the contrary, assume we actually have a valid initrd and
+ * fix up the root dev.
*/
- curr_pfn = PFN_UP(__MEMORY_START);
+ ROOT_DEV = Root_RAM0;
/*
- * ... and at the end of the usable range downwards:
+ * Address sanitization
*/
- last_pfn = PFN_DOWN(__pa(memory_end));
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = initrd_start + INITRD_SIZE;
+
+ memblock_reserve(__pa(initrd_start), INITRD_SIZE);
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
+ return;
- pages = last_pfn - curr_pfn;
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
+disable:
+ pr_info("initrd disabled\n");
+ initrd_start = initrd_end = 0;
+#endif
}
-#ifdef CONFIG_KEXEC
-static void __init reserve_crashkernel(void)
+void calibrate_delay(void)
{
- unsigned long long free_mem;
- unsigned long long crash_size, crash_base;
- int ret;
-
- free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
-
- ret = parse_crashkernel(boot_command_line, free_mem,
- &crash_size, &crash_base);
- if (ret == 0 && crash_size) {
- if (crash_base <= 0) {
- printk(KERN_INFO "crashkernel reservation failed - "
- "you have to specify a base address\n");
- return;
- }
-
- if (reserve_bootmem(crash_base, crash_size,
- BOOTMEM_EXCLUSIVE) < 0) {
- printk(KERN_INFO "crashkernel reservation failed - "
- "memory is in use\n");
- return;
- }
-
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(free_mem >> 20));
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- }
+ struct clk *clk = clk_get(NULL, "cpu_clk");
+
+ if (IS_ERR(clk))
+ panic("Need a sane CPU clock definition!");
+
+ loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;
+
+ printk(KERN_INFO "Calibrating delay loop (skipped)... "
+ "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
+ loops_per_jiffy/(500000/HZ),
+ (loops_per_jiffy/(5000/HZ)) % 100,
+ loops_per_jiffy);
}
-#else
-static inline void __init reserve_crashkernel(void)
-{}
-#endif
-void __init setup_bootmem_allocator(unsigned long free_pfn)
+void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
+ unsigned long end_pfn)
{
- unsigned long bootmap_size;
+ struct resource *res = &mem_resources[nid];
+ unsigned long start, end;
- /*
- * Find a proper area for the bootmem bitmap. After this
- * bootstrap step all allocations (until the page allocator
- * is intact) must be done via bootmem_alloc().
- */
- bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
- min_low_pfn, max_low_pfn);
+ WARN_ON(res->name); /* max one active range per node for now */
- add_active_range(0, min_low_pfn, max_low_pfn);
- register_bootmem_low_pages();
+ start = start_pfn << PAGE_SHIFT;
+ end = end_pfn << PAGE_SHIFT;
- node_set_online(0);
+ res->name = "System RAM";
+ res->start = start;
+ res->end = end - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ if (request_resource(&iomem_resource, res)) {
+ pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
+ start_pfn, end_pfn);
+ return;
+ }
/*
- * Reserve the kernel text and
- * Reserve the bootmem bitmap. We do this in two steps (first step
- * was init_bootmem()), because this catches the (definitely buggy)
- * case of us accidentally initializing the bootmem allocator with
- * an invalid RAM area.
+ * We don't know which RAM region contains kernel data or
+ * the reserved crashkernel region, so try it repeatedly
+ * and let the resource manager test it.
*/
- reserve_bootmem(__MEMORY_START+PAGE_SIZE,
- (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START,
- BOOTMEM_DEFAULT);
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+#ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+#endif
/*
- * reserve physical page 0 - it's a special BIOS page on many boxes,
- * enabling clean reboots, SMP operation, laptop functions.
+ * Also make sure that there is a PMB mapping that covers this
+ * range before we attempt to activate it, to avoid reset by MMU.
+ * We can hit this path with NUMA or memory hot-add.
*/
- reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
-
- sparse_memory_present_with_active_regions(0);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- ROOT_DEV = Root_RAM0;
+ pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+ PAGE_KERNEL);
- if (LOADER_TYPE && INITRD_START) {
- if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START + __MEMORY_START,
- INITRD_SIZE, BOOTMEM_DEFAULT);
- initrd_start = INITRD_START + PAGE_OFFSET +
- __MEMORY_START;
- initrd_end = initrd_start + INITRD_SIZE;
- } else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
- INITRD_START + INITRD_SIZE,
- max_low_pfn << PAGE_SHIFT);
- initrd_start = 0;
- }
- }
-#endif
-
- reserve_crashkernel();
+ memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
+ &memblock.memory, nid);
}
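__add_active_range() above converts the PFN range to byte addresses, publishes it as a "System RAM" resource, and then unconditionally tries to nest the kernel text/data/bss (and, with kexec, the crashkernel window) underneath it; request_resource() simply fails for children that fall outside this particular RAM window, which is why no explicit range checks are needed. A rough sketch of that parent/child pattern, with invented names and addresses:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/ioport.h>

    /* Hypothetical node-0 RAM window and one child region inside it. */
    static struct resource example_ram = {
            .name  = "System RAM",
            .start = 0x08000000,
            .end   = 0x0fffffff,
            .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
    };

    static struct resource example_text = {
            .name  = "Kernel code",
            .start = 0x08001000,
            .end   = 0x081fffff,
            .flags = IORESOURCE_MEM,
    };

    static int __init example_claim(void)
    {
            if (request_resource(&iomem_resource, &example_ram))
                    return -EBUSY;  /* overlaps something already claimed */

            /* Fails harmlessly if the child lies outside example_ram. */
            request_resource(&example_ram, &example_text);
            return 0;
    }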
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-static void __init setup_memory(void)
+void __init __weak plat_early_device_setup(void)
{
- unsigned long start_pfn;
-
- /*
- * Partially used pages are not usable - thus
- * we are rounding upwards:
- */
- start_pfn = PFN_UP(__pa(_end));
- setup_bootmem_allocator(start_pfn);
}
-#else
-extern void __init setup_memory(void);
-#endif
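The empty __weak plat_early_device_setup() above is only a fallback; a CPU or board file that needs devices available before the ordinary initcalls (early serial, timers and the like) provides its own strong definition. A hypothetical override might look like the following; the device array is a placeholder, and early_platform_add_devices() is the registration helper such setup files commonly use:

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/platform_device.h>

    /* Placeholder list of devices a board wants registered before initcalls. */
    static struct platform_device *myboard_early_devices[] __initdata = {
            /* &myboard_scif0_device, &myboard_tmu0_device, */
    };

    void __init plat_early_device_setup(void)
    {
            early_platform_add_devices(myboard_early_devices,
                                       ARRAY_SIZE(myboard_early_devices));
    }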
void __init setup_arch(char **cmdline_p)
{
@@ -248,6 +244,17 @@ void __init setup_arch(char **cmdline_p)
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+ printk(KERN_NOTICE "Boot params:\n"
+ "... MOUNT_ROOT_RDONLY - %08lx\n"
+ "... RAMDISK_FLAGS - %08lx\n"
+ "... ORIG_ROOT_DEV - %08lx\n"
+ "... LOADER_TYPE - %08lx\n"
+ "... INITRD_START - %08lx\n"
+ "... INITRD_SIZE - %08lx\n",
+ MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
+ ORIG_ROOT_DEV, LOADER_TYPE,
+ INITRD_START, INITRD_SIZE);
+
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
@@ -265,15 +272,17 @@ void __init setup_arch(char **cmdline_p)
code_resource.end = virt_to_phys(_etext)-1;
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(__bss_start);
+ bss_resource.end = virt_to_phys(__bss_stop)-1;
- memory_start = (unsigned long)__va(__MEMORY_START);
- if (!memory_end)
- memory_end = memory_start + __MEMORY_SIZE;
-
-#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
+#ifdef CONFIG_CMDLINE_EXTEND
+ strlcat(command_line, " ", sizeof(command_line));
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#endif
#endif
/* Save unparsed command line copy for /proc/cmdline */
@@ -282,24 +291,14 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
- sh_mv_setup();
+ plat_early_device_setup();
- /*
- * Find the highest page frame number we have available
- */
- max_pfn = PFN_DOWN(__pa(memory_end));
-
- /*
- * Determine low and high memory ranges:
- */
- max_low_pfn = max_pfn;
- min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+ sh_mv_setup();
- nodes_clear(node_online_map);
+ /* Let earlyprintk output early console messages */
+ early_platform_driver_probe("earlyprintk", 1, 1);
- /* Setup bootmem with available RAM */
- setup_memory();
- sparse_init();
+ paging_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
@@ -309,137 +308,17 @@ void __init setup_arch(char **cmdline_p)
if (likely(sh_mv.mv_setup))
sh_mv.mv_setup(cmdline_p);
- paging_init();
-
-#ifdef CONFIG_SMP
plat_smp_setup();
-#endif
-}
-
-static const char *cpu_name[] = {
- [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
- [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
- [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
- [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
- [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
- [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
- [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
- [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
- [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
- [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
- [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
- [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
- [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
- [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
- [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
- [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
- [CPU_SH7366] = "SH7366", [CPU_SH_NONE] = "Unknown"
-};
-
-const char *get_cpu_subtype(struct sh_cpuinfo *c)
-{
- return cpu_name[c->type];
-}
-
-#ifdef CONFIG_PROC_FS
-/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
-static const char *cpu_flags[] = {
- "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
- "ptea", "llsc", "l2", "op32", NULL
-};
-
-static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
-{
- unsigned long i;
-
- seq_printf(m, "cpu flags\t:");
-
- if (!c->flags) {
- seq_printf(m, " %s\n", cpu_flags[0]);
- return;
- }
-
- for (i = 0; cpu_flags[i]; i++)
- if ((c->flags & (1 << i)))
- seq_printf(m, " %s", cpu_flags[i+1]);
-
- seq_printf(m, "\n");
}
-static void show_cacheinfo(struct seq_file *m, const char *type,
- struct cache_info info)
+/* processor boot mode configuration */
+int generic_mode_pins(void)
{
- unsigned int cache_size;
-
- cache_size = info.ways * info.sets * info.linesz;
-
- seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
- type, cache_size >> 10, info.ways);
-}
-
-/*
- * Get CPU information for use by the procfs.
- */
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
- struct sh_cpuinfo *c = v;
- unsigned int cpu = c - cpu_data;
-
- if (!cpu_online(cpu))
- return 0;
-
- if (cpu == 0)
- seq_printf(m, "machine\t\t: %s\n", get_system_type());
-
- seq_printf(m, "processor\t: %d\n", cpu);
- seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
- seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
-
- show_cpuflags(m, c);
-
- seq_printf(m, "cache type\t: ");
-
- /*
- * Check for what type of cache we have, we support both the
- * unified cache on the SH-2 and SH-3, as well as the harvard
- * style cache on the SH-4.
- */
- if (c->icache.flags & SH_CACHE_COMBINED) {
- seq_printf(m, "unified\n");
- show_cacheinfo(m, "cache", c->icache);
- } else {
- seq_printf(m, "split (harvard)\n");
- show_cacheinfo(m, "icache", c->icache);
- show_cacheinfo(m, "dcache", c->dcache);
- }
-
- /* Optional secondary cache */
- if (c->flags & CPU_HAS_L2_CACHE)
- show_cacheinfo(m, "scache", c->scache);
-
- seq_printf(m, "bogomips\t: %lu.%02lu\n",
- c->loops_per_jiffy/(500000/HZ),
- (c->loops_per_jiffy/(5000/HZ)) % 100);
-
+ pr_warning("generic_mode_pins(): missing mode pin configuration\n");
return 0;
}
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
-}
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+int test_mode_pin(int pin)
{
- ++*pos;
- return c_start(m, pos);
+ return sh_mv.mv_mode_pins() & pin;
}
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-const struct seq_operations cpuinfo_op = {
- .start = c_start,
- .next = c_next,
- .stop = c_stop,
- .show = show_cpuinfo,
-};
-#endif /* CONFIG_PROC_FS */
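The mode-pin helpers added above reduce to a single AND: the machine vector supplies a bitmask of sampled boot straps through mv_mode_pins(), and test_mode_pin() checks one bit of it. A hedged sketch of how a board might wire this up; the pin constants and the returned value are inventions for illustration:

    #include <linux/kernel.h>

    #define MY_MODE_PIN0    (1 << 0)        /* e.g. clock mode strap (invented) */
    #define MY_MODE_PIN1    (1 << 1)        /* e.g. boot bus width strap (invented) */

    static int myboard_mode_pins(void)
    {
            /* a real machvec would sample a strap/status register here */
            return MY_MODE_PIN0 | MY_MODE_PIN1;
    }

    /*
     * Hooked up via the machine vector (sh_mv.mv_mode_pins = myboard_mode_pins),
     * after which a caller can simply ask:
     *
     *      if (test_mode_pin(MY_MODE_PIN1))
     *              ...configure for the wider boot bus...
     */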
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index d1bcac4fa26..fe584e51696 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,76 +1,172 @@
/*
- * linux/arch/sh/kernel/sh_bios.c
* C interface for trapping into the standard LinuxSH BIOS.
*
* Copyright (C) 2000 Greg Banks, Mitch Davis
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 M. R. Brown
+ * Copyright (C) 2004 - 2010 Paul Mundt
*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include <asm/sh_bios.h>
-#define BIOS_CALL_CONSOLE_WRITE 0
-#define BIOS_CALL_READ_BLOCK 1
+#define BIOS_CALL_CONSOLE_WRITE 0
#define BIOS_CALL_ETH_NODE_ADDR 10
#define BIOS_CALL_SHUTDOWN 11
-#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */
-#define BIOS_CALL_GDB_GET_MODE_PTR 0xfe
-#define BIOS_CALL_GDB_DETACH 0xff
+#define BIOS_CALL_GDB_DETACH 0xff
-static __inline__ long sh_bios_call(long func, long arg0, long arg1, long arg2, long arg3)
+void *gdb_vbr_vector = NULL;
+
+static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
+ long arg3)
{
- register long r0 __asm__("r0") = func;
- register long r4 __asm__("r4") = arg0;
- register long r5 __asm__("r5") = arg1;
- register long r6 __asm__("r6") = arg2;
- register long r7 __asm__("r7") = arg3;
- __asm__ __volatile__("trapa #0x3f"
- : "=z" (r0)
- : "0" (r0), "r" (r4), "r" (r5), "r" (r6), "r" (r7)
- : "memory");
- return r0;
-}
+ register long r0 __asm__("r0") = func;
+ register long r4 __asm__("r4") = arg0;
+ register long r5 __asm__("r5") = arg1;
+ register long r6 __asm__("r6") = arg2;
+ register long r7 __asm__("r7") = arg3;
+ if (!gdb_vbr_vector)
+ return -ENOSYS;
+
+ __asm__ __volatile__("trapa #0x3f":"=z"(r0)
+ :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
+ :"memory");
+ return r0;
+}
void sh_bios_console_write(const char *buf, unsigned int len)
{
- sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
+ sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
}
+void sh_bios_gdb_detach(void)
+{
+ sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sh_bios_gdb_detach);
-void sh_bios_char_out(char ch)
+void sh_bios_get_node_addr(unsigned char *node_addr)
{
- sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0);
+ sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
}
+EXPORT_SYMBOL_GPL(sh_bios_get_node_addr);
+void sh_bios_shutdown(unsigned int how)
+{
+ sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
+}
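Each of the wrappers above is a thin marshalling layer over sh_bios_call(): the function number travels in r0, up to four arguments in r4 through r7, and the trapa #0x3f handler in the BIOS returns its result in r0. As a usage sketch, board code could ask the BIOS for the MAC address it recorded at boot (the six-byte buffer size is an assumption; the call itself carries no length):

    #include <linux/kernel.h>
    #include <asm/sh_bios.h>

    static void example_report_mac(void)
    {
            unsigned char mac[6];   /* assumed six-byte Ethernet address */

            sh_bios_get_node_addr(mac);
            pr_info("sh-bios MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
                    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }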
-int sh_bios_in_gdb_mode(void)
+/*
+ * Read the old value of the VBR register to initialise the vector
+ * through which debug and BIOS traps are delegated by the Linux trap
+ * handler.
+ */
+void sh_bios_vbr_init(void)
{
- static char queried = 0;
- static char *gdb_mode_p = 0;
-
- if (!queried)
- {
- /* Query the gdb stub for address of its gdb mode variable */
- long r = sh_bios_call(BIOS_CALL_GDB_GET_MODE_PTR, 0, 0, 0, 0);
- if (r != ~0) /* BIOS returns -1 for unknown function */
- gdb_mode_p = (char *)r;
- queried = 1;
- }
- return (gdb_mode_p != 0 ? *gdb_mode_p : 0);
+ unsigned long vbr;
+
+ if (unlikely(gdb_vbr_vector))
+ return;
+
+ __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
+
+ if (vbr) {
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
+ gdb_vbr_vector);
+ } else
+ printk(KERN_NOTICE "SH-BIOS not detected\n");
}
-void sh_bios_gdb_detach(void)
+/**
+ * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
+ *
+ * This can be used by save/restore code to reinitialize the system VBR
+ * from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
+ */
+void sh_bios_vbr_reload(void)
{
- sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
+ if (gdb_vbr_vector)
+ __asm__ __volatile__ (
+ "ldc %0, vbr"
+ :
+ : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
+ : "memory"
+ );
}
-EXPORT_SYMBOL(sh_bios_gdb_detach);
-void sh_bios_get_node_addr (unsigned char *node_addr)
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Print a string through the BIOS
+ */
+static void sh_console_write(struct console *co, const char *s,
+ unsigned count)
{
- sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
+ sh_bios_console_write(s, count);
}
-void sh_bios_shutdown(unsigned int how)
+/*
+ * Setup initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first rs_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init sh_console_setup(struct console *co, char *options)
{
- sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
+ int cflag = CREAD | HUPCL | CLOCAL;
+
+ /*
+ * Now construct a cflag setting.
+ * TODO: this is a totally bogus cflag, as we have
+ * no idea what serial settings the BIOS is using, or
+	 * even if it's using the serial port at all.
+ */
+ cflag |= B115200 | CS8 | /*no parity*/0;
+
+ co->cflag = cflag;
+
+ return 0;
+}
+
+static struct console bios_console = {
+ .name = "bios",
+ .write = sh_console_write,
+ .setup = sh_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static int __init setup_early_printk(char *buf)
+{
+ int keep_early = 0;
+
+ if (!buf)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "bios", 4))
+ early_console = &bios_console;
+
+ if (likely(early_console)) {
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ }
+
+ return 0;
}
+early_param("earlyprintk", setup_early_printk);
+#endif
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index d80de390327..d77f2f6c7ff 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -1,72 +1,39 @@
#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/irq.h>
-#include <asm/sections.h>
-#include <asm/semaphore.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-extern struct hw_interrupt_type no_irq_type;
-
-EXPORT_SYMBOL(sh_mv);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(irq_desc);
-EXPORT_SYMBOL(no_irq_type);
-
-EXPORT_SYMBOL(strlen);
-
-/* PCI exports */
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-#endif
+#include <asm/sections.h>
-/* mem exports */
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__copy_user);
-
-#ifdef CONFIG_MMU
-EXPORT_SYMBOL(get_vm_area);
-#endif
-
-/* semaphore exports */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_generic);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
-#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
+#define DECLARE_EXPORT(name) \
+ extern void name(void);EXPORT_SYMBOL(name)
-/* These symbols are generated by the compiler itself */
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__sdivsi3);
+DECLARE_EXPORT(__lshrsi3);
DECLARE_EXPORT(__ashrsi3);
DECLARE_EXPORT(__ashlsi3);
-DECLARE_EXPORT(__ashrdi3);
-DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__ashiftrt_r4_6);
DECLARE_EXPORT(__ashiftrt_r4_7);
DECLARE_EXPORT(__ashiftrt_r4_8);
@@ -84,8 +51,7 @@ DECLARE_EXPORT(__ashiftrt_r4_23);
DECLARE_EXPORT(__ashiftrt_r4_24);
DECLARE_EXPORT(__ashiftrt_r4_27);
DECLARE_EXPORT(__ashiftrt_r4_30);
-DECLARE_EXPORT(__lshrsi3);
-DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__movstr);
DECLARE_EXPORT(__movstrSI8);
DECLARE_EXPORT(__movstrSI12);
DECLARE_EXPORT(__movstrSI16);
@@ -100,52 +66,32 @@ DECLARE_EXPORT(__movstrSI48);
DECLARE_EXPORT(__movstrSI52);
DECLARE_EXPORT(__movstrSI56);
DECLARE_EXPORT(__movstrSI60);
-#if __GNUC__ == 4
+DECLARE_EXPORT(__movstr_i4_even);
+DECLARE_EXPORT(__movstr_i4_odd);
+DECLARE_EXPORT(__movstrSI12_i4);
DECLARE_EXPORT(__movmem);
-#else
-DECLARE_EXPORT(__movstr);
-#endif
-
-#if __GNUC__ == 4
+DECLARE_EXPORT(__movmemSI8);
+DECLARE_EXPORT(__movmemSI12);
+DECLARE_EXPORT(__movmemSI16);
+DECLARE_EXPORT(__movmemSI20);
+DECLARE_EXPORT(__movmemSI24);
+DECLARE_EXPORT(__movmemSI28);
+DECLARE_EXPORT(__movmemSI32);
+DECLARE_EXPORT(__movmemSI36);
+DECLARE_EXPORT(__movmemSI40);
+DECLARE_EXPORT(__movmemSI44);
+DECLARE_EXPORT(__movmemSI48);
+DECLARE_EXPORT(__movmemSI52);
+DECLARE_EXPORT(__movmemSI56);
+DECLARE_EXPORT(__movmemSI60);
DECLARE_EXPORT(__movmem_i4_even);
DECLARE_EXPORT(__movmem_i4_odd);
DECLARE_EXPORT(__movmemSI12_i4);
-
-#if (__GNUC_MINOR__ >= 2 || defined(__GNUC_STM_RELEASE__))
-/*
- * GCC >= 4.2 emits these for division, as do GCC 4.1.x versions of the ST
- * compiler which include backported patches.
- */
-DECLARE_EXPORT(__sdivsi3_i4i);
DECLARE_EXPORT(__udiv_qrnnd_16);
+DECLARE_EXPORT(__sdivsi3_i4);
+DECLARE_EXPORT(__udivsi3_i4);
+DECLARE_EXPORT(__sdivsi3_i4i);
DECLARE_EXPORT(__udivsi3_i4i);
+#ifdef CONFIG_MCOUNT
+DECLARE_EXPORT(mcount);
#endif
-#else /* GCC 3.x */
-DECLARE_EXPORT(__movstr_i4_even);
-DECLARE_EXPORT(__movstr_i4_odd);
-DECLARE_EXPORT(__movstrSI12_i4);
-#endif /* __GNUC__ == 4 */
-
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
- defined(CONFIG_SH7705_CACHE_32KB))
-/* needed by some modules */
-EXPORT_SYMBOL(flush_cache_all);
-EXPORT_SYMBOL(flush_cache_range);
-EXPORT_SYMBOL(flush_dcache_page);
-EXPORT_SYMBOL(__flush_purge_region);
-#endif
-
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
- (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-#ifdef CONFIG_IPV6
-EXPORT_SYMBOL(csum_ipv6_magic);
-#endif
-EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index dd38a683de6..26a0774f527 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -16,7 +16,7 @@
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
-#include <asm/semaphore.h>
+#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
@@ -24,33 +24,31 @@
#include <asm/delay.h>
#include <asm/irq.h>
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
-/* platform dependent support */
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(kernel_thread);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(screen_info);
-#endif
-
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__put_user_asm_b);
+EXPORT_SYMBOL(__put_user_asm_w);
EXPORT_SYMBOL(__put_user_asm_l);
+EXPORT_SYMBOL(__put_user_asm_q);
+EXPORT_SYMBOL(__get_user_asm_b);
+EXPORT_SYMBOL(__get_user_asm_w);
EXPORT_SYMBOL(__get_user_asm_l);
+EXPORT_SYMBOL(__get_user_asm_q);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strcpy);
/* Ugh. These come in from libgcc.a at link time. */
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
DECLARE_EXPORT(__sdivsi3);
-DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__sdivsi3_1);
+DECLARE_EXPORT(__sdivsi3_2);
DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__div_table);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index f311551d9a0..594cd371aa2 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -22,80 +22,29 @@
#include <linux/elf.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
-#include <linux/freezer.h>
#include <linux/io.h>
-#include <asm/system.h>
+#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/syscalls.h>
#include <asm/fpu.h>
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+struct fdpic_func_descriptor {
+ unsigned long text;
+ unsigned long GOT;
+};
/*
- * Atomically swap in the new signal mask, and wait for a signal.
+ * The following define adds a 64 byte gap between the signal
+ * stack frame and previous contents of the stack. This allows
+ * frame unwinding in a function epilogue but only if a frame
+ * pointer is used in the function. This is necessary because
+ * current gcc compilers (<4.3) do not generate unwind info on
+ * SH for function epilogues.
*/
-asmlinkage int
-sys_sigsuspend(old_sigset_t mask,
- unsigned long r5, unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
- return -ERESTARTNOHAND;
-}
-
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
- struct old_sigaction __user *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
-
- if (act) {
- old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
- }
-
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
-
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
- }
-
- return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
-{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-
- return do_sigaltstack(uss, uoss, regs->regs[15]);
-}
-
+#define UNWINDGUARD 64
/*
* Do a signal return; undo the signal stack.
@@ -128,11 +77,11 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
{
struct task_struct *tsk = current;
- if (!(current_cpu_data.flags & CPU_HAS_FPU))
+ if (!(boot_cpu_data.flags & CPU_HAS_FPU))
return 0;
set_used_math();
- return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0],
+ return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
sizeof(long)*(16*2+2));
}
@@ -141,15 +90,14 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
{
struct task_struct *tsk = current;
- if (!(current_cpu_data.flags & CPU_HAS_FPU))
+ if (!(boot_cpu_data.flags & CPU_HAS_FPU))
return 0;
- if (!used_math()) {
- __put_user(0, &sc->sc_ownedfp);
- return 0;
- }
+ if (!used_math())
+ return __put_user(0, &sc->sc_ownedfp);
- __put_user(1, &sc->sc_ownedfp);
+ if (__put_user(1, &sc->sc_ownedfp))
+ return -EFAULT;
/* This will cause a "finit" to be triggered by the next
attempted FPU operation by the 'current' process.
@@ -157,7 +105,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
clear_used_math();
unlazy_fpu(tsk, regs);
- return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard,
+ return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
sizeof(long)*(16*2+2));
}
#endif /* CONFIG_SH_FPU */
@@ -182,14 +130,14 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
#undef COPY
#ifdef CONFIG_SH_FPU
- if (current_cpu_data.flags & CPU_HAS_FPU) {
+ if (boot_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;
struct task_struct *tsk = current;
regs->sr |= SR_FD; /* Release FPU */
clear_fpu(tsk, regs);
clear_used_math();
- __get_user (owned_fp, &sc->sc_ownedfp);
+ err |= __get_user (owned_fp, &sc->sc_ownedfp);
if (owned_fp)
err |= restore_sigcontext_fpu(sc);
}
@@ -200,15 +148,16 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
return err;
}
-asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage int sys_sigreturn(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct pt_regs *regs = current_pt_regs();
struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
sigset_t set;
int r0;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -218,12 +167,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
sizeof(frame->extramask))))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &r0))
goto badframe;
@@ -234,36 +178,29 @@ badframe:
return 0;
}
-asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage int sys_rt_sigreturn(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
sigset_t set;
- stack_t st;
int r0;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
return r0;
@@ -317,13 +254,13 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
sp = current->sas_ss_sp + current->sas_ss_size;
}
- return (void __user *)((sp - frame_size) & -8ul);
+ return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
}
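With the UNWINDGUARD change above, get_sigframe() reserves an extra 64 bytes below the interrupted context before rounding the frame address down to an 8-byte boundary with the & -8ul mask. A small worked example with made-up numbers:

    #include <stdio.h>

    #define UNWINDGUARD 64UL

    int main(void)
    {
            unsigned long sp         = 0x7fffe000UL; /* hypothetical user SP */
            unsigned long frame_size = 0x22cUL;      /* hypothetical sizeof(struct sigframe) */
            unsigned long frame      = (sp - (frame_size + UNWINDGUARD)) & -8UL;

            /* 0x7fffe000 - 0x26c = 0x7fffdd94, aligned down to 0x7fffdd90 */
            printf("frame at %#lx\n", frame);
            return 0;
    }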
/* These symbols are defined with the addresses in the vsyscall page.
See vsyscall-trapa.S. */
-extern void __user __kernel_sigreturn;
-extern void __user __kernel_rt_sigreturn;
+extern void __kernel_sigreturn(void);
+extern void __kernel_rt_sigreturn(void);
static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
@@ -368,6 +305,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
err |= __put_user(OR_R0_R0, &frame->retcode[6]);
err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
regs->pr = (unsigned long) frame->retcode;
+ flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
}
if (err)
@@ -378,18 +316,24 @@ static int setup_frame(int sig, struct k_sigaction *ka,
regs->regs[4] = signal; /* Arg for signal handler */
regs->regs[5] = 0;
regs->regs[6] = (unsigned long) &frame->sc;
- regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ if (current->personality & FDPIC_FUNCPTRS) {
+ struct fdpic_func_descriptor __user *funcptr =
+ (struct fdpic_func_descriptor __user *)ka->sa.sa_handler;
+
+ err |= __get_user(regs->pc, &funcptr->text);
+ err |= __get_user(regs->regs[12], &funcptr->GOT);
+ } else
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ if (err)
+ goto give_sigsegv;
set_fs(USER_DS);
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
- flush_cache_sigtramp(regs->pr);
-
- if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
- flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
-
return 0;
give_sigsegv:
@@ -419,12 +363,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user((void *)current->sas_ss_sp,
- &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[15]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->regs[15]);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
@@ -448,6 +388,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(OR_R0_R0, &frame->retcode[6]);
err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
regs->pr = (unsigned long) frame->retcode;
+ flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
}
if (err)
@@ -458,18 +399,24 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->regs[4] = signal; /* Arg for signal handler */
regs->regs[5] = (unsigned long) &frame->info;
regs->regs[6] = (unsigned long) &frame->uc;
- regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ if (current->personality & FDPIC_FUNCPTRS) {
+ struct fdpic_func_descriptor __user *funcptr =
+ (struct fdpic_func_descriptor __user *)ka->sa.sa_handler;
+
+ err |= __get_user(regs->pc, &funcptr->text);
+ err |= __get_user(regs->regs[12], &funcptr->GOT);
+ } else
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ if (err)
+ goto give_sigsegv;
set_fs(USER_DS);
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
- flush_cache_sigtramp(regs->pr);
-
- if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
- flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
-
return 0;
give_sigsegv:
@@ -477,58 +424,53 @@ give_sigsegv:
return -EFAULT;
}
+static inline void
+handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
+ struct sigaction *sa)
+{
+ /* If we're not from a syscall, bail out */
+ if (regs->tra < 0)
+ return;
+
+ /* check for system call restart.. */
+ switch (regs->regs[0]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ no_system_call_restart:
+ regs->regs[0] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(sa->sa_flags & SA_RESTART))
+ goto no_system_call_restart;
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->regs[0] = save_r0;
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
+ break;
+ }
+}
+
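handle_syscall_restart() fixes up the interrupted system call before the handler runs: -ERESTARTNOHAND and -ERESTART_RESTARTBLOCK always become -EINTR, -ERESTARTSYS becomes -EINTR only when the handler was installed without SA_RESTART, and the remaining cases restore r0 and rewind pc past the trapa so the call is reissued transparently. The userspace-visible consequence is simply that blocking calls may return EINTR; a sketch of the classic retry idiom for handlers registered without SA_RESTART:

    #include <errno.h>
    #include <unistd.h>

    /* Assumes the handler was installed without SA_RESTART, so read() may
     * legitimately fail with EINTR when a signal arrives mid-call. */
    static ssize_t read_retry(int fd, void *buf, size_t len)
    {
            ssize_t n;

            do {
                    n = read(fd, buf, len);
            } while (n < 0 && errno == EINTR);

            return n;
    }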
/*
* OK, we're invoking a handler
*/
-
-static int
+static void
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0)
+ struct pt_regs *regs, unsigned int save_r0)
{
+ sigset_t *oldset = sigmask_to_save();
int ret;
- /* Are we from a system call? */
- if (regs->tra >= 0) {
- /* If so, check system call restarting.. */
- switch (regs->regs[0]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->regs[0] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->regs[0] = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- regs->regs[0] = save_r0;
- regs->pc -= instruction_size(
- ctrl_inw(regs->pc - 4));
- break;
- }
- }
-
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
ret = setup_frame(sig, ka, oldset, regs);
- if (ka->sa.sa_flags & SA_ONESHOT)
- ka->sa.sa_handler = SIG_DFL;
-
- if (ret == 0) {
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
- }
-
- return ret;
+ if (ret)
+ return;
+ signal_delivered(sig, info, ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
}
/*
@@ -545,7 +487,6 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
siginfo_t info;
int signr;
struct k_sigaction ka;
- sigset_t *oldset;
/*
* We want the common case to go fast, which
@@ -556,31 +497,15 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
if (!user_mode(regs))
return;
- if (try_to_freeze())
- goto no_signal;
-
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else
- oldset = &current->blocked;
-
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- if (handle_signal(signr, &ka, &info, oldset,
- regs, save_r0) == 0) {
- /* a signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
+ handle_syscall_restart(save_r0, regs, &ka.sa);
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &ka, &info, regs, save_r0);
return;
}
- no_signal:
/* Did we come from a system call? */
if (regs->tra >= 0) {
/* Restart the system call - no handlers present */
@@ -588,25 +513,29 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
regs->regs[0] == -ERESTARTSYS ||
regs->regs[0] == -ERESTARTNOINTR) {
regs->regs[0] = save_r0;
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
regs->regs[3] = __NR_restart_syscall;
}
}
- /* if there's no signal to deliver, we just put the saved sigmask
- * back */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ restore_saved_sigmask();
}
asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
- __u32 thread_info_flags)
+ unsigned long thread_info_flags)
{
/* deal with pending signal delivery */
- if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+ if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs, save_r0);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 80bde19d445..23d4c71c91a 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -2,7 +2,7 @@
* arch/sh/kernel/signal_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2008 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -18,14 +18,15 @@
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
-#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
+#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/fpu.h>
#define REG_RET 9
#define REG_ARG1 2
@@ -39,133 +40,103 @@
#define DEBUG_SIG 0
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+static void
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ struct pt_regs * regs);
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+static inline void
+handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
+{
+ /* If we're not from a syscall, bail out */
+ if (regs->syscall_nr < 0)
+ return;
-/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
+ /* check for system call restart.. */
+ switch (regs->regs[REG_RET]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ no_system_call_restart:
+ regs->regs[REG_RET] = -EINTR;
+ break;
-asmlinkage int
-sys_sigsuspend(old_sigset_t mask,
- unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs * regs)
-{
- sigset_t saveset;
-
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- REF_REG_RET = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- regs->pc += 4; /* because sys_sigreturn decrements the pc */
- if (do_signal(regs, &saveset)) {
- /* pc now points at signal handler. Need to decrement
- it because entry.S will increment it. */
+ case -ERESTARTSYS:
+ if (!(sa->sa_flags & SA_RESTART))
+ goto no_system_call_restart;
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ /* Decode syscall # */
+ regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
- return -EINTR;
- }
+ break;
}
}
-asmlinkage int
-sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
- unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7,
- struct pt_regs * regs)
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- REF_REG_RET = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- regs->pc += 4; /* because sys_sigreturn decrements the pc */
- if (do_signal(regs, &saveset)) {
- /* pc now points at signal handler. Need to decrement
- it because entry.S will increment it. */
- regs->pc -= 4;
- return -EINTR;
- }
- }
-}
+ siginfo_t info;
+ int signr;
+ struct k_sigaction ka;
-asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction __user *act,
- struct old_sigaction __user *oact)
-{
- struct k_sigaction new_ka, old_ka;
- int ret;
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return;
- if (act) {
- old_sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
- return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
- siginitset(&new_ka.sa.sa_mask, mask);
+ signr = get_signal_to_deliver(&info, &ka, regs, 0);
+ if (signr > 0) {
+ handle_syscall_restart(regs, &ka.sa);
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &info, &ka, regs);
+ return;
}
- ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+ /* Did we come from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* Restart the system call - no handlers present */
+ switch (regs->regs[REG_RET]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ /* Decode Syscall # */
+ regs->regs[REG_RET] = regs->syscall_nr;
+ regs->pc -= 4;
+ break;
- if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
- return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[REG_RET] = __NR_restart_syscall;
+ regs->pc -= 4;
+ break;
+ }
}
- return ret;
-}
-
-asmlinkage int
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
- unsigned long r4, unsigned long r5, unsigned long r6,
- unsigned long r7,
- struct pt_regs * regs)
-{
- return do_sigaltstack(uss, uoss, REF_REG_SP);
+ /* No signal to deliver -- put the saved sigmask back */
+ restore_saved_sigmask();
}
-
/*
* Do a signal return; undo the signal stack.
*/
-
-struct sigframe
-{
+struct sigframe {
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
long long retcode[2];
};
-struct rt_sigframe
-{
+struct rt_sigframe {
struct siginfo __user *pinfo;
void *puc;
struct siginfo info;
@@ -190,7 +161,7 @@ restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
regs->sr |= SR_FD;
}
- err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+ err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
@@ -209,13 +180,13 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
if (current == last_task_used_math) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
- err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+ err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
@@ -292,6 +263,9 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
sigset_t set;
long long ret;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
@@ -301,12 +275,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
sizeof(frame->extramask))))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
-
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &ret))
goto badframe;
@@ -326,30 +295,25 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
sigset_t set;
- stack_t __user st;
long long ret;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
goto badframe;
regs->pc -= 4;
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack(&st, NULL, REF_REG_SP);
return (int) ret;
@@ -361,7 +325,6 @@ badframe:
/*
* Set up a signal frame.
*/
-
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
@@ -415,8 +378,8 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
void sa_default_restorer(void); /* See comments below */
void sa_default_rt_restorer(void); /* See comments below */
-static void setup_frame(int sig, struct k_sigaction *ka,
- sigset_t *set, struct pt_regs *regs)
+static int setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
int err = 0;
@@ -450,13 +413,11 @@ static void setup_frame(int sig, struct k_sigaction *ka,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
-
/*
* On SH5 all edited pointers are subject to NEFF
*/
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ ka->sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
@@ -469,12 +430,11 @@ static void setup_frame(int sig, struct k_sigaction *ka,
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
- DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
- (unsigned long long)sa_default_restorer & (~1), 16) != 0)
+ (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
goto give_sigsegv;
/* Cohere the trampoline with the I-cache. */
@@ -485,9 +445,7 @@ static void setup_frame(int sig, struct k_sigaction *ka,
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
- regs->regs[REG_SP] = (unsigned long) frame;
- regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
- (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
/* FIXME:
@@ -502,28 +460,25 @@ static void setup_frame(int sig, struct k_sigaction *ka,
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+ regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
set_fs(USER_DS);
-#if DEBUG_SIG
/* Broken %016Lx */
- printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- signal,
- current->comm, current->pid, frame,
- regs->pc >> 32, regs->pc & 0xffffffff,
- DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
-#endif
+ pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+ signal, current->comm, current->pid, frame,
+ regs->pc >> 32, regs->pc & 0xffffffff,
+ DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
- return;
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs *regs)
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0;
@@ -551,11 +506,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user((void *)current->sas_ss_sp,
- &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->regs[REG_SP]),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->regs[REG_SP]);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
@@ -567,13 +518,11 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
- DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1;
-
/*
* On SH5 all edited pointers are subject to NEFF
*/
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ ka->sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
@@ -586,15 +535,14 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
-
- DEREF_REG_PR = (unsigned long) frame->retcode | 0x01;
- DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ?
- (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR;
+ DEREF_REG_PR = neff_sign_extend((unsigned long)
+ frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
- (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0)
+ (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
goto give_sigsegv;
+ /* Cohere the trampoline with the I-cache. */
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
}
@@ -602,150 +550,56 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
- regs->regs[REG_SP] = (unsigned long) frame;
- regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ?
- (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP];
+ regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = signal; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc;
+ regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler);
set_fs(USER_DS);
-#if DEBUG_SIG
- /* Broken %016Lx */
- printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- signal,
- current->comm, current->pid, frame,
- regs->pc >> 32, regs->pc & 0xffffffff,
- DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
-#endif
+ pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
+ signal, current->comm, current->pid, frame,
+ regs->pc >> 32, regs->pc & 0xffffffff,
+ DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
- return;
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
}
/*
* OK, we're invoking a handler
*/
-
static void
handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
- sigset_t *oldset, struct pt_regs * regs)
+ struct pt_regs * regs)
{
- /* Are we from a system call? */
- if (regs->syscall_nr >= 0) {
- /* If so, check system call restarting.. */
- switch (regs->regs[REG_RET]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->regs[REG_RET] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->regs[REG_RET] = -EINTR;
- break;
- }
- /* fallthrough */
- case -ERESTARTNOINTR:
- /* Decode syscall # */
- regs->regs[REG_RET] = regs->syscall_nr;
- regs->pc -= 4;
- }
- }
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
- setup_frame(sig, ka, oldset, regs);
-
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-}
+ ret = setup_frame(sig, ka, oldset, regs);
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- *
- * Note that we go through the signals twice: once to check the signals that
- * the kernel can handle, and then we build all the user-level signal handling
- * stack-frames in one go after that.
- */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
-{
- siginfo_t info;
- int signr;
- struct k_sigaction ka;
+ if (ret)
+ return;
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return 1;
-
- if (try_to_freeze())
- goto no_signal;
-
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- oldset = &current->saved_sigmask;
- else if (!oldset)
- oldset = &current->blocked;
-
- signr = get_signal_to_deliver(&info, &ka, regs, 0);
-
- if (signr > 0) {
- /* Whee! Actually deliver the signal. */
- handle_signal(signr, &info, &ka, oldset, regs);
-
- /*
- * If a signal was successfully delivered, the saved sigmask
- * is in its frame, and we can clear the TIF_RESTORE_SIGMASK
- * flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-
- return 1;
- }
-
-no_signal:
- /* Did we come from a system call? */
- if (regs->syscall_nr >= 0) {
- /* Restart the system call - no handlers present */
- switch (regs->regs[REG_RET]) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- /* Decode Syscall # */
- regs->regs[REG_RET] = regs->syscall_nr;
- regs->pc -= 4;
- break;
+ signal_delivered(sig, info, ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
+}
- case -ERESTART_RESTARTBLOCK:
- regs->regs[REG_RET] = __NR_restart_syscall;
- regs->pc -= 4;
- break;
- }
- }
+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
+{
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs);
- /* No signal to deliver -- put the saved sigmask back */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
}
-
- return 0;
}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 5d039d168f5..fc5acfc93c9 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -3,7 +3,7 @@
*
* SMP support for the SuperH processors.
*
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
* Copyright (C) 2006 - 2007 Akio Idehara
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -18,35 +18,39 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/cpu.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
+#include <asm/setup.h>
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
+struct plat_smp_ops *mp_ops = NULL;
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
-static atomic_t cpus_booted = ATOMIC_INIT(0);
+void register_smp_ops(struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
-/*
- * Run specified function on a particular processor.
- */
-void __smp_call_function(unsigned int cpu);
+ mp_ops = ops;
+}
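register_smp_ops() above lets the platform hand the generic SMP code a set of callbacks, which are then invoked as mp_ops->prepare_cpus(), mp_ops->cpu_disable() and so on. A hedged sketch of such glue; only the two hooks visible in this patch are filled in, the header name is an assumption, and a real platform supplies the remaining plat_smp_ops members as well:

    #include <linux/errno.h>
    #include <asm/smp-ops.h>        /* assumed home of struct plat_smp_ops */

    static void myplat_prepare_cpus(unsigned int max_cpus)
    {
            /* bring secondaries out of reset, wire up IPIs, ... */
    }

    static int myplat_cpu_disable(unsigned int cpu)
    {
            return cpu == 0 ? -EPERM : 0;   /* same policy as native_cpu_disable() */
    }

    /* Only the hooks exercised in this patch are shown; a real platform
     * fills in the rest of struct plat_smp_ops too. */
    static struct plat_smp_ops myplat_smp_ops = {
            .prepare_cpus   = myplat_prepare_cpus,
            .cpu_disable    = myplat_cpu_disable,
    };

    /* Called from CPU probe / machine vector code:  register_smp_ops(&myplat_smp_ops); */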
-static inline void __init smp_store_cpu_info(unsigned int cpu)
+static inline void smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
+ memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
+
c->loops_per_jiffy = loops_per_jiffy;
}
@@ -56,49 +60,150 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
init_new_context(current, &init_mm);
current_thread_info()->cpu = cpu;
- plat_prepare_cpus(max_cpus);
+ mp_ops->prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(cpu_possible_mask);
#endif
}
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
__cpu_number_map[0] = cpu;
__cpu_logical_map[0] = cpu;
- cpu_set(cpu, cpu_online_map);
- cpu_set(cpu, cpu_possible_map);
+ set_cpu_online(cpu, true);
+ set_cpu_possible(cpu, true);
+
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+ unsigned int i;
+
+ for (i = 0; i < 10; i++) {
+ smp_rmb();
+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+ if (system_state == SYSTEM_RUNNING)
+ pr_info("CPU %u is now offline\n", cpu);
+
+ return;
+ }
+
+ msleep(100);
+ }
+
+ pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+ return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+ idle_task_exit();
+ irq_ctx_exit(raw_smp_processor_id());
+ mb();
+
+ __this_cpu_write(cpu_state, CPU_DEAD);
+ local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+ play_dead_common();
+}
+
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ int ret;
+
+ ret = mp_ops->cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Stop the local timer for this CPU.
+ */
+ local_timer_stop(cpu);
+
+ /*
+ * Flush user cache and TLB mappings, and then remove this CPU
+ * from the vm mask set of all processes.
+ */
+ flush_cache_all();
+ local_flush_tlb_all();
+
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+ return -ENOSYS;
}
-asmlinkage void __cpuinit start_secondary(void)
+void native_cpu_die(unsigned int cpu)
{
- unsigned int cpu;
+ /* We said "no" in __cpu_disable */
+ BUG();
+}
+
+void native_play_dead(void)
+{
+ BUG();
+}
+#endif
+
+asmlinkage void start_secondary(void)
+{
+ unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
+ enable_mmu();
atomic_inc(&mm->mm_count);
atomic_inc(&mm->mm_users);
current->active_mm = mm;
- BUG_ON(current->mm);
enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
per_cpu_trap_init();
preempt_disable();
+ notify_cpu_starting(cpu);
+
local_irq_enable();
+ /* Enable local timers */
+ local_timer_setup(cpu);
calibrate_delay();
- cpu = smp_processor_id();
smp_store_cpu_info(cpu);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, true);
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
extern struct {
@@ -110,16 +215,11 @@ extern struct {
void *thread_info;
} stack_start;
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
- struct task_struct *tsk;
unsigned long timeout;
- tsk = fork_idle(cpu);
- if (IS_ERR(tsk)) {
- printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
- return PTR_ERR(tsk);
- }
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Fill in data in head.S for secondary cpus */
stack_start.sp = tsk->thread.sp;
@@ -127,9 +227,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
stack_start.start_kernel_fn = start_secondary;
- flush_cache_all();
+ flush_icache_range((unsigned long)&stack_start,
+ (unsigned long)&stack_start + sizeof(stack_start));
+ wmb();
- plat_start_cpu(cpu, (unsigned long)_stext);
+ mp_ops->start_cpu(cpu, (unsigned long)_stext);
timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
@@ -137,6 +239,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
break;
udelay(10);
+ barrier();
}
if (cpu_online(cpu))
@@ -161,59 +264,62 @@ void __init smp_cpus_done(unsigned int max_cpus)
void smp_send_reschedule(int cpu)
{
- plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
-}
-
-static void stop_this_cpu(void *unused)
-{
- cpu_clear(smp_processor_id(), cpu_online_map);
- local_irq_disable();
-
- for (;;)
- cpu_relax();
+ mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, 0, 1, 0);
+ smp_call_function(stop_this_cpu, 0, 0);
}
-struct smp_fn_call_struct smp_fn_call = {
- .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock),
- .finished = ATOMIC_INIT(0),
-};
-
-/*
- * The caller of this wants the passed function to run on every cpu. If wait
- * is set, wait until all cpus have finished the function before returning.
- * The lock is here to protect the call structure.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- unsigned int nr_cpus = atomic_read(&cpus_booted);
- int i;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ int cpu;
- spin_lock(&smp_fn_call.lock);
+ for_each_cpu(cpu, mask)
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
+}
- atomic_set(&smp_fn_call.finished, 0);
- smp_fn_call.fn = func;
- smp_fn_call.data = info;
+void arch_send_call_function_single_ipi(int cpu)
+{
+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+}
- for (i = 0; i < nr_cpus; i++)
- if (i != smp_processor_id())
- plat_send_ipi(i, SMP_MSG_FUNCTION);
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+ int cpu;
- if (wait)
- while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));
+ for_each_cpu(cpu, mask)
+ mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
+}
- spin_unlock(&smp_fn_call.lock);
+static void ipi_timer(void)
+{
+ irq_enter();
+ local_timer_interrupt();
+ irq_exit();
+}
- return 0;
+void smp_message_recv(unsigned int msg)
+{
+ switch (msg) {
+ case SMP_MSG_FUNCTION:
+ generic_smp_call_function_interrupt();
+ break;
+ case SMP_MSG_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case SMP_MSG_FUNCTION_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+ case SMP_MSG_TIMER:
+ ipi_timer();
+ break;
+ default:
+ printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
+ smp_processor_id(), __func__, msg);
+ break;
+ }
}
/* Not really SMP stuff ... */
@@ -229,7 +335,7 @@ static void flush_tlb_all_ipi(void *info)
void flush_tlb_all(void)
{
- on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+ on_each_cpu(flush_tlb_all_ipi, 0, 1);
}
static void flush_tlb_mm_ipi(void *mm)
@@ -249,13 +355,12 @@ static void flush_tlb_mm_ipi(void *mm)
* behalf of debugees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
*/
-
void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+ smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -292,7 +397,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
fd.vma = vma;
fd.addr1 = start;
fd.addr2 = end;
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -316,7 +421,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
fd.addr1 = start;
fd.addr2 = end;
- on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
+ on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}
static void flush_tlb_page_ipi(void *info)
@@ -335,7 +440,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
fd.vma = vma;
fd.addr1 = page;
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
} else {
int i;
for (i = 0; i < num_online_cpus(); i++)
@@ -359,6 +464,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr)
fd.addr1 = asid;
fd.addr2 = vaddr;
- smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
+ smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
local_flush_tlb_one(asid, vaddr);
}
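For illustration only (not part of this patch): the smp.c rework above replaces the hard-wired plat_prepare_cpus()/plat_start_cpu()/plat_send_ipi() calls with a registered struct plat_smp_ops. The sketch below shows roughly what a platform would now provide; the field names are inferred from the mp_ops-> call sites in this diff (the authoritative struct definition lives in the arch headers, not shown here), and every "myboard" identifier is hypothetical.

/* Sketch: ops layout inferred from the call sites in the diff above. */
static void myboard_prepare_cpus(unsigned int max_cpus)
{
	/* unmask the IPI interrupt, mark secondary CPUs possible, ... */
}

static void myboard_start_cpu(unsigned int cpu, unsigned long entry_point)
{
	/* write entry_point into the CPU's reset vector and release it */
}

static void myboard_send_ipi(unsigned int cpu, unsigned int message)
{
	/* poke the interrupt controller register for this CPU/message */
}

static int myboard_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;	/* keep the boot CPU online */
}

static struct plat_smp_ops myboard_smp_ops = {
	.prepare_cpus	= myboard_prepare_cpus,
	.start_cpu	= myboard_start_cpu,
	.send_ipi	= myboard_send_ipi,
	.cpu_disable	= myboard_cpu_disable,
};

void __init myboard_register_smp(void)
{
	register_smp_ops(&myboard_smp_ops);
}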
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index d41e561be20..bf989e063a0 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -3,7 +3,7 @@
*
* Stack trace management functions
*
- * Copyright (C) 2006 Paul Mundt
+ * Copyright (C) 2006 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -12,25 +12,81 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+static int save_stack_stack(void *data, char *name)
+{
+ return 0;
+}
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
+static void save_stack_address(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = data;
+
+ if (!reliable)
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
+ }
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops = {
+ .stack = save_stack_stack,
+ .address = save_stack_address,
+};
+
void save_stack_trace(struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)current_stack_pointer;
- while (!kstack_end(sp)) {
- unsigned long addr = *sp++;
-
- if (__kernel_text_address(addr)) {
- if (trace->skip > 0)
- trace->skip--;
- else
- trace->entries[trace->nr_entries++] = addr;
- if (trace->nr_entries >= trace->max_entries)
- break;
- }
+ unwind_stack(current, NULL, sp, &save_stack_ops, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
+{
+ struct stack_trace *trace = (struct stack_trace *)data;
+
+ if (!reliable)
+ return;
+
+ if (in_sched_functions(addr))
+ return;
+
+ if (trace->skip > 0) {
+ trace->skip--;
+ return;
}
+
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = addr;
+}
+
+static const struct stacktrace_ops save_stack_ops_nosched = {
+ .stack = save_stack_stack,
+ .address = save_stack_address_nosched,
+};
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned long *sp = (unsigned long *)tsk->thread.sp;
+
+ unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
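As a usage sketch (not from this patch): the rewritten save_stack_trace() above feeds unwinder-reported addresses into a caller-supplied struct stack_trace, using the same fields this file already manipulates (entries, max_entries, nr_entries, skip). The helper name below is hypothetical.

static void dump_current_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* drop this helper's own frame */
	};
	unsigned int i;

	save_stack_trace(&trace);

	/* the last entry may be the ULONG_MAX terminator appended above */
	for (i = 0; i < trace.nr_entries; i++)
		printk(KERN_DEBUG "[<%08lx>] %pS\n",
		       entries[i], (void *)entries[i]);
}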
diff --git a/arch/sh/kernel/swsusp.c b/arch/sh/kernel/swsusp.c
new file mode 100644
index 00000000000..12b64a0f2f0
--- /dev/null
+++ b/arch/sh/kernel/swsusp.c
@@ -0,0 +1,38 @@
+/*
+ * swsusp.c - SuperH hibernation support
+ *
+ * Copyright (C) 2009 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <asm/suspend.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/fpu.h>
+
+struct swsusp_arch_regs swsusp_arch_regs_cpu0;
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= begin_pfn) && (pfn < end_pfn);
+}
+
+void save_processor_state(void)
+{
+ init_fpu(current);
+}
+
+void restore_processor_state(void)
+{
+ local_flush_tlb_all();
+}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 59cd2859ce9..8c6a350df75 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -22,124 +22,11 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
-#include <asm/cacheflush.h>
+#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
-
-unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-EXPORT_SYMBOL(shm_align_mask);
-
-#ifdef CONFIG_MMU
-/*
- * To avoid cache aliases, we map the shared page with same color.
- */
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + shm_align_mask) & ~shm_align_mask) + \
- (((pgoff) << PAGE_SHIFT) & shm_align_mask))
-
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long start_addr;
- int do_colour_align;
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
- if ((flags & MAP_SHARED) && (addr & shm_align_mask))
- return -EINVAL;
- return addr;
- }
-
- if (unlikely(len > TASK_SIZE))
- return -ENOMEM;
-
- do_colour_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_colour_align = 1;
-
- if (addr) {
- if (do_colour_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
- return addr;
- }
-
- if (len > mm->cached_hole_size) {
- start_addr = addr = mm->free_area_cache;
- } else {
- mm->cached_hole_size = 0;
- start_addr = addr = TASK_UNMAPPED_BASE;
- }
-
-full_search:
- if (do_colour_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(mm->free_area_cache);
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (unlikely(TASK_SIZE - len < addr)) {
- /*
- * Start a new search - just in case we missed
- * some holes.
- */
- if (start_addr != TASK_UNMAPPED_BASE) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
- }
- if (likely(!vma || addr + len <= vma->vm_start)) {
- /*
- * Remember the place where we stopped the search:
- */
- mm->free_area_cache = addr + len;
- return addr;
- }
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
-
- addr = vma->vm_end;
- if (do_colour_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- }
-}
-#endif /* CONFIG_MMU */
-
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, int fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -147,127 +34,62 @@ asmlinkage int old_mmap(unsigned long addr, unsigned long len,
{
if (off & ~PAGE_MASK)
return -EINVAL;
- return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
+ /*
+ * The shift for mmap2 is constant, regardless of PAGE_SIZE
+ * setting.
+ */
+ if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
+ return -EINVAL;
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second,
- int third, void __user *ptr, long fifth)
-{
- int version, ret;
+ pgoff >>= PAGE_SHIFT - 12;
- version = call >> 16; /* hack for backward compatibility */
- call &= 0xffff;
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
- if (call <= SEMCTL)
- switch (call) {
- case SEMOP:
- return sys_semtimedop(first,
- (struct sembuf __user *)ptr,
- second, NULL);
- case SEMTIMEDOP:
- return sys_semtimedop(first,
- (struct sembuf __user *)ptr, second,
- (const struct timespec __user *)fifth);
- case SEMGET:
- return sys_semget (first, second, third);
- case SEMCTL: {
- union semun fourth;
- if (!ptr)
- return -EINVAL;
- if (get_user(fourth.__pad, (void * __user *) ptr))
- return -EFAULT;
- return sys_semctl (first, second, third, fourth);
- }
- default:
- return -EINVAL;
- }
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
+{
+ struct vm_area_struct *vma;
- if (call <= MSGCTL)
- switch (call) {
- case MSGSND:
- return sys_msgsnd (first, (struct msgbuf __user *) ptr,
- second, third);
- case MSGRCV:
- switch (version) {
- case 0:
- {
- struct ipc_kludge tmp;
+ if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
+ return -EINVAL;
- if (!ptr)
- return -EINVAL;
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ if (addr + len < addr)
+ return -EFAULT;
- if (copy_from_user(&tmp,
- (struct ipc_kludge __user *) ptr,
- sizeof (tmp)))
- return -EFAULT;
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma (current->mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
- return sys_msgrcv (first, tmp.msgp, second,
- tmp.msgtyp, third);
- }
- default:
- return sys_msgrcv (first,
- (struct msgbuf __user *) ptr,
- second, fifth, third);
- }
- case MSGGET:
- return sys_msgget ((key_t) first, second);
- case MSGCTL:
- return sys_msgctl (first, second,
- (struct msqid_ds __user *) ptr);
- default:
- return -EINVAL;
- }
- if (call <= SHMCTL)
- switch (call) {
- case SHMAT:
- switch (version) {
- default: {
- ulong raddr;
- ret = do_shmat (first, (char __user *) ptr,
- second, &raddr);
- if (ret)
- return ret;
- return put_user (raddr, (ulong __user *) third);
- }
- case 1: /* iBCS2 emulator entry point */
- if (!segment_eq(get_fs(), get_ds()))
- return -EINVAL;
- return do_shmat (first, (char __user *) ptr,
- second, (ulong *) third);
- }
- case SHMDT:
- return sys_shmdt ((char __user *)ptr);
- case SHMGET:
- return sys_shmget (first, second, third);
- case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds __user *) ptr);
- default:
- return -EINVAL;
- }
+ switch (op & CACHEFLUSH_D_PURGE) {
+ case CACHEFLUSH_D_INVAL:
+ __flush_invalidate_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_WB:
+ __flush_wback_region((void *)addr, len);
+ break;
+ case CACHEFLUSH_D_PURGE:
+ __flush_purge_region((void *)addr, len);
+ break;
+ }
- return -EINVAL;
-}
+ if (op & CACHEFLUSH_I)
+ flush_icache_range(addr, addr+len);
-asmlinkage int sys_uname(struct old_utsname * name)
-{
- int err;
- if (!name)
- return -EFAULT;
- down_read(&uts_sem);
- err = copy_to_user(name, utsname(), sizeof (*name));
- up_read(&uts_sem);
- return err?-EFAULT:0;
+ up_read(&current->mm->mmap_sem);
+ return 0;
}
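For illustration only (not part of this patch): the new sys_cacheflush() above takes a user address range plus an operation mask built from the <asm/cachectl.h> constants (data-cache invalidate, write-back, or purge, optionally combined with an instruction-cache flush). The userspace sketch below assumes the call is reachable as __NR_cacheflush through the syscall-table slot patched further down; that numbering is not spelled out in this hunk.

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>

/* Make freshly written code (e.g. a JIT buffer) visible to the icache. */
static int flush_code_range(void *buf, size_t len)
{
	return syscall(__NR_cacheflush, (unsigned long)buf, len,
		       CACHEFLUSH_D_WB | CACHEFLUSH_I);
}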
diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c
index 125e493ead8..b66d1c62eb1 100644
--- a/arch/sh/kernel/sys_sh32.c
+++ b/arch/sh/kernel/sys_sh32.c
@@ -9,41 +9,38 @@
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
-#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
+#include <asm/syscalls.h>
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
-asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage int sys_sh_pipe(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
int fd[2];
int error;
- error = do_pipe(fd);
+ error = do_pipe_flags(fd, 0);
if (!error) {
- regs->regs[1] = fd[1];
+ current_pt_regs()->regs[1] = fd[1];
return fd[0];
}
return error;
}
-asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
size_t count, long dummy, loff_t pos)
{
return sys_pread64(fd, buf, count, pos);
}
-asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
size_t count, long dummy, loff_t pos)
{
return sys_pwrite64(fd, buf, count, pos);
@@ -60,25 +57,3 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
(u64)len0 << 32 | len1, advice);
#endif
}
-
-#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
-#define SYSCALL_ARG3 "trapa #0x23"
-#else
-#define SYSCALL_ARG3 "trapa #0x13"
-#endif
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- register long __sc0 __asm__ ("r3") = __NR_execve;
- register long __sc4 __asm__ ("r4") = (long) filename;
- register long __sc5 __asm__ ("r5") = (long) argv;
- register long __sc6 __asm__ ("r6") = (long) envp;
- __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
- : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
- : "memory");
- return __sc0;
-}
diff --git a/arch/sh/kernel/sys_sh64.c b/arch/sh/kernel/sys_sh64.c
deleted file mode 100644
index 578004d71e0..00000000000
--- a/arch/sh/kernel/sys_sh64.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * arch/sh/kernel/sys_sh64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the Linux/SH5
- * platform.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/errno.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/utsname.h>
-#include <linux/syscalls.h>
-#include <linux/ipc.h>
-#include <asm/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/unistd.h>
-
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
-asmlinkage int sys_pipe(unsigned long * fildes)
-{
- int fd[2];
- int error;
-
- error = do_pipe(fd);
- if (!error) {
- if (copy_to_user(fildes, fd, 2*sizeof(int)))
- error = -EFAULT;
- }
- return error;
-}
-
-/*
- * Do a system call from kernel instead of calling sys_execve so we
- * end up with proper pt_regs.
- */
-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
-{
- register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
- register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
- register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
- register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
- __asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)"
- : "=r" (__sc0)
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
- __asm__ __volatile__ ("!dummy %0 %1 %2 %3"
- : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
- return __sc0;
-}
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index a46cc3a4114..734234be2f0 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -58,7 +58,7 @@ ENTRY(sys_call_table)
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
- .long sys_pipe
+ .long sys_sh_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
@@ -105,7 +105,7 @@ ENTRY(sys_call_table)
.long sys_uselib
.long sys_swapon
.long sys_reboot
- .long old_readdir
+ .long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
@@ -139,7 +139,7 @@ ENTRY(sys_call_table)
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
@@ -185,7 +185,7 @@ ENTRY(sys_call_table)
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
- .long sys_nfsservctl
+ .long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
@@ -204,8 +204,8 @@ ENTRY(sys_call_table)
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
- .long sys_ni_syscall /* streams1 */
- .long sys_ni_syscall /* streams2 */
+ .long sys_ni_syscall /* getpmsg */
+ .long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
@@ -259,8 +259,8 @@ ENTRY(sys_call_table)
.long sys_futex /* 240 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
- .long sys_ni_syscall
- .long sys_ni_syscall
+ .long sys_ni_syscall /* reserved for set_thread_area */
+ .long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup /* 245 */
.long sys_io_destroy
.long sys_io_getevents
@@ -343,3 +343,46 @@ ENTRY(sys_call_table)
.long sys_fallocate
.long sys_timerfd_settime /* 325 */
.long sys_timerfd_gettime
+ .long sys_signalfd4
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3 /* 330 */
+ .long sys_pipe2
+ .long sys_inotify_init1
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo /* 335 */
+ .long sys_perf_event_open
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
+ /* Broken-out socket family */
+ .long sys_socket /* 340 */
+ .long sys_bind
+ .long sys_connect
+ .long sys_listen
+ .long sys_accept
+ .long sys_getsockname /* 345 */
+ .long sys_getpeername
+ .long sys_socketpair
+ .long sys_send
+ .long sys_sendto
+ .long sys_recv /* 350 */
+ .long sys_recvfrom
+ .long sys_shutdown
+ .long sys_setsockopt
+ .long sys_getsockopt
+ .long sys_sendmsg /* 355 */
+ .long sys_recvmsg
+ .long sys_recvmmsg
+ .long sys_accept4
+ .long sys_name_to_handle_at
+ .long sys_open_by_handle_at /* 360 */
+ .long sys_clock_adjtime
+ .long sys_syncfs
+ .long sys_sendmmsg
+ .long sys_setns
+ .long sys_process_vm_readv /* 365 */
+ .long sys_process_vm_writev
+ .long sys_kcmp
+ .long sys_finit_module
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index d5d7843aad9..579fcb9a896 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -109,7 +109,7 @@ sys_call_table:
.long sys_uselib
.long sys_swapon
.long sys_reboot
- .long old_readdir
+ .long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
@@ -143,7 +143,7 @@ sys_call_table:
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
@@ -189,7 +189,7 @@ sys_call_table:
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
- .long sys_nfsservctl
+ .long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
@@ -208,8 +208,8 @@ sys_call_table:
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
- .long sys_ni_syscall /* streams1 */
- .long sys_ni_syscall /* streams2 */
+ .long sys_ni_syscall /* getpmsg */
+ .long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
@@ -296,8 +296,8 @@ sys_call_table:
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
- .long sys_ni_syscall
- .long sys_ni_syscall
+ .long sys_ni_syscall /* reserved for set_thread_area */
+ .long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
@@ -381,3 +381,28 @@ sys_call_table:
.long sys_fallocate
.long sys_timerfd_settime
.long sys_timerfd_gettime
+ .long sys_signalfd4 /* 355 */
+ .long sys_eventfd2
+ .long sys_epoll_create1
+ .long sys_dup3
+ .long sys_pipe2
+ .long sys_inotify_init1 /* 360 */
+ .long sys_preadv
+ .long sys_pwritev
+ .long sys_rt_tgsigqueueinfo
+ .long sys_perf_event_open
+ .long sys_recvmmsg /* 365 */
+ .long sys_accept4
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_prlimit64
+ .long sys_name_to_handle_at /* 370 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
+ .long sys_sendmmsg
+ .long sys_setns /* 375 */
+ .long sys_process_vm_readv
+ .long sys_process_vm_writev
+ .long sys_kcmp
+ .long sys_finit_module
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
new file mode 100644
index 00000000000..552c8fcf941
--- /dev/null
+++ b/arch/sh/kernel/time.c
@@ -0,0 +1,115 @@
+/*
+ * arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
+ * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 2002 - 2009 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/clockchips.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/rtc.h>
+#include <asm/clock.h>
+#include <asm/rtc.h>
+
+/* Dummy RTC ops */
+static void null_rtc_get_time(struct timespec *tv)
+{
+ tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
+ tv->tv_nsec = 0;
+}
+
+static int null_rtc_set_time(const time_t secs)
+{
+ return 0;
+}
+
+void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
+int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
+
+void read_persistent_clock(struct timespec *ts)
+{
+ rtc_sh_get_time(ts);
+}
+
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
+int update_persistent_clock(struct timespec now)
+{
+ return rtc_sh_set_time(now.tv_sec);
+}
+#endif
+
+unsigned int get_rtc_time(struct rtc_time *tm)
+{
+ if (rtc_sh_get_time != null_rtc_get_time) {
+ struct timespec tv;
+
+ rtc_sh_get_time(&tv);
+ rtc_time_to_tm(tv.tv_sec, tm);
+ }
+
+ return RTC_24H;
+}
+EXPORT_SYMBOL(get_rtc_time);
+
+int set_rtc_time(struct rtc_time *tm)
+{
+ unsigned long secs;
+
+ rtc_tm_to_time(tm, &secs);
+ return rtc_sh_set_time(secs);
+}
+EXPORT_SYMBOL(set_rtc_time);
+
+static int __init rtc_generic_init(void)
+{
+ struct platform_device *pdev;
+
+ if (rtc_sh_get_time == null_rtc_get_time)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return 0;
+}
+module_init(rtc_generic_init);
+
+void (*board_time_init)(void);
+
+static void __init sh_late_time_init(void)
+{
+ /*
+ * Make sure all compiled-in early timers register themselves.
+ *
+ * Run probe() for two "earlytimer" devices, these will be the
+ * clockevents and clocksource devices respectively. In the event
+ * that only a clockevents device is available, we -ENODEV on the
+ * clocksource and the jiffies clocksource is used transparently
+ * instead. No error handling is necessary here.
+ */
+ early_platform_driver_register_all("earlytimer");
+ early_platform_driver_probe("earlytimer", 2, 0);
+}
+
+void __init time_init(void)
+{
+ if (board_time_init)
+ board_time_init();
+
+ clk_init();
+
+ late_time_init = sh_late_time_init;
+}
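As a sketch (not from this patch): the consolidated time.c above keeps the rtc_sh_get_time/rtc_sh_set_time function pointers and the board_time_init hook, so board code can still replace the dummy RTC ops. A minimal example of such an override follows; all "myboard" names are hypothetical.

#include <linux/time.h>
#include <asm/rtc.h>

static void myboard_rtc_get_time(struct timespec *ts)
{
	/* a real board would read its RTC hardware here */
	ts->tv_sec = mktime(2009, 1, 1, 0, 0, 0);
	ts->tv_nsec = 0;
}

static int myboard_rtc_set_time(const time_t secs)
{
	/* a real board would program its RTC hardware here */
	return 0;
}

static void __init myboard_time_init(void)
{
	rtc_sh_get_time = myboard_rtc_get_time;
	rtc_sh_set_time = myboard_rtc_set_time;
}

/* Typically wired up from the board's setup code, e.g.
 *	board_time_init = myboard_time_init;
 * before time_init() runs, so rtc_generic_init() registers rtc-generic. */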
diff --git a/arch/sh/kernel/time_32.c b/arch/sh/kernel/time_32.c
deleted file mode 100644
index 7281342c044..00000000000
--- a/arch/sh/kernel/time_32.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * arch/sh/kernel/time.c
- *
- * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
- * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2002 - 2007 Paul Mundt
- * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
- *
- * Some code taken from i386 version.
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/profile.h>
-#include <linux/timex.h>
-#include <linux/sched.h>
-#include <linux/clockchips.h>
-#include <asm/clock.h>
-#include <asm/rtc.h>
-#include <asm/timer.h>
-#include <asm/kgdb.h>
-
-struct sys_timer *sys_timer;
-
-/* Move this somewhere more sensible.. */
-DEFINE_SPINLOCK(rtc_lock);
-EXPORT_SYMBOL(rtc_lock);
-
-/* Dummy RTC ops */
-static void null_rtc_get_time(struct timespec *tv)
-{
- tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
- tv->tv_nsec = 0;
-}
-
-static int null_rtc_set_time(const time_t secs)
-{
- return 0;
-}
-
-/*
- * Null high precision timer functions for systems lacking one.
- */
-static cycle_t null_hpt_read(void)
-{
- return 0;
-}
-
-void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
-int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
-
-#ifndef CONFIG_GENERIC_TIME
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long seq;
- unsigned long usec, sec;
-
- do {
- /*
- * Turn off IRQs when grabbing xtime_lock, so that
- * the sys_timer get_offset code doesn't have to handle it.
- */
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- usec = get_timer_offset();
- sec = xtime.tv_sec;
- usec += xtime.tv_nsec / NSEC_PER_USEC;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= get_timer_offset() * NSEC_PER_USEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
-
- return 0;
-}
-EXPORT_SYMBOL(do_settimeofday);
-#endif /* !CONFIG_GENERIC_TIME */
-
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
-/* last time the RTC clock got updated */
-static long last_rtc_update;
-
-/*
- * handle_timer_tick() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- */
-void handle_timer_tick(void)
-{
- if (current->pid)
- profile_tick(CPU_PROFILING);
-
-#ifdef CONFIG_HEARTBEAT
- if (sh_mv.mv_heartbeat != NULL)
- sh_mv.mv_heartbeat();
-#endif
-
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_seqlock(&xtime_lock);
- do_timer(1);
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (rtc_sh_set_time(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* do it again in 60s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- write_sequnlock(&xtime_lock);
-
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-}
-#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
-
-#ifdef CONFIG_PM
-int timer_suspend(struct sys_device *dev, pm_message_t state)
-{
- struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
-
- sys_timer->ops->stop();
-
- return 0;
-}
-
-int timer_resume(struct sys_device *dev)
-{
- struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
-
- sys_timer->ops->start();
-
- return 0;
-}
-#else
-#define timer_suspend NULL
-#define timer_resume NULL
-#endif
-
-static struct sysdev_class timer_sysclass = {
- .name = "timer",
- .suspend = timer_suspend,
- .resume = timer_resume,
-};
-
-static int __init timer_init_sysfs(void)
-{
- int ret = sysdev_class_register(&timer_sysclass);
- if (ret != 0)
- return ret;
-
- sys_timer->dev.cls = &timer_sysclass;
- return sysdev_register(&sys_timer->dev);
-}
-device_initcall(timer_init_sysfs);
-
-void (*board_time_init)(void);
-
-/*
- * Shamelessly based on the MIPS and Sparc64 work.
- */
-static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
-unsigned long sh_hpt_frequency = 0;
-
-#define NSEC_PER_CYC_SHIFT 10
-
-struct clocksource clocksource_sh = {
- .name = "SuperH",
- .rating = 200,
- .mask = CLOCKSOURCE_MASK(32),
- .read = null_hpt_read,
- .shift = 16,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void __init init_sh_clocksource(void)
-{
- if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
- return;
-
- clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
- clocksource_sh.shift);
-
- timer_ticks_per_nsec_quotient =
- clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
-
- clocksource_register(&clocksource_sh);
-}
-
-#ifdef CONFIG_GENERIC_TIME
-unsigned long long sched_clock(void)
-{
- unsigned long long ticks = clocksource_sh.read();
- return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
-}
-#endif
-
-void __init time_init(void)
-{
- if (board_time_init)
- board_time_init();
-
- clk_init();
-
- rtc_sh_get_time(&xtime);
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
-
- /*
- * Find the timer to use as the system timer, it will be
- * initialized for us.
- */
- sys_timer = get_sys_timer();
- printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
-
- if (sys_timer->ops->read)
- clocksource_sh.read = sys_timer->ops->read;
-
- init_sh_clocksource();
-
- if (sh_hpt_frequency)
- printk("Using %lu.%03lu MHz high precision timer.\n",
- ((sh_hpt_frequency + 500) / 1000) / 1000,
- ((sh_hpt_frequency + 500) / 1000) % 1000);
-
-#if defined(CONFIG_SH_KGDB)
- /*
- * Set up kgdb as requested. We do it here because the serial
- * init uses the timer vars we just set up for figuring baud.
- */
- kgdb_init();
-#endif
-}
diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
deleted file mode 100644
index 898977ee203..00000000000
--- a/arch/sh/kernel/time_64.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * arch/sh/kernel/time_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- * Copyright (C) 2003 Richard Curnow
- *
- * Original TMU/RTC code taken from sh version.
- * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
- * Some code taken from i386 version.
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/errno.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/profile.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/bcd.h>
-#include <linux/timex.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <asm/cpu/registers.h> /* required by inline __asm__ stmt. */
-#include <asm/cpu/irq.h>
-#include <asm/addrspace.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-
-#define TMU_TOCR_INIT 0x00
-#define TMU0_TCR_INIT 0x0020
-#define TMU_TSTR_INIT 1
-#define TMU_TSTR_OFF 0
-
-/* Real Time Clock */
-#define RTC_BLOCK_OFF 0x01040000
-#define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
-#define RTC_RCR1_CIE 0x10 /* Carry Interrupt Enable */
-#define RTC_RCR1 (rtc_base + 0x38)
-
-/* Clock, Power and Reset Controller */
-#define CPRC_BLOCK_OFF 0x01010000
-#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
-
-#define FRQCR (cprc_base+0x0)
-#define WTCSR (cprc_base+0x0018)
-#define STBCR (cprc_base+0x0030)
-
-/* Time Management Unit */
-#define TMU_BLOCK_OFF 0x01020000
-#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
-#define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
-#define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
-#define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
-
-#define TMU_TOCR tmu_base+0x0 /* Byte access */
-#define TMU_TSTR tmu_base+0x4 /* Byte access */
-
-#define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
-#define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
-#define TMU0_TCR TMU0_BASE+0x8 /* Word access */
-
-#define TICK_SIZE (tick_nsec / 1000)
-
-static unsigned long tmu_base, rtc_base;
-unsigned long cprc_base;
-
-/* Variables to allow interpolation of time of day to resolution better than a
- * jiffy. */
-
-/* This is effectively protected by xtime_lock */
-static unsigned long ctc_last_interrupt;
-static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
-
-#define CTC_JIFFY_SCALE_SHIFT 40
-
-/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
-static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
-
-/* Estimate number of microseconds that have elapsed since the last timer tick,
- by scaling the delta that has occurred in the CTC register.
-
- WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
- the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
- in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
- probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
- sleeping, though will be coarser.
-
- FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
- is running or if the freq or tick arguments of adjtimex are modified after
- we have calibrated the scaling factor? This will result in either a jump at
- the end of a tick period, or a wrap backwards at the start of the next one,
- if the application is reading the time of day often enough. I think we
- ought to do better than this. For this reason, usecs_per_jiffy is left
- separated out in the calculation below. This allows some future hook into
- the adjtime-related stuff in kernel/timer.c to remove this hazard.
-
-*/
-
-static unsigned long usecs_since_tick(void)
-{
- unsigned long long current_ctc;
- long ctc_ticks_since_interrupt;
- unsigned long long ull_ctc_ticks_since_interrupt;
- unsigned long result;
-
- unsigned long long mul1_out;
- unsigned long long mul1_out_high;
- unsigned long long mul2_out_low, mul2_out_high;
-
- /* Read CTC register */
- asm ("getcon cr62, %0" : "=r" (current_ctc));
- /* Note, the CTC counts down on each CPU clock, not up.
- Note(2), use long type to get correct wraparound arithmetic when
- the counter crosses zero. */
- ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
- ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;
-
- /* Inline assembly to do 32x32x32->64 multiplier */
- asm volatile ("mulu.l %1, %2, %0" :
- "=r" (mul1_out) :
- "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));
-
- mul1_out_high = mul1_out >> 32;
-
- asm volatile ("mulu.l %1, %2, %0" :
- "=r" (mul2_out_low) :
- "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));
-
-#if 1
- asm volatile ("mulu.l %1, %2, %0" :
- "=r" (mul2_out_high) :
- "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
-#endif
-
- result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
-
- return result;
-}
-
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long seq;
- unsigned long usec, sec;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- usec = usecs_since_tick();
- sec = xtime.tv_sec;
- usec += xtime.tv_nsec / 1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * made, and then undo it!
- */
- nsec -= 1000 * usecs_since_tick();
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
-
- return 0;
-}
-EXPORT_SYMBOL(do_settimeofday);
-
-/* Dummy RTC ops */
-static void null_rtc_get_time(struct timespec *tv)
-{
- tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
- tv->tv_nsec = 0;
-}
-
-static int null_rtc_set_time(const time_t secs)
-{
- return 0;
-}
-
-void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
-int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
-
-/* last time the RTC clock got updated */
-static long last_rtc_update;
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- */
-static inline void do_timer_interrupt(void)
-{
- unsigned long long current_ctc;
-
- if (current->pid)
- profile_tick(CPU_PROFILING);
-
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_lock(&xtime_lock);
- asm ("getcon cr62, %0" : "=r" (current_ctc));
- ctc_last_interrupt = (unsigned long) current_ctc;
-
- do_timer(1);
-
-#ifdef CONFIG_HEARTBEAT
- if (sh_mv.mv_heartbeat != NULL)
- sh_mv.mv_heartbeat();
-#endif
-
- /*
- * If we have an externally synchronized Linux clock, then update
- * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
- * called as close as possible to 500 ms before the new second starts.
- */
- if (ntp_synced() &&
- xtime.tv_sec > last_rtc_update + 660 &&
- (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
- (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (rtc_sh_set_time(xtime.tv_sec) == 0)
- last_rtc_update = xtime.tv_sec;
- else
- /* do it again in 60 s */
- last_rtc_update = xtime.tv_sec - 600;
- }
- write_unlock(&xtime_lock);
-
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-}
-
-/*
- * This is the same as the above, except we _also_ save the current
- * Time Stamp Counter value at the time of the timer interrupt, so that
- * we later on can estimate the time of day more exactly.
- */
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- unsigned long timer_status;
-
- /* Clear UNF bit */
- timer_status = ctrl_inw(TMU0_TCR);
- timer_status &= ~0x100;
- ctrl_outw(timer_status, TMU0_TCR);
-
- do_timer_interrupt();
-
- return IRQ_HANDLED;
-}
-
-
-static __init unsigned int get_cpu_hz(void)
-{
- unsigned int count;
- unsigned long __dummy;
- unsigned long ctc_val_init, ctc_val;
-
- /*
- ** Regardless the toolchain, force the compiler to use the
- ** arbitrary register r3 as a clock tick counter.
- ** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
- */
- register unsigned long long __rtc_irq_flag __asm__ ("r3");
-
- local_irq_enable();
- do {} while (ctrl_inb(rtc_base) != 0);
- ctrl_outb(RTC_RCR1_CIE, RTC_RCR1); /* Enable carry interrupt */
-
- /*
- * r3 is arbitrary. CDC does not support "=z".
- */
- ctc_val_init = 0xffffffff;
- ctc_val = ctc_val_init;
-
- asm volatile("gettr tr0, %1\n\t"
- "putcon %0, " __CTC "\n\t"
- "and %2, r63, %2\n\t"
- "pta $+4, tr0\n\t"
- "beq/l %2, r63, tr0\n\t"
- "ptabs %1, tr0\n\t"
- "getcon " __CTC ", %0\n\t"
- : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)
- : "0" (0));
- local_irq_disable();
- /*
- * SH-3:
- * CPU clock = 4 stages * loop
- * tst rm,rm if id ex
- * bt/s 1b if id ex
- * add #1,rd if id ex
- * (if) pipe line stole
- * tst rm,rm if id ex
- * ....
- *
- *
- * SH-4:
- * CPU clock = 6 stages * loop
- * I don't know why.
- * ....
- *
- * SH-5:
- * Use CTC register to count. This approach returns the right value
- * even if the I-cache is disabled (e.g. whilst debugging.)
- *
- */
-
- count = ctc_val_init - ctc_val; /* CTC counts down */
-
- /*
- * This really is count by the number of clock cycles
- * by the ratio between a complete R64CNT
- * wrap-around (128) and CUI interrupt being raised (64).
- */
- return count*2;
-}
-
-static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id)
-{
- struct pt_regs *regs = get_irq_regs();
-
- ctrl_outb(0, RTC_RCR1); /* Disable Carry Interrupts */
- regs->regs[3] = 1; /* Using r3 */
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq0 = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
- .name = "timer",
-};
-static struct irqaction irq1 = {
- .handler = sh64_rtc_interrupt,
- .flags = IRQF_DISABLED,
- .mask = CPU_MASK_NONE,
- .name = "rtc",
-};
-
-void __init time_init(void)
-{
- unsigned int cpu_clock, master_clock, bus_clock, module_clock;
- unsigned long interval;
- unsigned long frqcr, ifc, pfc;
- static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
-#define bfc_table ifc_table /* Same */
-#define pfc_table ifc_table /* Same */
-
- tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
- if (!tmu_base) {
- panic("Unable to remap TMU\n");
- }
-
- rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
- if (!rtc_base) {
- panic("Unable to remap RTC\n");
- }
-
- cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
- if (!cprc_base) {
- panic("Unable to remap CPRC\n");
- }
-
- rtc_sh_get_time(&xtime);
-
- setup_irq(TIMER_IRQ, &irq0);
- setup_irq(RTC_IRQ, &irq1);
-
- /* Check how fast it is.. */
- cpu_clock = get_cpu_hz();
-
- /* Note careful order of operations to maintain reasonable precision and avoid overflow. */
- scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));
-
- free_irq(RTC_IRQ, NULL);
-
- printk("CPU clock: %d.%02dMHz\n",
- (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);
- {
- unsigned short bfc;
- frqcr = ctrl_inl(FRQCR);
- ifc = ifc_table[(frqcr>> 6) & 0x0007];
- bfc = bfc_table[(frqcr>> 3) & 0x0007];
- pfc = pfc_table[(frqcr>> 12) & 0x0007];
- master_clock = cpu_clock * ifc;
- bus_clock = master_clock/bfc;
- }
-
- printk("Bus clock: %d.%02dMHz\n",
- (bus_clock/1000000), (bus_clock % 1000000)/10000);
- module_clock = master_clock/pfc;
- printk("Module clock: %d.%02dMHz\n",
- (module_clock/1000000), (module_clock % 1000000)/10000);
- interval = (module_clock/(HZ*4));
-
- printk("Interval = %ld\n", interval);
-
- current_cpu_data.cpu_clock = cpu_clock;
- current_cpu_data.master_clock = master_clock;
- current_cpu_data.bus_clock = bus_clock;
- current_cpu_data.module_clock = module_clock;
-
- /* Start TMU0 */
- ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
- ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
- ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
- ctrl_outl(interval, TMU0_TCOR);
- ctrl_outl(interval, TMU0_TCNT);
- ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
-}
-
-void enter_deep_standby(void)
-{
- /* Disable watchdog timer */
- ctrl_outl(0xa5000000, WTCSR);
- /* Configure deep standby on sleep */
- ctrl_outl(0x03, STBCR);
-
-#ifdef CONFIG_SH_ALPHANUMERIC
- {
- extern void mach_alphanum(int position, unsigned char value);
- extern void mach_alphanum_brightness(int setting);
- char halted[] = "Halted. ";
- int i;
- mach_alphanum_brightness(6); /* dimmest setting above off */
- for (i=0; i<8; i++) {
- mach_alphanum(i, halted[i]);
- }
- asm __volatile__ ("synco");
- }
-#endif
-
- asm __volatile__ ("sleep");
- asm __volatile__ ("synci");
- asm __volatile__ ("nop");
- asm __volatile__ ("nop");
- asm __volatile__ ("nop");
- asm __volatile__ ("nop");
- panic("Unexpected wakeup!\n");
-}
-
-static struct resource rtc_resources[] = {
- [0] = {
- /* RTC base, filled in by rtc_init */
- .flags = IORESOURCE_IO,
- },
- [1] = {
- /* Period IRQ */
- .start = IRQ_PRI,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = IRQ_CUI,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = IRQ_ATI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device rtc_device = {
- .name = "sh-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(rtc_resources),
- .resource = rtc_resources,
-};
-
-static int __init rtc_init(void)
-{
- rtc_resources[0].start = rtc_base;
- rtc_resources[0].end = rtc_resources[0].start + 0x58 - 1;
-
- return platform_device_register(&rtc_device);
-}
-device_initcall(rtc_init);
diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile
deleted file mode 100644
index bcf244ff6a1..00000000000
--- a/arch/sh/kernel/timers/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for the various Linux/SuperH timers
-#
-
-obj-y := timer.o
-
-obj-$(CONFIG_SH_TMU) += timer-tmu.o
-obj-$(CONFIG_SH_MTU2) += timer-mtu2.o
-obj-$(CONFIG_SH_CMT) += timer-cmt.o
-
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
deleted file mode 100644
index d20c8c37588..00000000000
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support
- *
- * Copyright (C) 2005 Yoshinori Sato
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/seqlock.h>
-#include <asm/timer.h>
-#include <asm/rtc.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/clock.h>
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CMT_CMSTR 0xf84a0070
-#define CMT_CMCSR_0 0xf84a0072
-#define CMT_CMCNT_0 0xf84a0074
-#define CMT_CMCOR_0 0xf84a0076
-#define CMT_CMCSR_1 0xf84a0078
-#define CMT_CMCNT_1 0xf84a007a
-#define CMT_CMCOR_1 0xf84a007c
-
-#define STBCR3 0xf80a0000
-#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
-#define CMT_CMCSR_INIT 0x0040
-#define CMT_CMCSR_CALIB 0x0000
-#elif defined(CONFIG_CPU_SUBTYPE_SH7203) || \
- defined(CONFIG_CPU_SUBTYPE_SH7206) || \
- defined(CONFIG_CPU_SUBTYPE_SH7263)
-#define CMT_CMSTR 0xfffec000
-#define CMT_CMCSR_0 0xfffec002
-#define CMT_CMCNT_0 0xfffec004
-#define CMT_CMCOR_0 0xfffec006
-
-#define STBCR4 0xfffe040c
-#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0)
-#define CMT_CMCSR_INIT 0x0040
-#define CMT_CMCSR_CALIB 0x0000
-#else
-#error "Unknown CPU SUBTYPE"
-#endif
-
-static unsigned long cmt_timer_get_offset(void)
-{
- int count;
- static unsigned short count_p = 0xffff; /* for the first call after boot */
- static unsigned long jiffies_p = 0;
-
- /*
- * cache volatile jiffies temporarily; we have IRQs turned off.
- */
- unsigned long jiffies_t;
-
- /* timer count may underflow right here */
- count = ctrl_inw(CMT_CMCOR_0);
- count -= ctrl_inw(CMT_CMCNT_0);
-
- jiffies_t = jiffies;
-
- /*
- * avoiding timer inconsistencies (they are rare, but they happen)...
- * there is one kind of problem that must be avoided here:
- * 1. the timer counter underflows
- */
-
- if (jiffies_t == jiffies_p) {
- if (count > count_p) {
- /* the nutcase */
- if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */
- count -= LATCH;
- } else {
- printk("%s (): hardware timer problem?\n",
- __func__);
- }
- }
- } else
- jiffies_p = jiffies_t;
-
- count_p = count;
-
- count = ((LATCH-1) - count) * TICK_SIZE;
- count = (count + LATCH/2) / LATCH;
-
- return count;
-}
-
-static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
-{
- unsigned long timer_status;
-
- /* Clear CMF bit */
- timer_status = ctrl_inw(CMT_CMCSR_0);
- timer_status &= ~0x80;
- ctrl_outw(timer_status, CMT_CMCSR_0);
-
- handle_timer_tick();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction cmt_irq = {
- .name = "timer",
- .handler = cmt_timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
-};
-
-static void cmt_clk_init(struct clk *clk)
-{
- u8 divisor = CMT_CMCSR_INIT & 0x3;
- ctrl_inw(CMT_CMCSR_0);
- ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0);
- clk->parent = clk_get(NULL, "module_clk");
- clk->rate = clk->parent->rate / (8 << (divisor << 1));
-}
-
-static void cmt_clk_recalc(struct clk *clk)
-{
- u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3;
- clk->rate = clk->parent->rate / (8 << (divisor << 1));
-}
-
-static struct clk_ops cmt_clk_ops = {
- .init = cmt_clk_init,
- .recalc = cmt_clk_recalc,
-};
-
-static struct clk cmt0_clk = {
- .name = "cmt0_clk",
- .ops = &cmt_clk_ops,
-};
-
-static int cmt_timer_start(void)
-{
- ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR);
- return 0;
-}
-
-static int cmt_timer_stop(void)
-{
- ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR);
- return 0;
-}
-
-static int cmt_timer_init(void)
-{
- unsigned long interval;
-
- cmt_clock_enable();
-
- setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq);
-
- cmt0_clk.parent = clk_get(NULL, "module_clk");
-
- cmt_timer_stop();
-
- interval = cmt0_clk.parent->rate / 8 / HZ;
- printk(KERN_INFO "Interval = %ld\n", interval);
-
- ctrl_outw(interval, CMT_CMCOR_0);
-
- clk_register(&cmt0_clk);
- clk_enable(&cmt0_clk);
-
- cmt_timer_start();
-
- return 0;
-}
-
-struct sys_timer_ops cmt_timer_ops = {
- .init = cmt_timer_init,
- .start = cmt_timer_start,
- .stop = cmt_timer_stop,
-#ifndef CONFIG_GENERIC_TIME
- .get_offset = cmt_timer_get_offset,
-#endif
-};
-
-struct sys_timer cmt_timer = {
- .name = "cmt",
- .ops = &cmt_timer_ops,
-};
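
Annotation: the rate formula in cmt_clk_init()/cmt_clk_recalc() above encodes the CMT prescaler selected by CMCSR bits 1:0 (8, 32, 128 or 512, hence "8 << (divisor << 1)"), and cmt_timer_init() programs CMCOR_0 with module_clk / 8 / HZ since CMCSR_INIT selects the /8 tap. A minimal userspace sketch of that arithmetic, assuming an illustrative 33.33 MHz module clock and HZ=100 (both values are placeholders, not taken from the patch):

#include <stdio.h>

/* Prescaler selected by CMCSR bits 1:0: 8, 32, 128, 512. */
static unsigned long cmt_rate(unsigned long parent_rate, unsigned int cmcsr)
{
	unsigned int divisor = cmcsr & 0x3;

	return parent_rate / (8 << (divisor << 1));
}

int main(void)
{
	const unsigned long module_clk = 33333333;	/* assumed, Hz */
	const unsigned int hz = 100;			/* assumed tick rate */
	unsigned int d;

	for (d = 0; d < 4; d++)
		printf("CMCSR[1:0]=%u -> CMT clock %lu Hz\n",
		       d, cmt_rate(module_clk, d));

	/* cmt_timer_init() writes module_clk / 8 / HZ into CMCOR_0. */
	printf("periodic reload value: %lu\n", module_clk / 8 / hz);
	return 0;
}
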
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
deleted file mode 100644
index fe453c01f9c..00000000000
--- a/arch/sh/kernel/timers/timer-mtu2.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support
- *
- * Copyright (C) 2005 Paul Mundt
- *
- * Based off of arch/sh/kernel/timers/timer-tmu.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/seqlock.h>
-#include <asm/timer.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/clock.h>
-
-/*
- * We use channel 1 for our lowly system timer. Channel 2 would be the other
- * likely candidate, but we leave it alone as it has higher divisors that
- * would be of more use to other more interesting applications.
- *
- * TODO: Presently we only implement a 16-bit single-channel system timer.
- * However, we can implement channel cascade if we go the overflow route and
- * get away with using 2 MTU2 channels as a 32-bit timer.
- */
-#define MTU2_TSTR 0xfffe4280
-#define MTU2_TCR_1 0xfffe4380
-#define MTU2_TMDR_1 0xfffe4381
-#define MTU2_TIOR_1 0xfffe4382
-#define MTU2_TIER_1 0xfffe4384
-#define MTU2_TSR_1 0xfffe4385
-#define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */
-#define MTU2_TGRA_1 0xfffe438a
-
-#define STBCR3 0xfffe0408
-
-#define MTU2_TSTR_CST1 (1 << 1) /* Counter Start 1 */
-
-#define MTU2_TSR_TGFA (1 << 0) /* GRA compare match */
-
-#define MTU2_TIER_TGIEA (1 << 0) /* GRA compare match interrupt enable */
-
-#define MTU2_TCR_INIT 0x22
-
-#define MTU2_TCR_CALIB 0x00
-
-static unsigned long mtu2_timer_get_offset(void)
-{
- int count;
- static int count_p = 0x7fff; /* for the first call after boot */
- static unsigned long jiffies_p = 0;
-
- /*
- * cache volatile jiffies temporarily; we have IRQs turned off.
- */
- unsigned long jiffies_t;
-
- /* timer count may underflow right here */
- count = ctrl_inw(MTU2_TCNT_1); /* read the latched count */
-
- jiffies_t = jiffies;
-
- /*
- * avoiding timer inconsistencies (they are rare, but they happen)...
- * there is one kind of problem that must be avoided here:
- * 1. the timer counter underflows
- */
-
- if (jiffies_t == jiffies_p) {
- if (count > count_p) {
- if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) {
- count -= LATCH;
- } else {
- printk("%s (): hardware timer problem?\n",
- __func__);
- }
- }
- } else
- jiffies_p = jiffies_t;
-
- count_p = count;
-
- count = ((LATCH-1) - count) * TICK_SIZE;
- count = (count + LATCH/2) / LATCH;
-
- return count;
-}
-
-static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
-{
- unsigned long timer_status;
-
- /* Clear TGFA bit */
- timer_status = ctrl_inb(MTU2_TSR_1);
- timer_status &= ~MTU2_TSR_TGFA;
- ctrl_outb(timer_status, MTU2_TSR_1);
-
- /* Do timer tick */
- handle_timer_tick();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction mtu2_irq = {
- .name = "timer",
- .handler = mtu2_timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
-};
-
-static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
-
-static void mtu2_clk_init(struct clk *clk)
-{
- u8 idx = MTU2_TCR_INIT & 0x7;
-
- clk->rate = clk->parent->rate / divisors[idx];
- /* Start TCNT counting */
- ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
-
-}
-
-static void mtu2_clk_recalc(struct clk *clk)
-{
- u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7;
- clk->rate = clk->parent->rate / divisors[idx];
-}
-
-static struct clk_ops mtu2_clk_ops = {
- .init = mtu2_clk_init,
- .recalc = mtu2_clk_recalc,
-};
-
-static struct clk mtu2_clk1 = {
- .name = "mtu2_clk1",
- .ops = &mtu2_clk_ops,
-};
-
-static int mtu2_timer_start(void)
-{
- ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
- return 0;
-}
-
-static int mtu2_timer_stop(void)
-{
- ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR);
- return 0;
-}
-
-static int mtu2_timer_init(void)
-{
- unsigned long interval;
-
- setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq);
-
- mtu2_clk1.parent = clk_get(NULL, "module_clk");
-
- ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3);
-
- /* Normal operation */
- ctrl_outb(0, MTU2_TMDR_1);
- ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1);
- ctrl_outb(0x01, MTU2_TIOR_1);
-
- /* Enable underflow interrupt */
- ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1);
-
- interval = CONFIG_SH_PCLK_FREQ / 16 / HZ;
- printk(KERN_INFO "Interval = %ld\n", interval);
-
- ctrl_outw(interval, MTU2_TGRA_1);
- ctrl_outw(0, MTU2_TCNT_1);
-
- clk_register(&mtu2_clk1);
- clk_enable(&mtu2_clk1);
-
- return 0;
-}
-
-struct sys_timer_ops mtu2_timer_ops = {
- .init = mtu2_timer_init,
- .start = mtu2_timer_start,
- .stop = mtu2_timer_stop,
-#ifndef CONFIG_GENERIC_TIME
- .get_offset = mtu2_timer_get_offset,
-#endif
-};
-
-struct sys_timer mtu2_timer = {
- .name = "mtu2",
- .ops = &mtu2_timer_ops,
-};
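
Annotation: the MTU2 variant takes its prescaler from a small table indexed by TCR bits 2:0, and because MTU2_TCR_INIT (0x22) selects the /16 tap, the compare-match value written to TGRA is CONFIG_SH_PCLK_FREQ / 16 / HZ. A short sketch of the same lookup, with the peripheral clock and HZ chosen purely for illustration:

#include <stdio.h>

/* Same table as the driver: TCR[2:0] selects the prescaler tap. */
static const unsigned int mtu2_divisors[] = { 1, 4, 16, 64, 1, 1, 256 };

int main(void)
{
	const unsigned long pclk = 33333333;	/* assumed peripheral clock, Hz */
	const unsigned int hz = 100;		/* assumed tick rate */
	unsigned int tcr_init = 0x22;		/* MTU2_TCR_INIT from the driver */
	unsigned int idx = tcr_init & 0x7;	/* -> 2, i.e. pclk/16 */

	printf("MTU2 counter clock: %lu Hz\n", pclk / mtu2_divisors[idx]);
	printf("TGRA compare-match value: %lu\n", pclk / 16 / hz);
	return 0;
}
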
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
deleted file mode 100644
index 8935570008d..00000000000
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
- *
- * Copyright (C) 2005 - 2007 Paul Mundt
- *
- * TMU handling code hacked out of arch/sh/kernel/time.c
- *
- * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
- * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2002, 2003, 2004 Paul Mundt
- * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/seqlock.h>
-#include <linux/clockchips.h>
-#include <asm/timer.h>
-#include <asm/rtc.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/clock.h>
-
-#define TMU_TOCR_INIT 0x00
-#define TMU_TCR_INIT 0x0020
-
-static int tmu_timer_start(void)
-{
- ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR);
- return 0;
-}
-
-static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
-{
- ctrl_outl(interval, TMU0_TCNT);
-
- /*
- * TCNT reloads from TCOR on underflow, clear it if we don't
- * intend to auto-reload
- */
- if (reload)
- ctrl_outl(interval, TMU0_TCOR);
- else
- ctrl_outl(0, TMU0_TCOR);
-
- tmu_timer_start();
-}
-
-static int tmu_timer_stop(void)
-{
- ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~0x3, TMU_012_TSTR);
- return 0;
-}
-
-static cycle_t tmu_timer_read(void)
-{
- return ~ctrl_inl(TMU1_TCNT);
-}
-
-static int tmu_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-{
- tmu0_timer_set_interval(cycles, 1);
- return 0;
-}
-
-static void tmu_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
- ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- ctrl_outl(0, TMU0_TCOR);
- break;
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
-}
-
-static struct clock_event_device tmu0_clockevent = {
- .name = "tmu0",
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = tmu_set_mode,
- .set_next_event = tmu_set_next_event,
-};
-
-static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
-{
- struct clock_event_device *evt = &tmu0_clockevent;
- unsigned long timer_status;
-
- /* Clear UNF bit */
- timer_status = ctrl_inw(TMU0_TCR);
- timer_status &= ~0x100;
- ctrl_outw(timer_status, TMU0_TCR);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction tmu0_irq = {
- .name = "periodic timer",
- .handler = tmu_timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
- .mask = CPU_MASK_NONE,
-};
-
-static void tmu0_clk_init(struct clk *clk)
-{
- u8 divisor = TMU_TCR_INIT & 0x7;
- ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
- clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
-
-static void tmu0_clk_recalc(struct clk *clk)
-{
- u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
- clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
-
-static struct clk_ops tmu0_clk_ops = {
- .init = tmu0_clk_init,
- .recalc = tmu0_clk_recalc,
-};
-
-static struct clk tmu0_clk = {
- .name = "tmu0_clk",
- .ops = &tmu0_clk_ops,
-};
-
-static void tmu1_clk_init(struct clk *clk)
-{
- u8 divisor = TMU_TCR_INIT & 0x7;
- ctrl_outw(divisor, TMU1_TCR);
- clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
-
-static void tmu1_clk_recalc(struct clk *clk)
-{
- u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
- clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
-
-static struct clk_ops tmu1_clk_ops = {
- .init = tmu1_clk_init,
- .recalc = tmu1_clk_recalc,
-};
-
-static struct clk tmu1_clk = {
- .name = "tmu1_clk",
- .ops = &tmu1_clk_ops,
-};
-
-static int tmu_timer_init(void)
-{
- unsigned long interval;
- unsigned long frequency;
-
- setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);
-
- tmu0_clk.parent = clk_get(NULL, "module_clk");
- tmu1_clk.parent = clk_get(NULL, "module_clk");
-
- tmu_timer_stop();
-
-#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
- !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
- !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
- !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
- !defined(CONFIG_CPU_SUBTYPE_SHX3)
- ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
-#endif
-
- clk_register(&tmu0_clk);
- clk_register(&tmu1_clk);
- clk_enable(&tmu0_clk);
- clk_enable(&tmu1_clk);
-
- frequency = clk_get_rate(&tmu0_clk);
- interval = (frequency + HZ / 2) / HZ;
-
- sh_hpt_frequency = clk_get_rate(&tmu1_clk);
- ctrl_outl(~0, TMU1_TCNT);
- ctrl_outl(~0, TMU1_TCOR);
-
- tmu0_timer_set_interval(interval, 1);
-
- tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
- tmu0_clockevent.shift);
- tmu0_clockevent.max_delta_ns =
- clockevent_delta2ns(-1, &tmu0_clockevent);
- tmu0_clockevent.min_delta_ns =
- clockevent_delta2ns(1, &tmu0_clockevent);
-
- tmu0_clockevent.cpumask = cpumask_of_cpu(0);
-
- clockevents_register_device(&tmu0_clockevent);
-
- return 0;
-}
-
-struct sys_timer_ops tmu_timer_ops = {
- .init = tmu_timer_init,
- .start = tmu_timer_start,
- .stop = tmu_timer_stop,
- .read = tmu_timer_read,
-};
-
-struct sys_timer tmu_timer = {
- .name = "tmu",
- .ops = &tmu_timer_ops,
-};
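
Annotation: tmu_timer_init() above converts the TMU0 input frequency into the clockevent mult/shift pair with div_sc() and then derives the programmable delta range with clockevent_delta2ns(). Roughly, mult = (freq << shift) / NSEC_PER_SEC and delta2ns(cycles) = (cycles << shift) / mult. A userspace sketch of that fixed-point conversion, with an assumed 8 MHz TMU clock (the real rate comes from the clock framework at boot, and the kernel helpers add clamping and rounding not shown here):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Equivalent of the kernel's div_sc(): scaled ticks-per-nanosecond factor. */
static uint32_t div_sc(uint64_t from, uint64_t to, int shift)
{
	return (uint32_t)((from << shift) / to);
}

/* Approximation of clockevent_delta2ns(): cycles back to nanoseconds. */
static uint64_t delta2ns(uint64_t cycles, uint32_t mult, int shift)
{
	return (cycles << shift) / mult;
}

int main(void)
{
	const uint64_t freq = 8000000;	/* assumed TMU0 rate, Hz */
	const int shift = 32;		/* as in tmu0_clockevent */
	uint32_t mult = div_sc(freq, NSEC_PER_SEC, shift);

	printf("mult = %u\n", mult);
	printf("1 tick      ~ %llu ns\n",
	       (unsigned long long)delta2ns(1, mult, shift));
	printf("80000 ticks ~ %llu ns (one 10 ms period at 8 MHz)\n",
	       (unsigned long long)delta2ns(80000, mult, shift));
	return 0;
}
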
diff --git a/arch/sh/kernel/timers/timer.c b/arch/sh/kernel/timers/timer.c
deleted file mode 100644
index 4e7e747d1b6..00000000000
--- a/arch/sh/kernel/timers/timer.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/sh/kernel/timers/timer.c - Common timer code
- *
- * Copyright (C) 2005 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <asm/timer.h>
-
-static struct sys_timer *sys_timers[] = {
-#ifdef CONFIG_SH_TMU
- &tmu_timer,
-#endif
-#ifdef CONFIG_SH_MTU2
- &mtu2_timer,
-#endif
-#ifdef CONFIG_SH_CMT
- &cmt_timer,
-#endif
- NULL,
-};
-
-static char timer_override[10];
-static int __init timer_setup(char *str)
-{
- if (str)
- strlcpy(timer_override, str, sizeof(timer_override));
- return 1;
-}
-__setup("timer=", timer_setup);
-
-struct sys_timer *get_sys_timer(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sys_timers); i++) {
- struct sys_timer *t = sys_timers[i];
-
- if (unlikely(!t))
- break;
- if (unlikely(timer_override[0]))
- if ((strcmp(timer_override, t->name) != 0))
- continue;
- if (likely(t->ops->init() == 0))
- return t;
- }
-
- return NULL;
-}
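
Annotation: get_sys_timer() above walks the compiled-in timers in priority order, honours an optional "timer=" name override from the command line, and returns the first one whose init() hook succeeds. The selection logic in isolation, with made-up timer names and init results standing in for the real TMU/CMT probes:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct sys_timer {
	const char *name;
	int (*init)(void);	/* returns 0 on success */
};

static int tmu_init(void) { return -1; }	/* pretend the TMU probe fails */
static int cmt_init(void) { return 0; }		/* pretend the CMT probe works */

static struct sys_timer tmu_timer = { "tmu", tmu_init };
static struct sys_timer cmt_timer = { "cmt", cmt_init };

static struct sys_timer *sys_timers[] = { &tmu_timer, &cmt_timer, NULL };

static struct sys_timer *get_sys_timer(const char *override)
{
	size_t i;

	for (i = 0; sys_timers[i]; i++) {
		struct sys_timer *t = sys_timers[i];

		/* "timer=<name>" skips everything but the named timer. */
		if (override[0] && strcmp(override, t->name) != 0)
			continue;
		if (t->init() == 0)
			return t;
	}
	return NULL;
}

int main(void)
{
	struct sys_timer *t = get_sys_timer("");

	printf("selected: %s\n", t ? t->name : "none");
	return 0;
}
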
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 0838942b708..772caffba22 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -11,11 +11,40 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/topology.h>
#include <linux/node.h>
#include <linux/nodemask.h>
+#include <linux/export.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
+cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
+
+static cpumask_t cpu_coregroup_map(unsigned int cpu)
+{
+ /*
+ * Presently all SH-X3 SMP cores are multi-cores, so just keep it
+ * simple until we have a method for determining topology..
+ */
+ return *cpu_possible_mask;
+}
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
+int arch_update_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+
+ return 0;
+}
+
static int __init topology_init(void)
{
int i, ret;
@@ -26,7 +55,11 @@ static int __init topology_init(void)
#endif
for_each_present_cpu(i) {
- ret = register_cpu(&per_cpu(cpu_devices, i), i);
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = 1;
+
+ ret = register_cpu(c, i);
if (unlikely(ret))
printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
__func__, i, ret);
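
Annotation: the topology change above is deliberately coarse. Until real SH-X3 topology probing exists, arch_update_cpu_topology() places every possible CPU in the same core group, so cpu_coregroup_mask() hands back the whole map. Modelled with plain bitmasks (NR_CPUS and the possible set are invented for the example):

#include <stdio.h>

#define NR_CPUS 4

static unsigned int cpu_possible_mask = 0xf;	/* assumed: CPUs 0-3 exist */
static unsigned int cpu_core_map[NR_CPUS];

/* Until real topology probing exists, every CPU shares one core group. */
static unsigned int cpu_coregroup_map(unsigned int cpu)
{
	(void)cpu;
	return cpu_possible_mask;
}

static void arch_update_cpu_topology(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}

int main(void)
{
	unsigned int cpu;

	arch_update_cpu_topology();
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u core siblings: 0x%x\n", cpu, cpu_core_map[cpu]);
	return 0;
}
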
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index a3bdc68ef02..dfdad72c61c 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -4,24 +4,120 @@
#include <linux/kdebug.h>
#include <linux/signal.h>
#include <linux/sched.h>
-#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
+#include <asm/traps.h>
-#ifdef CONFIG_BUG
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char *str, struct pt_regs *regs, long err)
+{
+ static int die_counter;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+
+ printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ print_modules();
+ show_regs(regs);
+
+ printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+ task_pid_nr(current), task_stack_page(current) + 1);
+
+ if (!user_mode(regs) || in_interrupt())
+ dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+ (unsigned long)task_stack_page(current));
+
+ notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ do_exit(SIGSEGV);
+}
+
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ */
+void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+
+ die(str, regs, err);
+ }
+}
+
+#ifdef CONFIG_GENERIC_BUG
static void handle_BUG(struct pt_regs *regs)
{
+ const struct bug_entry *bug;
+ unsigned long bugaddr = regs->pc;
enum bug_trap_type tt;
- tt = report_bug(regs->pc, regs);
+
+ if (!is_valid_bugaddr(bugaddr))
+ goto invalid;
+
+ bug = find_bug(bugaddr);
+
+ /* Switch unwinders when unwind_stack() is called */
+ if (bug->flags & BUGFLAG_UNWINDER)
+ unwinder_faulted = 1;
+
+ tt = report_bug(bugaddr, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
- regs->pc += instruction_size(regs->pc);
+ regs->pc += instruction_size(bugaddr);
return;
}
+invalid:
die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}
int is_valid_bugaddr(unsigned long addr)
{
- return addr >= PAGE_OFFSET;
+ insn_size_t opcode;
+
+ if (addr < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((insn_size_t *)addr, opcode))
+ return 0;
+ if (opcode == TRAPA_BUG_OPCODE)
+ return 1;
+
+ return 0;
}
#endif
@@ -33,7 +129,7 @@ BUILD_TRAP_HANDLER(debug)
TRAP_HANDLER_DECL;
/* Rewind */
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
SIGTRAP) == NOTIFY_STOP)
@@ -50,19 +146,42 @@ BUILD_TRAP_HANDLER(bug)
TRAP_HANDLER_DECL;
/* Rewind */
- regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+ regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
SIGTRAP) == NOTIFY_STOP)
return;
-#ifdef CONFIG_BUG
+#ifdef CONFIG_GENERIC_BUG
if (__kernel_text_address(instruction_pointer(regs))) {
- opcode_t insn = *(opcode_t *)instruction_pointer(regs);
+ insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
if (insn == TRAPA_BUG_OPCODE)
handle_BUG(regs);
+ return;
}
#endif
force_sig(SIGTRAP, current);
}
+
+BUILD_TRAP_HANDLER(nmi)
+{
+ unsigned int cpu = smp_processor_id();
+ TRAP_HANDLER_DECL;
+
+ nmi_enter();
+ nmi_count(cpu)++;
+
+ switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
+ case NOTIFY_OK:
+ case NOTIFY_STOP:
+ break;
+ case NOTIFY_BAD:
+ die("Fatal Non-Maskable Interrupt", regs, SIGINT);
+ default:
+ printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
+ break;
+ }
+
+ nmi_exit();
+}
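
Annotation: the reworked handle_BUG() path first validates the faulting address. It must sit in kernel space and the word there must actually be the BUG trapa opcode, read through probe_kernel_address() so a bad pointer fails gracefully instead of taking another fault. The shape of that check, simulated against a local buffer (PAGE_OFFSET and the opcode value below are stand-ins, not authoritative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_OFFSET       0x80000000UL	/* assumed kernel-space boundary */
#define TRAPA_BUG_OPCODE  0xc33e	/* assumed 16-bit trapa encoding */

/* Stand-in for probe_kernel_address(): copy one opcode, report failure. */
static int probe_opcode(const uint16_t *addr, uint16_t *val, int readable)
{
	if (!readable)
		return -1;
	memcpy(val, addr, sizeof(*val));
	return 0;
}

static int is_valid_bugaddr(unsigned long vaddr, const uint16_t *backing,
			    int readable)
{
	uint16_t opcode;

	if (vaddr < PAGE_OFFSET)
		return 0;			/* userspace: never a BUG site */
	if (probe_opcode(backing, &opcode, readable))
		return 0;			/* unreadable: not a BUG site */
	return opcode == TRAPA_BUG_OPCODE;
}

int main(void)
{
	uint16_t bug_insn = TRAPA_BUG_OPCODE;
	uint16_t nop_insn = 0x0009;

	printf("kernel addr + trapa : %d\n",
	       is_valid_bugaddr(0x8c001000UL, &bug_insn, 1));
	printf("kernel addr + nop   : %d\n",
	       is_valid_bugaddr(0x8c001000UL, &nop_insn, 1));
	printf("user addr           : %d\n",
	       is_valid_bugaddr(0x00401000UL, &bug_insn, 1));
	return 0;
}
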
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index baa4fa368dc..ff639342a8b 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -5,7 +5,7 @@
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -13,35 +13,31 @@
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
-#include <linux/kexec.h>
#include <linux/limits.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#ifdef CONFIG_SH_KGDB
-#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs) \
-{ \
- if (kgdb_debug_hook && !user_mode(regs))\
- (*kgdb_debug_hook)(regs); \
-}
-#else
-#define CHK_REMOTE_DEBUG(regs)
-#endif
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <asm/alignment.h>
+#include <asm/fpu.h>
+#include <asm/kprobes.h>
+#include <asm/traps.h>
+#include <asm/bl_bit.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR 9
# ifdef CONFIG_CPU_SH2A
+# define TRAP_UBC 12
+# define TRAP_FPU_ERROR 13
# define TRAP_DIVZERO_ERROR 17
# define TRAP_DIVOVF_ERROR 18
# endif
@@ -50,103 +46,6 @@
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
-{
- unsigned long p;
- int i;
-
- printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
-
- for (p = bottom & ~31; p < top; ) {
- printk("%04lx: ", p & 0xffff);
-
- for (i = 0; i < 8; i++, p += 4) {
- unsigned int val;
-
- if (p < bottom || p >= top)
- printk(" ");
- else {
- if (__get_user(val, (unsigned int __user *)p)) {
- printk("\n");
- return;
- }
- printk("%08x ", val);
- }
- }
- printk("\n");
- }
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- static int die_counter;
-
- oops_enter();
-
- console_verbose();
- spin_lock_irq(&die_lock);
- bust_spinlocks(1);
-
- printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-
- CHK_REMOTE_DEBUG(regs);
- print_modules();
- show_regs(regs);
-
- printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
- task_pid_nr(current), task_stack_page(current) + 1);
-
- if (!user_mode(regs) || in_interrupt())
- dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
-
- bust_spinlocks(0);
- add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
-
- if (kexec_should_crash(current))
- crash_kexec(regs);
-
- if (in_interrupt())
- panic("Fatal exception in interrupt");
-
- if (panic_on_oops)
- panic("Fatal exception");
-
- oops_exit();
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char *str, struct pt_regs *regs,
- long err)
-{
- if (!user_mode(regs))
- die(str, regs, err);
-}
-
-/*
- * try and fix up kernelspace address errors
- * - userspace errors just cause EFAULT to be returned, resulting in SEGV
- * - kernel/userspace interfaces cause a jump to an appropriate handler
- * - other kernel errors are bad
- * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
- */
-static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs)) {
- const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return 0;
- }
- die(str, regs, err);
- }
- return -EFAULT;
-}
-
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
@@ -184,12 +83,13 @@ static struct mem_access user_mem_access = {
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
-static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
+static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
+ unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
@@ -199,33 +99,40 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
count = 1<<(instruction&3);
+ switch (count) {
+ case 1: inc_unaligned_byte_access(); break;
+ case 2: inc_unaligned_word_access(); break;
+ case 4: inc_unaligned_dword_access(); break;
+ case 8: inc_unaligned_multi_access(); break;
+ }
+
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
- src = (unsigned char*) *rm;
- src += regs->regs[0];
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
+ srcu = (unsigned char __user *)*rm;
+ srcu += regs->regs[0];
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
- if (ma->from(dst, src, count))
+ if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
- src = (unsigned char*) rm;
+ src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
- dst = (unsigned char*) *rn;
- dst += regs->regs[0];
+ dstu = (unsigned char __user *)*rn;
+ dstu += regs->regs[0];
- if (ma->to(dst, src, count))
+ if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
@@ -233,10 +140,10 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
- dst = (unsigned char*) *rn;
- dst += (instruction&0x000F)<<2;
+ dstu = (unsigned char __user *)*rn;
+ dstu += (instruction&0x000F)<<2;
- if (ma->to(dst, src, 4))
+ if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
@@ -245,28 +152,28 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
- dst = (unsigned char*) *rn;
+ dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
- if (ma->to(dst, src, count))
+ if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
- src = (unsigned char*) *rm;
- src += (instruction&0x000F)<<2;
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
+ srcu = (unsigned char __user *)*rm;
+ srcu += (instruction & 0x000F) << 2;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
- if (ma->from(dst, src, 4))
+ if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
- src = (unsigned char*) *rm;
+ srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
@@ -275,7 +182,7 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
- if (ma->from(dst, src, count))
+ if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
@@ -284,34 +191,63 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
- src = (unsigned char*) &regs->regs[0];
+ src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
- dst = (unsigned char*) *rm; /* called Rn in the spec */
- dst += (instruction&0x000F)<<1;
+ dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
+ dstu += (instruction & 0x000F) << 1;
- if (ma->to(dst, src, 2))
+ if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
- src = (unsigned char*) *rm;
- src += (instruction&0x000F)<<1;
- dst = (unsigned char*) &regs->regs[0];
- *(unsigned long*)dst = 0;
+ srcu = (unsigned char __user *)*rm;
+ srcu += (instruction & 0x000F) << 1;
+ dst = (unsigned char *) &regs->regs[0];
+ *(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
- if (ma->from(dst, src, 2))
+ if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
}
break;
+
+ case 9: /* mov.w @(disp,PC),Rn */
+ srcu = (unsigned char __user *)regs->pc;
+ srcu += 4;
+ srcu += (instruction & 0x00FF) << 1;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+
+ if (ma->from(dst, srcu, 2))
+ goto fetch_fault;
+ sign_extend(2, dst);
+ ret = 0;
+ break;
+
+ case 0xd: /* mov.l @(disp,PC),Rn */
+ srcu = (unsigned char __user *)(regs->pc & ~0x3);
+ srcu += 4;
+ srcu += (instruction & 0x00FF) << 2;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+ if (ma->from(dst, srcu, 4))
+ goto fetch_fault;
+ ret = 0;
+ break;
}
return ret;
@@ -319,7 +255,8 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
/* Argh. Address not only misaligned but also non-existent.
* Raise an EFAULT and see if it's trapped
*/
- return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+ die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+ return -EFAULT;
}
/*
@@ -327,11 +264,12 @@ static int handle_unaligned_ins(opcode_t instruction, struct pt_regs *regs,
* - fetches the instruction from PC+2
*/
static inline int handle_delayslot(struct pt_regs *regs,
- opcode_t old_instruction,
+ insn_size_t old_instruction,
struct mem_access *ma)
{
- opcode_t instruction;
- void *addr = (void *)(regs->pc + instruction_size(old_instruction));
+ insn_size_t instruction;
+ void __user *addr = (void __user *)(regs->pc +
+ instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
@@ -362,30 +300,33 @@ static inline int handle_delayslot(struct pt_regs *regs,
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
-/*
- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
- * opcodes..
- */
-
-static int handle_unaligned_notify_count = 10;
-
-int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
- struct mem_access *ma)
+int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
+ struct mem_access *ma, int expected,
+ unsigned long address)
{
u_int rm;
int ret, index;
+ /*
+ * XXX: We can't handle mixed 16/32-bit instructions yet
+ */
+ if (instruction_size(instruction) != 2)
+ return -EINVAL;
+
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
- /* shout about the first ten userspace fixups */
- if (user_mode(regs) && handle_unaligned_notify_count>0) {
- handle_unaligned_notify_count--;
-
- printk(KERN_NOTICE "Fixing up unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
+ /*
+ * Log the unexpected fixups, and then pass them on to perf.
+ *
+ * We intentionally don't report the expected cases to perf as
+ * otherwise the trapped I/O case will skew the results too much
+ * to be useful.
+ */
+ if (!expected) {
+ unaligned_fixups_notify(current, instruction, regs);
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
+ regs, address);
}
ret = -EFAULT;
@@ -457,6 +398,7 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot*/
+ ret = 0;
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
@@ -470,6 +412,7 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
}
break;
case 0x0900: /* bt lab - no delayslot */
+ ret = 0;
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
@@ -485,6 +428,9 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
}
break;
+ case 0x9000: /* mov.w @(disp,Rm),Rn */
+ goto simple;
+
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
@@ -498,6 +444,9 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
+
+ case 0xD000: /* mov.l @(disp,Rm),Rn */
+ goto simple;
}
return ret;
@@ -509,14 +458,6 @@ int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
return ret;
}
-#ifdef CONFIG_CPU_HAS_SR_RB
-#define lookup_exception_vector(x) \
- __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
-#else
-#define lookup_exception_vector(x) \
- __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
-#endif
-
/*
* Handle various address error exceptions:
* - instruction address error:
@@ -535,21 +476,46 @@ asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long error_code = 0;
mm_segment_t oldfs;
siginfo_t info;
- opcode_t instruction;
+ insn_size_t instruction;
int tmp;
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
- lookup_exception_vector(error_code);
+ error_code = lookup_exception_vector();
#endif
oldfs = get_fs();
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
+ unsigned int user_action;
local_irq_enable();
+ inc_unaligned_user_access();
+
+ set_fs(USER_DS);
+ if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
+ sizeof(instruction))) {
+ set_fs(oldfs);
+ goto uspace_segv;
+ }
+ set_fs(oldfs);
+ /* shout about userspace fixups */
+ unaligned_fixups_notify(current, instruction, regs);
+
+ user_action = unaligned_user_action();
+ if (user_action & UM_FIXUP)
+ goto fixup;
+ if (user_action & UM_SIGNAL)
+ goto uspace_segv;
+ else {
+ /* ignore */
+ regs->pc += instruction_size(instruction);
+ return;
+ }
+
+fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
@@ -557,20 +523,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
}
set_fs(USER_DS);
- if (copy_from_user(&instruction, (void *)(regs->pc),
- sizeof(instruction))) {
- /* Argh. Fault on the instruction itself.
- This should never happen non-SMP
- */
- set_fs(oldfs);
- goto uspace_segv;
- }
-
tmp = handle_unaligned_access(instruction, regs,
- &user_mem_access);
+ &user_mem_access, 0,
+ address);
set_fs(oldfs);
- if (tmp==0)
+ if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
@@ -583,11 +541,13 @@ uspace_segv:
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
+ inc_unaligned_kernel_access();
+
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
set_fs(KERNEL_DS);
- if (copy_from_user(&instruction, (void *)(regs->pc),
+ if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
@@ -596,7 +556,10 @@ uspace_segv:
die("insn faulting in do_address_error", regs, 0);
}
- handle_unaligned_access(instruction, regs, &user_mem_access);
+ unaligned_fixups_notify(current, instruction, regs);
+
+ handle_unaligned_access(instruction, regs, &user_mem_access,
+ 0, address);
set_fs(oldfs);
}
}
@@ -631,9 +594,7 @@ int is_dsp_inst(struct pt_regs *regs)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
-asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage void do_divide_error(unsigned long r4)
{
siginfo_t info;
@@ -650,11 +611,9 @@ asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
}
#endif
-asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage void do_reserved_inst(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+ struct pt_regs *regs = current_pt_regs();
unsigned long error_code;
struct task_struct *tsk = current;
@@ -677,20 +636,21 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
regs->sr |= SR_DSP;
+ /* Save DSP mode */
+ tsk->thread.dsp_status.status |= SR_DSP;
return;
}
#endif
- lookup_exception_vector(error_code);
+ error_code = lookup_exception_vector();
local_irq_enable();
- CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
-static int emulate_branch(unsigned short inst, struct pt_regs* regs)
+static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
/*
* bfs: 8fxx: PC+=d*2+4;
@@ -703,27 +663,32 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
* jsr: 4x0b: PC=Rn after PR=PC+4;
* rts: 000b: PC=PR;
*/
- if ((inst & 0xfd00) == 0x8d00) {
+ if (((inst & 0xf000) == 0xb000) || /* bsr */
+ ((inst & 0xf0ff) == 0x0003) || /* bsrf */
+ ((inst & 0xf0ff) == 0x400b)) /* jsr */
+ regs->pr = regs->pc + 4;
+
+ if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
regs->pc += SH_PC_8BIT_OFFSET(inst);
return 0;
}
- if ((inst & 0xe000) == 0xa000) {
+ if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
regs->pc += SH_PC_12BIT_OFFSET(inst);
return 0;
}
- if ((inst & 0xf0df) == 0x0003) {
+ if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
return 0;
}
- if ((inst & 0xf0df) == 0x400b) {
+ if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
regs->pc = regs->regs[(inst & 0x0f00) >> 8];
return 0;
}
- if ((inst & 0xffff) == 0x000b) {
+ if ((inst & 0xffff) == 0x000b) { /* rts */
regs->pc = regs->pr;
return 0;
}
@@ -732,16 +697,16 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
}
#endif
-asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage void do_illegal_slot_inst(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
- unsigned long error_code;
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long inst;
struct task_struct *tsk = current;
-#ifdef CONFIG_SH_FPU_EMU
- unsigned short inst = 0;
+ if (kprobe_handle_illslot(regs->pc) == 0)
+ return;
+
+#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc);
@@ -752,54 +717,25 @@ asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
/* not a FPU inst. */
#endif
- lookup_exception_vector(error_code);
+ inst = lookup_exception_vector();
local_irq_enable();
- CHK_REMOTE_DEBUG(regs);
force_sig(SIGILL, tsk);
- die_if_no_fixup("illegal slot instruction", regs, error_code);
+ die_if_no_fixup("illegal slot instruction", regs, inst);
}
-asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs __regs)
+asmlinkage void do_exception_error(void)
{
- struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
- lookup_exception_vector(ex);
- die_if_kernel("exception", regs, ex);
+ ex = lookup_exception_vector();
+ die_if_kernel("exception", current_pt_regs(), ex);
}
-#if defined(CONFIG_SH_STANDARD_BIOS)
-void *gdb_vbr_vector;
-
-static inline void __init gdb_vbr_init(void)
-{
- register unsigned long vbr;
-
- /*
- * Read the old value of the VBR register to initialise
- * the vector through which debug and BIOS traps are
- * delegated by the Linux trap handler.
- */
- asm volatile("stc vbr, %0" : "=r" (vbr));
-
- gdb_vbr_vector = (void *)(vbr + 0x100);
- printk("Setting GDB trap vector to 0x%08lx\n",
- (unsigned long)gdb_vbr_vector);
-}
-#endif
-
-void __cpuinit per_cpu_trap_init(void)
+void per_cpu_trap_init(void)
{
extern void *vbr_base;
-#ifdef CONFIG_SH_STANDARD_BIOS
- if (raw_smp_processor_id() == 0)
- gdb_vbr_init();
-#endif
-
/* NOTE: The VBR value should be at P1
(or P2, virtural "fixed" address space).
It's definitely should not in physical address. */
@@ -808,6 +744,9 @@ void __cpuinit per_cpu_trap_init(void)
: /* no output */
: "r" (&vbr_base)
: "memory");
+
+ /* disable exception blocking now when the vbr has been setup */
+ clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
@@ -835,14 +774,9 @@ void __init trap_init(void)
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
-#ifdef CONFIG_CPU_SUBTYPE_SHX3
- set_exception_table_evt(0xd80, fpu_state_restore_trap_handler);
- set_exception_table_evt(0xda0, fpu_state_restore_trap_handler);
-#else
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
-#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
@@ -850,58 +784,12 @@ void __init trap_init(void)
#ifdef CONFIG_CPU_SH2A
set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#ifdef CONFIG_SH_FPU
+ set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
-
- /* Setup VBR for boot cpu */
- per_cpu_trap_init();
-}
-
-void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs)
-{
- unsigned long addr;
-
- if (regs && user_mode(regs))
- return;
-
- printk("\nCall trace: ");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
#endif
- while (!kstack_end(sp)) {
- addr = *sp++;
- if (kernel_text_address(addr))
- print_ip_sym(addr);
- }
-
- printk("\n");
-
- if (!tsk)
- tsk = current;
-
- debug_show_held_locks(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
- unsigned long stack;
-
- if (!tsk)
- tsk = current;
- if (tsk == current)
- sp = (unsigned long *)current_stack_pointer;
- else
- sp = (unsigned long *)tsk->thread.sp;
-
- stack = (unsigned long)sp;
- dump_mem("Stack: ", stack, THREAD_SIZE +
- (unsigned long)task_stack_page(tsk));
- show_trace(tsk, sp, NULL);
-}
-
-void dump_stack(void)
-{
- show_stack(NULL, NULL);
+#ifdef TRAP_UBC
+ set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
+#endif
}
-EXPORT_SYMBOL(dump_stack);
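
Annotation: much of the unaligned-access emulation above decodes 16-bit SH opcodes by hand, including the branch displacements handled by SH_PC_8BIT_OFFSET() and SH_PC_12BIT_OFFSET(): an 8-bit displacement is sign-extended and scaled by 2, a 12-bit one is recovered by shifting the opcode left 4 then arithmetically right 3, and both are taken relative to PC+4. A quick check of that arithmetic (the opcode values are arbitrary examples, and like the kernel macros it relies on the usual truncating conversions):

#include <stdio.h>

/* Same displacement decoding as the traps code. */
#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr)) * 2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)((instr) << 4)) >> 3) + 4)

int main(void)
{
	/* bt with 8-bit displacement +0x10 -> PC += 0x10*2 + 4 = 36 */
	unsigned short bt_fwd = 0x8910;
	/* bt with 8-bit displacement -4 (0xfc) -> PC += -8 + 4 = -4 */
	unsigned short bt_back = 0x89fc;
	/* bra with 12-bit displacement +0x100 -> PC += 0x100*2 + 4 = 516 */
	unsigned short bra_fwd = 0xa100;

	printf("bt  +0x10  : PC += %d\n", SH_PC_8BIT_OFFSET(bt_fwd));
	printf("bt  -4     : PC += %d\n", SH_PC_8BIT_OFFSET(bt_back));
	printf("bra +0x100 : PC += %d\n", SH_PC_12BIT_OFFSET(bra_fwd));
	return 0;
}
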
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 1b58a749908..112ea11c030 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -24,287 +24,28 @@
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
-#include <asm/system.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
+#include <asm/fpu.h>
-#undef DEBUG_EXCEPTION
-#ifdef DEBUG_EXCEPTION
-/* implemented in ../lib/dbg.c */
-extern void show_excp_regs(char *fname, int trapnr, int signr,
- struct pt_regs *regs);
-#else
-#define show_excp_regs(a, b, c, d)
-#endif
-
-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
- unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
-
-#define DO_ERROR(trapnr, signr, str, name, tsk) \
-asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
-{ \
- do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
-}
-
-spinlock_t die_lock;
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- console_verbose();
- spin_lock_irq(&die_lock);
- printk("%s: %lx\n", str, (err & 0xffffff));
- show_regs(regs);
- spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs))
- die(str, regs, err);
-}
-
-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs)) {
- const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return;
- }
- die(str, regs, err);
- }
-}
-
-DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
-DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
-
-
-/* Implement misaligned load/store handling for kernel (and optionally for user
- mode too). Limitation : only SHmedia mode code is handled - there is no
- handling at all for misaligned accesses occurring in SHcompact code yet. */
-
-static int misaligned_fixup(struct pt_regs *regs);
-
-asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0) {
- do_unhandled_exception(7, SIGSEGV, "address error(load)",
- "do_address_error_load",
- error_code, regs, current);
- }
- return;
-}
-
-asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0) {
- do_unhandled_exception(8, SIGSEGV, "address error(store)",
- "do_address_error_store",
- error_code, regs, current);
- }
- return;
-}
-
-#if defined(CONFIG_SH64_ID2815_WORKAROUND)
-
-#define OPCODE_INVALID 0
-#define OPCODE_USER_VALID 1
-#define OPCODE_PRIV_VALID 2
-
-/* getcon/putcon - requires checking which control register is referenced. */
-#define OPCODE_CTRL_REG 3
-
-/* Table of valid opcodes for SHmedia mode.
- Form a 10-bit value by concatenating the major/minor opcodes i.e.
- opcode[31:26,20:16]. The 6 MSBs of this value index into the following
- array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
- LSBs==4'b0000 etc). */
-static unsigned long shmedia_opcode_table[64] = {
- 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
- 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
- 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
-};
-
-void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
-{
- /* Workaround SH5-101 cut2 silicon defect #2815 :
- in some situations, inter-mode branches from SHcompact -> SHmedia
- which should take ITLBMISS or EXECPROT exceptions at the target
- falsely take RESINST at the target instead. */
-
- unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
- unsigned long pc, aligned_pc;
- int get_user_error;
- int trapnr = 12;
- int signr = SIGILL;
- char *exception_name = "reserved_instruction";
-
- pc = regs->pc;
- if ((pc & 3) == 1) {
- /* SHmedia : check for defect. This requires executable vmas
- to be readable too. */
- aligned_pc = pc & ~3;
- if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
- get_user_error = -EFAULT;
- } else {
- get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
- }
- if (get_user_error >= 0) {
- unsigned long index, shift;
- unsigned long major, minor, combined;
- unsigned long reserved_field;
- reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
- major = (opcode >> 26) & 0x3f;
- minor = (opcode >> 16) & 0xf;
- combined = (major << 4) | minor;
- index = major;
- shift = minor << 1;
- if (reserved_field == 0) {
- int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
- switch (opcode_state) {
- case OPCODE_INVALID:
- /* Trap. */
- break;
- case OPCODE_USER_VALID:
- /* Restart the instruction : the branch to the instruction will now be from an RTE
- not from SHcompact so the silicon defect won't be triggered. */
- return;
- case OPCODE_PRIV_VALID:
- if (!user_mode(regs)) {
- /* Should only ever get here if a module has
- SHcompact code inside it. If so, the same fix up is needed. */
- return; /* same reason */
- }
- /* Otherwise, user mode trying to execute a privileged instruction -
- fall through to trap. */
- break;
- case OPCODE_CTRL_REG:
- /* If in privileged mode, return as above. */
- if (!user_mode(regs)) return;
- /* In user mode ... */
- if (combined == 0x9f) { /* GETCON */
- unsigned long regno = (opcode >> 20) & 0x3f;
- if (regno >= 62) {
- return;
- }
- /* Otherwise, reserved or privileged control register, => trap */
- } else if (combined == 0x1bf) { /* PUTCON */
- unsigned long regno = (opcode >> 4) & 0x3f;
- if (regno >= 62) {
- return;
- }
- /* Otherwise, reserved or privileged control register, => trap */
- } else {
- /* Trap */
- }
- break;
- default:
- /* Fall through to trap. */
- break;
- }
- }
- /* fall through to normal resinst processing */
- } else {
- /* Error trying to read opcode. This typically means a
- real fault, not a RESINST any more. So change the
- codes. */
- trapnr = 87;
- exception_name = "address error (exec)";
- signr = SIGSEGV;
- }
- }
-
- do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
-}
-
-#else /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* If the workaround isn't needed, this is just a straightforward reserved
- instruction */
-DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
-
-#endif /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* Called with interrupts disabled */
-asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
-{
- show_excp_regs(__func__, -1, -1, regs);
- die_if_kernel("exception", regs, ex);
-}
-
-int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
-{
- /* Syscall debug */
- printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
-
- die_if_kernel("unknown trapa", regs, scId);
-
- return -ENOSYS;
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
-#ifdef CONFIG_KALLSYMS
- extern void sh64_unwind(struct pt_regs *regs);
- struct pt_regs *regs;
-
- regs = tsk ? tsk->thread.kregs : NULL;
-
- sh64_unwind(regs);
-#else
- printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
-#endif
-}
-
-void show_task(unsigned long *sp)
-{
- show_stack(NULL, sp);
-}
-
-void dump_stack(void)
-{
- show_task(NULL);
-}
-/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
-EXPORT_SYMBOL(dump_stack);
-
-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
- unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
-{
- show_excp_regs(fn_name, trapnr, signr, regs);
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
-
- if (user_mode(regs))
- force_sig(signr, tsk);
-
- die_if_no_fixup(str, regs, error_code);
-}
-
-static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
+static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
- unsigned long opcode;
+ insn_size_t opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
- if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
get_user_error = -EFAULT;
} else {
- get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+ get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
@@ -312,7 +53,7 @@ static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
- *result_opcode = *(unsigned long *) aligned_pc;
+ *result_opcode = *(insn_size_t *)aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
@@ -338,17 +79,23 @@ static int address_is_sign_extended(__u64 a)
#endif
}
+/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
- /* return -1 for fault, 0 for OK */
-
__u64 base_address, addr;
int basereg;
+ switch (1 << width_shift) {
+ case 1: inc_unaligned_byte_access(); break;
+ case 2: inc_unaligned_word_access(); break;
+ case 4: inc_unaligned_dword_access(); break;
+ case 8: inc_unaligned_multi_access(); break;
+ }
+
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
@@ -365,33 +112,27 @@ static int generate_and_check_address(struct pt_regs *regs,
}
/* Check sign extended */
- if (!address_is_sign_extended(addr)) {
+ if (!address_is_sign_extended(addr))
return -1;
- }
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
- if (addr >= TASK_SIZE) {
+ inc_unaligned_user_access();
+
+ if (addr >= TASK_SIZE)
return -1;
- }
- /* Do access_ok check later - it depends on whether it's a load or a store. */
- }
-#endif
+ } else
+ inc_unaligned_kernel_access();
*address = addr;
- return 0;
-}
-/* Default value as for sh */
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-static int user_mode_unaligned_fixup_count = 10;
-static int user_mode_unaligned_fixup_enable = 1;
-#endif
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
+ unaligned_fixups_notify(current, opcode, regs);
-static int kernel_mode_unaligned_fixup_count = 32;
+ return 0;
+}
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
@@ -422,7 +163,7 @@ static void misaligned_kernel_word_store(__u64 address, __u64 value)
}
static int misaligned_load(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
@@ -434,12 +175,10 @@ static int misaligned_load(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
destreg = (opcode >> 4) & 0x3f;
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
if (user_mode(regs)) {
__u64 buffer;
@@ -469,9 +208,7 @@ static int misaligned_load(struct pt_regs *regs,
width_shift, (unsigned long) regs->pc);
break;
}
- } else
-#endif
- {
+ } else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
@@ -498,11 +235,10 @@ static int misaligned_load(struct pt_regs *regs,
}
return 0;
-
}
static int misaligned_store(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift)
{
@@ -513,12 +249,10 @@ static int misaligned_store(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
srcreg = (opcode >> 4) & 0x3f;
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
if (user_mode(regs)) {
__u64 buffer;
@@ -545,9 +279,7 @@ static int misaligned_store(struct pt_regs *regs,
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
- } else
-#endif
- {
+ } else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
@@ -572,14 +304,12 @@ static int misaligned_store(struct pt_regs *regs,
}
return 0;
-
}
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
@@ -591,9 +321,8 @@ static int misaligned_fpu_load(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -612,7 +341,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
@@ -623,19 +352,19 @@ static int misaligned_fpu_load(struct pt_regs *regs,
switch (width_shift) {
case 2:
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
- current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- current->thread.fpu.hard.fp_regs[destreg] = bufhi;
- current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
- current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
@@ -649,12 +378,10 @@ static int misaligned_fpu_load(struct pt_regs *regs,
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
-
-
}
static int misaligned_fpu_store(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
@@ -666,9 +393,8 @@ static int misaligned_fpu_store(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -685,7 +411,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
- save_fpu(current, regs);
+ save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
@@ -693,19 +419,19 @@ static int misaligned_fpu_store(struct pt_regs *regs,
switch (width_shift) {
case 2:
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
- bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- bufhi = current->thread.fpu.hard.fp_regs[srcreg];
- buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
- bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
@@ -726,20 +452,17 @@ static int misaligned_fpu_store(struct pt_regs *regs,
return -1;
}
}
-#endif
static int misaligned_fixup(struct pt_regs *regs)
{
- unsigned long opcode;
+ insn_size_t opcode;
int error;
int major, minor;
+ unsigned int user_action;
-#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
- /* Never fixup user mode misaligned accesses without this option enabled. */
- return -1;
-#else
- if (!user_mode_unaligned_fixup_enable) return -1;
-#endif
+ user_action = unaligned_user_action();
+ if (!(user_action & UM_FIXUP))
+ return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
@@ -748,26 +471,6 @@ static int misaligned_fixup(struct pt_regs *regs)
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
- if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
- --user_mode_unaligned_fixup_count;
- /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
- printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
- current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
- } else
-#endif
- if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
- --kernel_mode_unaligned_fixup_count;
- if (in_interrupt()) {
- printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
- (__u32)regs->pc, opcode);
- } else {
- printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
- current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
- }
- }
-
-
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
@@ -829,7 +532,6 @@ static int misaligned_fixup(struct pt_regs *regs)
}
break;
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
@@ -880,7 +582,6 @@ static int misaligned_fixup(struct pt_regs *regs)
break;
}
break;
-#endif
default:
/* Fault */
@@ -894,66 +595,202 @@ static int misaligned_fixup(struct pt_regs *regs)
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
+}
+static void do_unhandled_exception(int signr, char *str, unsigned long error,
+ struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ force_sig(signr, current);
+
+ die_if_no_fixup(str, regs, error);
}
-static ctl_table unaligned_table[] = {
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "kernel_reports",
- .data = &kernel_mode_unaligned_fixup_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "user_reports",
- .data = &user_mode_unaligned_fixup_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "user_enable",
- .data = &user_mode_unaligned_fixup_enable,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec},
-#endif
- {}
-};
+#define DO_ERROR(signr, str, name) \
+asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
+{ \
+ do_unhandled_exception(signr, str, error_code, regs); \
+}
-static ctl_table unaligned_root[] = {
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "unaligned_fixup",
- .mode = 0555,
- unaligned_table
- },
- {}
-};
+DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
+DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
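For reference, the DO_ERROR() macro above is a thin wrapper generator around do_unhandled_exception(). Hand-expanding it for the address_error_exec case (expansion shown for illustration only, it is not part of the patch) gives:

asmlinkage void do_address_error_exec(unsigned long error_code,
				      struct pt_regs *regs)
{
	do_unhandled_exception(SIGSEGV, "address error (exec)",
			       error_code, regs);
}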
-static ctl_table sh64_root[] = {
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sh64",
- .mode = 0555,
- .child = unaligned_root
- },
- {}
+#if defined(CONFIG_SH64_ID2815_WORKAROUND)
+
+#define OPCODE_INVALID 0
+#define OPCODE_USER_VALID 1
+#define OPCODE_PRIV_VALID 2
+
+/* getcon/putcon - requires checking which control register is referenced. */
+#define OPCODE_CTRL_REG 3
+
+/* Table of valid opcodes for SHmedia mode.
+ Form a 10-bit value by concatenating the major/minor opcodes i.e.
+ opcode[31:26,19:16]. The 6 MSBs of this value index into the following
+ array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
+ LSBs==4'b0000 etc). */
+static unsigned long shmedia_opcode_table[64] = {
+ 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
+ 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
+ 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
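A worked example of the lookup scheme described in the comment above (arithmetic illustration only, not part of the patch): take an opcode word of 0x94000000, whose major opcode 0x94 >> 2 is the FLD.S case handled by misaligned_fixup() earlier in this file. Its reserved field (opcode & 0xf) is zero, so do_reserved_inst() proceeds to the table lookup:

	major = (0x94000000 >> 26) & 0x3f;	/* = 0x25 */
	minor = (0x94000000 >> 16) & 0xf;	/* = 0x0  */
	state = (shmedia_opcode_table[0x25] >> (0x0 << 1)) & 0x3;
		/* = (0x55555555 >> 0) & 0x3 = 1 = OPCODE_USER_VALID */

With OPCODE_USER_VALID the handler simply returns and the instruction is restarted, as the switch below shows.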
-static struct ctl_table_header *sysctl_header;
-static int __init init_sysctl(void)
+
+/* Workaround SH5-101 cut2 silicon defect #2815:
+ in some situations, inter-mode branches from SHcompact -> SHmedia
+ which should take ITLBMISS or EXECPROT exceptions at the target
+ falsely take RESINST at the target instead. */
+void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
- sysctl_header = register_sysctl_table(sh64_root);
- return 0;
+ insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
+ unsigned long pc, aligned_pc;
+ unsigned long index, shift;
+ unsigned long major, minor, combined;
+ unsigned long reserved_field;
+ int opcode_state;
+ int get_user_error;
+ int signr = SIGILL;
+ char *exception_name = "reserved_instruction";
+
+ pc = regs->pc;
+
+ /* SHcompact is not handled */
+ if (unlikely((pc & 3) == 0))
+ goto out;
+
+ /* SHmedia: check for defect. This requires executable vmas
+ to be readable too. */
+ aligned_pc = pc & ~3;
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
+ get_user_error = -EFAULT;
+ else
+ get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
+
+ if (get_user_error < 0) {
+ /*
+ * Error trying to read opcode. This typically means a
+ * real fault, not a RESINST any more. So change the
+ * codes.
+ */
+ exception_name = "address error (exec)";
+ signr = SIGSEGV;
+ goto out;
+ }
+
+ /* These bits are currently reserved as zero in all valid opcodes */
+ reserved_field = opcode & 0xf;
+ if (unlikely(reserved_field))
+ goto out; /* invalid opcode */
+
+ major = (opcode >> 26) & 0x3f;
+ minor = (opcode >> 16) & 0xf;
+ combined = (major << 4) | minor;
+ index = major;
+ shift = minor << 1;
+ opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
+ switch (opcode_state) {
+ case OPCODE_INVALID:
+ /* Trap. */
+ break;
+ case OPCODE_USER_VALID:
+ /*
+ * Restart the instruction: the branch to the instruction
+ * will now be from an RTE not from SHcompact so the
+ * silicon defect won't be triggered.
+ */
+ return;
+ case OPCODE_PRIV_VALID:
+ if (!user_mode(regs)) {
+ /*
+ * Should only ever get here if a module has
+ * SHcompact code inside it. If so, the same fix
+ * up is needed.
+ */
+ return; /* same reason */
+ }
+
+ /*
+ * Otherwise, user mode trying to execute a privileged
+ * instruction - fall through to trap.
+ */
+ break;
+ case OPCODE_CTRL_REG:
+ /* If in privileged mode, return as above. */
+ if (!user_mode(regs))
+ return;
+
+ /* In user mode ... */
+ if (combined == 0x9f) { /* GETCON */
+ unsigned long regno = (opcode >> 20) & 0x3f;
+
+ if (regno >= 62)
+ return;
+
+ /* reserved/privileged control register => trap */
+ } else if (combined == 0x1bf) { /* PUTCON */
+ unsigned long regno = (opcode >> 4) & 0x3f;
+
+ if (regno >= 62)
+ return;
+
+ /* reserved/privileged control register => trap */
+ }
+
+ break;
+ default:
+ /* Fall through to trap. */
+ break;
+ }
+
+out:
+ do_unhandled_exception(signr, exception_name, error_code, regs);
}
-__initcall(init_sysctl);
+#else /* CONFIG_SH64_ID2815_WORKAROUND */
+/* If the workaround isn't needed, this is just a straightforward reserved
+ instruction */
+DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
+
+#endif /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* Called with interrupts disabled */
+asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
+{
+ die_if_kernel("exception", regs, ex);
+}
+
+asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
+{
+ /* Syscall debug */
+ printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
+
+ die_if_kernel("unknown trapa", regs, scId);
+
+ return -ENOSYS;
+}
+
+/* Implement misaligned load/store handling for kernel (and optionally for user
+ mode too). Limitation: only SHmedia mode code is handled - there is no
+ handling at all for misaligned accesses occurring in SHcompact code yet. */
+
+asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0)
+ do_unhandled_exception(SIGSEGV, "address error(load)",
+ error_code, regs);
+}
+
+asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0)
+ do_unhandled_exception(SIGSEGV, "address error(store)",
+ error_code, regs);
+}
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
@@ -965,11 +802,15 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
- if (exp_cause & ~4) {
+ if (exp_cause & ~4)
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
- }
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
+
+void per_cpu_trap_init(void)
+{
+ /* Nothing to do for now, VBR initialization later. */
+}
diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
new file mode 100644
index 00000000000..521b5432471
--- /dev/null
+++ b/arch/sh/kernel/unwinder.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2009 Matt Fleming
+ *
+ * Based, in part, on kernel/time/clocksource.c.
+ *
+ * This file provides arbitration code for stack unwinders.
+ *
+ * Multiple stack unwinders can be available on a system, usually with
+ * the most accurate unwinder being the currently active one.
+ */
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/unwinder.h>
+#include <linux/atomic.h>
+
+/*
+ * This is the most basic stack unwinder an architecture can
+ * provide. For architectures without reliable frame pointers, e.g.
+ * RISC CPUs, it can be implemented by looking through the stack for
+ * addresses that lie within the kernel text section.
+ *
+ * Other CPUs, e.g. x86, can use their frame pointer register to
+ * construct more accurate stack traces.
+ */
+static struct list_head unwinder_list;
+static struct unwinder stack_reader = {
+ .name = "stack-reader",
+ .dump = stack_reader_dump,
+ .rating = 50,
+ .list = {
+ .next = &unwinder_list,
+ .prev = &unwinder_list,
+ },
+};
+
+/*
+ * "curr_unwinder" points to the stack unwinder currently in use. This
+ * is the unwinder with the highest rating.
+ *
+ * "unwinder_list" is a linked-list of all available unwinders, sorted
+ * by rating.
+ *
+ * All modifications of "curr_unwinder" and "unwinder_list" must be
+ * performed whilst holding "unwinder_lock".
+ */
+static struct unwinder *curr_unwinder = &stack_reader;
+
+static struct list_head unwinder_list = {
+ .next = &stack_reader.list,
+ .prev = &stack_reader.list,
+};
+
+static DEFINE_SPINLOCK(unwinder_lock);
+
+/**
+ * select_unwinder - Select the best registered stack unwinder.
+ *
+ * Private function. Must hold unwinder_lock when called.
+ *
+ * Select the stack unwinder with the best rating. This is useful for
+ * setting up curr_unwinder.
+ */
+static struct unwinder *select_unwinder(void)
+{
+ struct unwinder *best;
+
+ if (list_empty(&unwinder_list))
+ return NULL;
+
+ best = list_entry(unwinder_list.next, struct unwinder, list);
+ if (best == curr_unwinder)
+ return NULL;
+
+ return best;
+}
+
+/*
+ * Enqueue the stack unwinder sorted by rating.
+ */
+static int unwinder_enqueue(struct unwinder *ops)
+{
+ struct list_head *tmp, *entry = &unwinder_list;
+
+ list_for_each(tmp, &unwinder_list) {
+ struct unwinder *o;
+
+ o = list_entry(tmp, struct unwinder, list);
+ if (o == ops)
+ return -EBUSY;
+ /* Keep track of where to insert */
+ if (o->rating >= ops->rating)
+ entry = tmp;
+ }
+ list_add(&ops->list, entry);
+
+ return 0;
+}
+
+/**
+ * unwinder_register - Used to install new stack unwinder
+ * @u: unwinder to be registered
+ *
+ * Install the new stack unwinder on the unwinder list, which is sorted
+ * by rating.
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int unwinder_register(struct unwinder *u)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&unwinder_lock, flags);
+ ret = unwinder_enqueue(u);
+ if (!ret)
+ curr_unwinder = select_unwinder();
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+
+ return ret;
+}
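As a usage sketch: a higher-rated unwinder would be registered as below and would immediately become curr_unwinder, since its rating exceeds the stack reader's 50. The dwarf_unwinder name and its callback body are hypothetical and not part of this patch; the void return type of ->dump is assumed from the call site in unwind_stack().

#include <linux/init.h>
#include <asm/unwinder.h>

/* Illustrative stub only; a real implementation would walk CFI records. */
static void dwarf_unwind_dump(struct task_struct *task, struct pt_regs *regs,
			      unsigned long *sp,
			      const struct stacktrace_ops *ops, void *data)
{
}

static struct unwinder dwarf_unwinder = {
	.name	= "dwarf-unwinder",
	.dump	= dwarf_unwind_dump,
	.rating	= 150,	/* outranks the basic stack reader (50) */
};

static int __init dwarf_unwinder_init(void)
{
	/* Sorted insert; select_unwinder() then picks the new list head. */
	return unwinder_register(&dwarf_unwinder);
}
early_initcall(dwarf_unwinder_init);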
+
+int unwinder_faulted = 0;
+
+/*
+ * Unwind the call stack and pass information to the stacktrace_ops
+ * functions. Also handle the case where we need to switch to a new
+ * stack dumper because the current one faulted unexpectedly.
+ */
+void unwind_stack(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, const struct stacktrace_ops *ops,
+ void *data)
+{
+ unsigned long flags;
+
+ /*
+ * The problem with unwinders with high ratings is that they are
+ * inherently more complicated than the simple ones with lower
+ * ratings. We are therefore more likely to fault in the
+ * complicated ones, e.g. hitting BUG()s. If we fault in the
+ * code for the current stack unwinder we try to downgrade to
+ * one with a lower rating.
+ *
+ * Hopefully this will give us a semi-reliable stacktrace so we
+ * can diagnose why curr_unwinder->dump() faulted.
+ */
+ if (unwinder_faulted) {
+ spin_lock_irqsave(&unwinder_lock, flags);
+
+ /* Make sure no one beat us to changing the unwinder */
+ if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
+ list_del(&curr_unwinder->list);
+ curr_unwinder = select_unwinder();
+
+ unwinder_faulted = 0;
+ }
+
+ spin_unlock_irqrestore(&unwinder_lock, flags);
+ }
+
+ curr_unwinder->dump(task, regs, sp, ops, data);
+}
+EXPORT_SYMBOL_GPL(unwind_stack);
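A minimal caller of unwind_stack() might look like the sketch below. The .address member of struct stacktrace_ops and its signature are assumptions here (the structure's layout is not shown in this patch; see <asm/stacktrace.h> for the real definition):

/* Sketch only: print every address the active unwinder reports.
 * The ->address callback is assumed, not taken from this patch. */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	printk("%s[<%08lx>]\n", reliable ? " " : " ? ", addr);
}

static const struct stacktrace_ops print_trace_ops = {
	.address = print_trace_address,
};

static void show_trace_sketch(struct task_struct *task, unsigned long *sp,
			      struct pt_regs *regs)
{
	unwind_stack(task, regs, sp, &print_trace_ops, NULL);
}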
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index d7d4991f32a..db88cbf9eaf 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -1,5 +1,87 @@
-#ifdef CONFIG_SUPERH32
-# include "vmlinux_32.lds.S"
+/*
+ * ld script to make SuperH Linux kernel
+ * Written by Niibe Yutaka and Paul Mundt
+ */
+#ifdef CONFIG_SUPERH64
+#define LOAD_OFFSET PAGE_OFFSET
+OUTPUT_ARCH(sh:sh5)
#else
-# include "vmlinux_64.lds.S"
+#define LOAD_OFFSET 0
+OUTPUT_ARCH(sh)
#endif
+
+#include <asm/thread_info.h>
+#include <asm/cache.h>
+#include <asm/vmlinux.lds.h>
+
+#ifdef CONFIG_PMB
+ #define MEMORY_OFFSET 0
+#else
+ #define MEMORY_OFFSET __MEMORY_START
+#endif
+
+ENTRY(_start)
+SECTIONS
+{
+ . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
+
+ _text = .; /* Text and read-only data */
+
+ .empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
+ *(.empty_zero_page)
+ } = 0
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ HEAD_TEXT
+ TEXT_TEXT
+ EXTRA_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ _etext = .; /* End of text section */
+ } = 0x0009
+
+ EXCEPTION_TABLE(16)
+ NOTES
+
+ _sdata = .;
+ RO_DATA(PAGE_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ DWARF_EH_FRAME
+
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+
+ . = ALIGN(4);
+ .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
+ __machvec_start = .;
+ *(.machvec.init)
+ __machvec_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ /*
+ * .exit.text is discarded at runtime, not link time, to deal with
+ * references from __bug_table
+ */
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ BSS_SECTION(0, PAGE_SIZE, 4)
+ _end = . ;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS
+}
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
deleted file mode 100644
index c7113786ecd..00000000000
--- a/arch/sh/kernel/vmlinux_32.lds.S
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * ld script to make SuperH Linux kernel
- * Written by Niibe Yutaka
- */
-#include <asm/thread_info.h>
-#include <asm/cache.h>
-#include <asm-generic/vmlinux.lds.h>
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
-#else
-OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
-#endif
-OUTPUT_ARCH(sh)
-ENTRY(_start)
-SECTIONS
-{
-#ifdef CONFIG_32BIT
- . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-#else
- . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
-#endif
-
- _text = .; /* Text and read-only data */
-
- .empty_zero_page : {
- *(.empty_zero_page)
- } = 0
-
- .text : {
- *(.text.head)
- TEXT_TEXT
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.fixup)
- *(.gnu.warning)
- } = 0x0009
-
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
- _etext = .; /* End of text section */
-
- BUG_TABLE
- NOTES
- RO_DATA(PAGE_SIZE)
-
- /*
- * Code which must be executed uncached and the associated data
- */
- . = ALIGN(PAGE_SIZE);
- __uncached_start = .;
- .uncached.text : { *(.uncached.text) }
- .uncached.data : { *(.uncached.data) }
- __uncached_end = .;
-
- . = ALIGN(THREAD_SIZE);
- .data : { /* Data */
- *(.data.init_task)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.read_mostly)
-
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- __nosave_begin = .;
- *(.data.nosave)
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- DATA_DATA
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- . = ALIGN(PAGE_SIZE); /* Init code and data */
- __init_begin = .;
- _sinittext = .;
- .init.text : { INIT_TEXT }
- _einittext = .;
- .init.data : { INIT_DATA }
-
- . = ALIGN(16);
- __setup_start = .;
- .init.setup : { *(.init.setup) }
- __setup_end = .;
-
- __initcall_start = .;
- .initcall.init : {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
-
- SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
-
- . = ALIGN(4);
- __machvec_start = .;
- .machvec.init : { *(.machvec.init) }
- __machvec_end = .;
-
- PERCPU(PAGE_SIZE)
-
- /*
- * .exit.text is discarded at runtime, not link time, to deal with
- * references from __bug_table
- */
- .exit.text : { EXIT_TEXT }
- .exit.data : { EXIT_DATA }
-
- . = ALIGN(PAGE_SIZE);
- .bss : {
- __init_end = .;
- __bss_start = .; /* BSS */
- *(.bss.page_aligned)
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = .; /* uClinux MTD sucks */
- _end = . ;
- }
-
- /*
- * When something in the kernel is NOT compiled as a module, the
- * module cleanup code and data are put into these segments. Both
- * can then be thrown away, as cleanup code is never called unless
- * it's a module.
- */
- /DISCARD/ : {
- *(.exitcall.exit)
- }
-
- STABS_DEBUG
- DWARF_DEBUG
-}
diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S
deleted file mode 100644
index d1e177009a4..00000000000
--- a/arch/sh/kernel/vmlinux_64.lds.S
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * ld script to make SH64 Linux kernel
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- * benedict.gaster@superh.com: 2nd May 2002
- * Add definition of empty_zero_page to be the first page of kernel image.
- *
- * benedict.gaster@superh.com: 3rd May 2002
- * Added support for ramdisk, removing statically linked romfs at the
- * same time.
- *
- * lethal@linux-sh.org: 9th May 2003
- * Kill off GLOBAL_NAME() usage and other CDC-isms.
- *
- * lethal@linux-sh.org: 19th May 2003
- * Remove support for ancient toolchains.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <asm/page.h>
-#include <asm/cache.h>
-#include <asm/thread_info.h>
-
-#define LOAD_OFFSET CONFIG_PAGE_OFFSET
-#include <asm-generic/vmlinux.lds.h>
-
-OUTPUT_ARCH(sh:sh5)
-
-#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET)
-
-ENTRY(__start)
-SECTIONS
-{
- . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE;
- _text = .; /* Text and read-only data */
-
- .empty_zero_page : C_PHYS(.empty_zero_page) {
- *(.empty_zero_page)
- } = 0
-
- .text : C_PHYS(.text) {
- *(.text.head)
- TEXT_TEXT
- *(.text64)
- *(.text..SHmedia32)
- SCHED_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.fixup)
- *(.gnu.warning)
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- } = 0x6ff0fff0
-#else
- } = 0xf0fff06f
-#endif
-
- /* We likely want __ex_table to be Cache Line aligned */
- . = ALIGN(L1_CACHE_BYTES); /* Exception table */
- __start___ex_table = .;
- __ex_table : C_PHYS(__ex_table) { *(__ex_table) }
- __stop___ex_table = .;
-
- _etext = .; /* End of text section */
-
- BUG_TABLE
- NOTES
- RO_DATA(PAGE_SIZE)
-
- . = ALIGN(THREAD_SIZE);
- .data : C_PHYS(.data) { /* Data */
- *(.data.init_task)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.cacheline_aligned)
-
- . = ALIGN(L1_CACHE_BYTES);
- *(.data.read_mostly)
-
- . = ALIGN(PAGE_SIZE);
- *(.data.page_aligned)
-
- __nosave_begin = .;
- *(.data.nosave)
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- DATA_DATA
- CONSTRUCTORS
- }
-
- _edata = .; /* End of data section */
-
- . = ALIGN(PAGE_SIZE); /* Init code and data */
- __init_begin = .;
- _sinittext = .;
- .init.text : C_PHYS(.init.text) { INIT_TEXT }
- _einittext = .;
- .init.data : C_PHYS(.init.data) { INIT_DATA }
- . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */
- __setup_start = .;
- .init.setup : C_PHYS(.init.setup) { *(.init.setup) }
- __setup_end = .;
- __initcall_start = .;
- .initcall.init : C_PHYS(.initcall.init) {
- INITCALLS
- }
- __initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : C_PHYS(.con_initcall.init) {
- *(.con_initcall.init)
- }
- __con_initcall_end = .;
-
- SECURITY_INIT
-
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
-
- . = ALIGN(8);
- __machvec_start = .;
- .machvec.init : C_PHYS(.machvec.init) { *(.machvec.init) }
- __machvec_end = .;
-
- PERCPU(PAGE_SIZE)
-
- /*
- * .exit.text is discarded at runtime, not link time, to deal with
- * references from __bug_table
- */
- .exit.text : C_PHYS(.exit.text) { EXIT_TEXT }
- .exit.data : C_PHYS(.exit.data) { EXIT_DATA }
-
- . = ALIGN(PAGE_SIZE);
- .bss : C_PHYS(.bss) {
- __init_end = .;
- __bss_start = .; /* BSS */
- *(.bss.page_aligned)
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = .; /* uClinux MTD sucks */
- _end = . ;
- }
-
- /*
- * When something in the kernel is NOT compiled as a module, the
- * module cleanup code and data are put into these segments. Both
- * can then be thrown away, as cleanup code is never called unless
- * it's a module.
- */
- /DISCARD/ : {
- *(.exitcall.exit)
- }
-
- STABS_DEBUG
- DWARF_DEBUG
-}
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 4bbce1cfa35..8f0ea5fc835 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -15,7 +15,7 @@ quiet_cmd_syscall = SYSCALL $@
export CPPFLAGS_vsyscall.lds += -P -C -Ush
vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call ld-option, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
index 555a64f124c..23af1758405 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -34,6 +34,41 @@ __kernel_rt_sigreturn:
1: .short __NR_rt_sigreturn
.LEND_rt_sigreturn:
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+ .previous
.section .eh_frame,"a",@progbits
+.LCIE1:
+ .ualong .LCIE1_end - .LCIE1_start
+.LCIE1_start:
+ .ualong 0 /* CIE ID */
+ .byte 0x1 /* Version number */
+ .string "zRS" /* NUL-terminated augmentation string */
+ .uleb128 0x1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 0x11 /* Return address register column */
+ .uleb128 0x1 /* Augmentation length and data */
+ .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+ .byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
+
+ .align 2
+.LCIE1_end:
+
+ .ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */
+.LFDE0_start:
+ .ualong .LFDE0_start-.LCIE1 /* CIE pointer */
+ .ualong .LSTART_sigreturn-. /* PC-relative start address */
+ .ualong .LEND_sigreturn-.LSTART_sigreturn
+ .uleb128 0 /* Augmentation */
+ .align 2
+.LFDE0_end:
+
+ .ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */
+.LFDE1_start:
+ .ualong .LFDE1_start-.LCIE1 /* CIE pointer */
+ .ualong .LSTART_rt_sigreturn-. /* PC-relative start address */
+ .ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn
+ .uleb128 0 /* Augmentation */
+ .align 2
+.LFDE1_end:
+
.previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3b6eb34c43f..0eb74d00690 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -3,9 +3,8 @@
.type __kernel_vsyscall,@function
__kernel_vsyscall:
.LSTART_vsyscall:
- /* XXX: We'll have to do something here once we opt to use the vDSO
- * page for something other than the signal trampoline.. as well as
- * fill out .eh_frame -- PFM. */
+ trapa #0x10
+ nop
.LEND_vsyscall:
.size __kernel_vsyscall,.-.LSTART_vsyscall
.previous
@@ -16,24 +15,22 @@ __kernel_vsyscall:
.LCIE_start:
.ualong 0 /* CIE ID */
.byte 0x1 /* Version number */
- .string "zRS" /* NUL-terminated augmentation string */
+ .string "zR" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */
- /* Augmentation length and data (none) */
- .byte 0xc /* DW_CFA_def_cfa */
- .uleb128 0xf /* r15 */
- .uleb128 0x0 /* offset 0 */
-
+ .uleb128 0x1 /* Augmentation length and data */
+ .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+ .byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.align 2
.LCIE_end:
.ualong .LFDE_end-.LFDE_start /* Length FDE */
.LFDE_start:
- .ualong .LCIE /* CIE pointer */
- .ualong .LSTART_vsyscall-. /* start address */
+ .ualong .LFDE_start-.LCIE /* CIE pointer */
+ .ualong .LSTART_vsyscall-. /* PC-relative start address */
.ualong .LEND_vsyscall-.LSTART_vsyscall
- .uleb128 0
+ .uleb128 0 /* Augmentation */
.align 2
.LFDE_end:
.previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 95f4de0800e..5ca579720a0 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -11,7 +11,6 @@
* for more details.
*/
#include <linux/mm.h>
-#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gfp.h>
@@ -59,8 +58,7 @@ int __init vsyscall_init(void)
}
/* Setup a VMA at program startup for the vsyscall page */
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
unsigned long addr;
@@ -75,8 +73,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
ret = install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
- VM_ALWAYSDUMP,
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
syscall_pages);
if (unlikely(ret))
goto up_fail;
@@ -96,17 +93,17 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return NULL;
}
-struct vm_area_struct *get_gate_vma(struct task_struct *task)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
return NULL;
}
-int in_gate_area(struct task_struct *task, unsigned long address)
+int in_gate_area(struct mm_struct *mm, unsigned long address)
{
return 0;
}
-int in_gate_area_no_task(unsigned long address)
+int in_gate_area_no_mm(unsigned long address)
{
return 0;
}