Diffstat (limited to 'arch/sh/kernel/cpu/sh4')
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile         |  19
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4-202.c  |  74
-rw-r--r--  arch/sh/kernel/cpu/sh4/clock-sh4.c      |  32
-rw-r--r--  arch/sh/kernel/cpu/sh4/ex.S             |  62
-rw-r--r--  arch/sh/kernel/cpu/sh4/fpu.c            | 545
-rw-r--r--  arch/sh/kernel/cpu/sh4/perf_event.c     | 268
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c          | 290
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c  | 131
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c   | 399
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c   | 380
-rw-r--r--  arch/sh/kernel/cpu/sh4/softfloat.c      | 930
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c             |  94
12 files changed, 2472 insertions, 752 deletions
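For reference, a diffstat limited to this directory can be regenerated from a checkout of the tree with git. The commit id is not shown on this page, so <commit> below is a placeholder:

    git diff --stat <commit>^..<commit> -- arch/sh/kernel/cpu/sh4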
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index 19ca68c7188..3a1dbc70983 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile @@ -2,15 +2,25 @@ # Makefile for the Linux/SuperH SH-4 backends. # -obj-y := ex.o probe.o common.o -common-y += $(addprefix ../sh3/, entry.o) +obj-y := probe.o common.o +common-y += $(addprefix ../sh3/, entry.o ex.o) -obj-$(CONFIG_SH_FPU) += fpu.o +obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o) +obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o obj-$(CONFIG_SH_STORE_QUEUES) += sq.o +# Perf events +perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o +perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o +perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o + # CPU subtype setup obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7750S) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7091) += setup-sh7750.o obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7751R) += setup-sh7750.o obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o @@ -22,4 +32,5 @@ endif # Additional clocks by subtype clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o -obj-y += $(clock-y) +obj-y += $(clock-y) +obj-$(CONFIG_PERF_EVENTS) += $(perf-y) diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index fa2019aabd7..4b5bab5f875 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c @@ -12,19 +12,20 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/err.h> +#include <linux/io.h> +#include <linux/clkdev.h> #include <asm/clock.h> #include <asm/freq.h> -#include <asm/io.h> #define CPG2_FRQCR3 0xfe0a0018 static int frqcr3_divisors[] = { 1, 2, 3, 4, 6, 8, 16 }; static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 }; -static void emi_clk_recalc(struct clk *clk) +static unsigned long emi_clk_recalc(struct clk *clk) { - int idx = ctrl_inl(CPG2_FRQCR3) & 0x0007; - clk->rate = clk->parent->rate / frqcr3_divisors[idx]; + int idx = __raw_readl(CPG2_FRQCR3) & 0x0007; + return clk->parent->rate / frqcr3_divisors[idx]; } static inline int frqcr3_lookup(struct clk *clk, unsigned long rate) @@ -40,29 +41,27 @@ static inline int frqcr3_lookup(struct clk *clk, unsigned long rate) return 5; } -static struct clk_ops sh4202_emi_clk_ops = { +static struct sh_clk_ops sh4202_emi_clk_ops = { .recalc = emi_clk_recalc, }; static struct clk sh4202_emi_clk = { - .name = "emi_clk", - .flags = CLK_ALWAYS_ENABLED, + .flags = CLK_ENABLE_ON_INIT, .ops = &sh4202_emi_clk_ops, }; -static void femi_clk_recalc(struct clk *clk) +static unsigned long femi_clk_recalc(struct clk *clk) { - int idx = (ctrl_inl(CPG2_FRQCR3) >> 3) & 0x0007; - clk->rate = clk->parent->rate / frqcr3_divisors[idx]; + int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007; + return clk->parent->rate / frqcr3_divisors[idx]; } -static struct clk_ops sh4202_femi_clk_ops = { +static struct sh_clk_ops sh4202_femi_clk_ops = { .recalc = femi_clk_recalc, }; static struct clk sh4202_femi_clk = { - .name = "femi_clk", - .flags = CLK_ALWAYS_ENABLED, + .flags = CLK_ENABLE_ON_INIT, .ops = &sh4202_femi_clk_ops, }; @@ -89,10 +88,10 @@ static void shoc_clk_init(struct clk *clk) WARN_ON(i == ARRAY_SIZE(frqcr3_divisors)); /* Undefined clock */ } -static void shoc_clk_recalc(struct clk *clk) +static unsigned long shoc_clk_recalc(struct clk *clk) { - int idx = 
(ctrl_inl(CPG2_FRQCR3) >> 6) & 0x0007; - clk->rate = clk->parent->rate / frqcr3_divisors[idx]; + int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007; + return clk->parent->rate / frqcr3_divisors[idx]; } static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) @@ -121,25 +120,24 @@ static int shoc_clk_set_rate(struct clk *clk, unsigned long rate) tmp = frqcr3_lookup(clk, rate); - frqcr3 = ctrl_inl(CPG2_FRQCR3); + frqcr3 = __raw_readl(CPG2_FRQCR3); frqcr3 &= ~(0x0007 << 6); frqcr3 |= tmp << 6; - ctrl_outl(frqcr3, CPG2_FRQCR3); + __raw_writel(frqcr3, CPG2_FRQCR3); clk->rate = clk->parent->rate / frqcr3_divisors[tmp]; return 0; } -static struct clk_ops sh4202_shoc_clk_ops = { +static struct sh_clk_ops sh4202_shoc_clk_ops = { .init = shoc_clk_init, .recalc = shoc_clk_recalc, .set_rate = shoc_clk_set_rate, }; static struct clk sh4202_shoc_clk = { - .name = "shoc_clk", - .flags = CLK_ALWAYS_ENABLED, + .flags = CLK_ENABLE_ON_INIT, .ops = &sh4202_shoc_clk_ops, }; @@ -149,31 +147,31 @@ static struct clk *sh4202_onchip_clocks[] = { &sh4202_shoc_clk, }; -static int __init sh4202_clk_init(void) +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk), + CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk), + CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk), +}; + +int __init arch_clk_init(void) { - struct clk *clk = clk_get(NULL, "master_clk"); - int i; + struct clk *clk; + int i, ret = 0; + cpg_clk_init(); + + clk = clk_get(NULL, "master_clk"); for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) { struct clk *clkp = sh4202_onchip_clocks[i]; clkp->parent = clk; - clk_register(clkp); - clk_enable(clkp); + ret |= clk_register(clkp); } - /* - * Now that we have the rest of the clocks registered, we need to - * force the parent clock to propagate so that these clocks will - * automatically figure out their rate. We cheat by handing the - * parent clock its current rate and forcing child propagation. 
- */ - clk_set_rate(clk, clk_get_rate(clk)); - clk_put(clk); - return 0; -} - -arch_initcall(sh4202_clk_init); + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c index dca9f87a12d..99e5ec8b483 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c @@ -28,51 +28,51 @@ static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 }; static void master_clk_init(struct clk *clk) { - clk->rate *= pfc_divisors[ctrl_inw(FRQCR) & 0x0007]; + clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007]; } -static struct clk_ops sh4_master_clk_ops = { +static struct sh_clk_ops sh4_master_clk_ops = { .init = master_clk_init, }; -static void module_clk_recalc(struct clk *clk) +static unsigned long module_clk_recalc(struct clk *clk) { - int idx = (ctrl_inw(FRQCR) & 0x0007); - clk->rate = clk->parent->rate / pfc_divisors[idx]; + int idx = (__raw_readw(FRQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; } -static struct clk_ops sh4_module_clk_ops = { +static struct sh_clk_ops sh4_module_clk_ops = { .recalc = module_clk_recalc, }; -static void bus_clk_recalc(struct clk *clk) +static unsigned long bus_clk_recalc(struct clk *clk) { - int idx = (ctrl_inw(FRQCR) >> 3) & 0x0007; - clk->rate = clk->parent->rate / bfc_divisors[idx]; + int idx = (__raw_readw(FRQCR) >> 3) & 0x0007; + return clk->parent->rate / bfc_divisors[idx]; } -static struct clk_ops sh4_bus_clk_ops = { +static struct sh_clk_ops sh4_bus_clk_ops = { .recalc = bus_clk_recalc, }; -static void cpu_clk_recalc(struct clk *clk) +static unsigned long cpu_clk_recalc(struct clk *clk) { - int idx = (ctrl_inw(FRQCR) >> 6) & 0x0007; - clk->rate = clk->parent->rate / ifc_divisors[idx]; + int idx = (__raw_readw(FRQCR) >> 6) & 0x0007; + return clk->parent->rate / ifc_divisors[idx]; } -static struct clk_ops sh4_cpu_clk_ops = { +static struct sh_clk_ops sh4_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; -static struct clk_ops *sh4_clk_ops[] = { +static struct sh_clk_ops *sh4_clk_ops[] = { &sh4_master_clk_ops, &sh4_module_clk_ops, &sh4_bus_clk_ops, &sh4_cpu_clk_ops, }; -void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh4_clk_ops)) *ops = sh4_clk_ops[idx]; diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S deleted file mode 100644 index ac8ab57413c..00000000000 --- a/arch/sh/kernel/cpu/sh4/ex.S +++ /dev/null @@ -1,62 +0,0 @@ -/* - * arch/sh/kernel/cpu/sh4/ex.S - * - * The SH-4 exception vector table. - - * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2003 - 2006 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - */ -#include <linux/linkage.h> - - .align 2 - .data - -ENTRY(exception_handling_table) - .long exception_error /* 000 */ - .long exception_error -#if defined(CONFIG_MMU) - .long tlb_miss_load /* 040 */ - .long tlb_miss_store - .long initial_page_write - .long tlb_protection_violation_load - .long tlb_protection_violation_store - .long address_error_load - .long address_error_store /* 100 */ -#else - .long exception_error ! tlb miss load /* 040 */ - .long exception_error ! tlb miss store - .long exception_error ! initial page write - .long exception_error ! tlb prot violation load - .long exception_error ! 
tlb prot violation store - .long exception_error ! address error load - .long exception_error ! address error store /* 100 */ -#endif -#if defined(CONFIG_SH_FPU) - .long do_fpu_error /* 120 */ -#else - .long exception_error /* 120 */ -#endif - .long exception_error /* 140 */ - .long system_call ! Unconditional Trap /* 160 */ - .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ - .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ -ENTRY(nmi_slot) -#if defined (CONFIG_KGDB_NMI) - .long debug_enter /* 1C0 */ ! Allow trap to debugger -#else - .long exception_none /* 1C0 */ ! Not implemented yet -#endif -ENTRY(user_break_point_trap) - .long break_point_trap /* 1E0 */ - - /* - * Pad the remainder of the table out, exceptions residing in far - * away offsets can be manually inserted in to their appropriate - * location via set_exception_table_{evt,vec}(). - */ - .balign 4096,0,4096 diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index 7624677f662..69ab4d3c8d4 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -1,7 +1,4 @@ -/* $Id: fpu.c,v 1.4 2004/01/13 05:52:11 kkojima Exp $ - * - * linux/arch/sh/kernel/fpu.c - * +/* * Save/restore floating point context for signal handlers. * * This file is subject to the terms and conditions of the GNU General Public @@ -9,14 +6,17 @@ * for more details. * * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support) * - * FIXME! These routines can be optimized in big endian case. + * FIXME! These routines have not been tested for big endian case. */ - #include <linux/sched.h> #include <linux/signal.h> +#include <linux/io.h> +#include <cpu/fpu.h> #include <asm/processor.h> -#include <asm/io.h> +#include <asm/fpu.h> +#include <asm/traps.h> /* The PR (precision) bit in the FP Status Register must be clear when * an frchg instruction is executed, otherwise the instruction is undefined. @@ -24,177 +24,130 @@ */ #define FPSCR_RCHG 0x00000000 - +extern unsigned long long float64_div(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_div(unsigned long int a, unsigned long int b); +extern unsigned long long float64_mul(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_mul(unsigned long int a, unsigned long int b); +extern unsigned long long float64_add(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_add(unsigned long int a, unsigned long int b); +extern unsigned long long float64_sub(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_sub(unsigned long int a, unsigned long int b); +extern unsigned long int float64_to_float32(unsigned long long a); +static unsigned int fpu_exception_flags; /* * Save FPU registers onto task structure. - * Assume called with FPU enabled (SR.FD=0). 
*/ -void -save_fpu(struct task_struct *tsk, struct pt_regs *regs) +void save_fpu(struct task_struct *tsk) { unsigned long dummy; - clear_tsk_thread_flag(tsk, TIF_USEDFPU); enable_fpu(); - asm volatile("sts.l fpul, @-%0\n\t" - "sts.l fpscr, @-%0\n\t" - "lds %2, fpscr\n\t" - "frchg\n\t" - "fmov.s fr15, @-%0\n\t" - "fmov.s fr14, @-%0\n\t" - "fmov.s fr13, @-%0\n\t" - "fmov.s fr12, @-%0\n\t" - "fmov.s fr11, @-%0\n\t" - "fmov.s fr10, @-%0\n\t" - "fmov.s fr9, @-%0\n\t" - "fmov.s fr8, @-%0\n\t" - "fmov.s fr7, @-%0\n\t" - "fmov.s fr6, @-%0\n\t" - "fmov.s fr5, @-%0\n\t" - "fmov.s fr4, @-%0\n\t" - "fmov.s fr3, @-%0\n\t" - "fmov.s fr2, @-%0\n\t" - "fmov.s fr1, @-%0\n\t" - "fmov.s fr0, @-%0\n\t" - "frchg\n\t" - "fmov.s fr15, @-%0\n\t" - "fmov.s fr14, @-%0\n\t" - "fmov.s fr13, @-%0\n\t" - "fmov.s fr12, @-%0\n\t" - "fmov.s fr11, @-%0\n\t" - "fmov.s fr10, @-%0\n\t" - "fmov.s fr9, @-%0\n\t" - "fmov.s fr8, @-%0\n\t" - "fmov.s fr7, @-%0\n\t" - "fmov.s fr6, @-%0\n\t" - "fmov.s fr5, @-%0\n\t" - "fmov.s fr4, @-%0\n\t" - "fmov.s fr3, @-%0\n\t" - "fmov.s fr2, @-%0\n\t" - "fmov.s fr1, @-%0\n\t" - "fmov.s fr0, @-%0\n\t" - "lds %3, fpscr\n\t" - : "=r" (dummy) - : "0" ((char *)(&tsk->thread.fpu.hard.status)), - "r" (FPSCR_RCHG), - "r" (FPSCR_INIT) - : "memory"); - - disable_fpu(); - release_fpu(regs); -} - -static void -restore_fpu(struct task_struct *tsk) -{ - unsigned long dummy; + asm volatile ("sts.l fpul, @-%0\n\t" + "sts.l fpscr, @-%0\n\t" + "lds %2, fpscr\n\t" + "frchg\n\t" + "fmov.s fr15, @-%0\n\t" + "fmov.s fr14, @-%0\n\t" + "fmov.s fr13, @-%0\n\t" + "fmov.s fr12, @-%0\n\t" + "fmov.s fr11, @-%0\n\t" + "fmov.s fr10, @-%0\n\t" + "fmov.s fr9, @-%0\n\t" + "fmov.s fr8, @-%0\n\t" + "fmov.s fr7, @-%0\n\t" + "fmov.s fr6, @-%0\n\t" + "fmov.s fr5, @-%0\n\t" + "fmov.s fr4, @-%0\n\t" + "fmov.s fr3, @-%0\n\t" + "fmov.s fr2, @-%0\n\t" + "fmov.s fr1, @-%0\n\t" + "fmov.s fr0, @-%0\n\t" + "frchg\n\t" + "fmov.s fr15, @-%0\n\t" + "fmov.s fr14, @-%0\n\t" + "fmov.s fr13, @-%0\n\t" + "fmov.s fr12, @-%0\n\t" + "fmov.s fr11, @-%0\n\t" + "fmov.s fr10, @-%0\n\t" + "fmov.s fr9, @-%0\n\t" + "fmov.s fr8, @-%0\n\t" + "fmov.s fr7, @-%0\n\t" + "fmov.s fr6, @-%0\n\t" + "fmov.s fr5, @-%0\n\t" + "fmov.s fr4, @-%0\n\t" + "fmov.s fr3, @-%0\n\t" + "fmov.s fr2, @-%0\n\t" + "fmov.s fr1, @-%0\n\t" + "fmov.s fr0, @-%0\n\t" + "lds %3, fpscr\n\t":"=r" (dummy) + :"0"((char *)(&tsk->thread.xstate->hardfpu.status)), + "r"(FPSCR_RCHG), "r"(FPSCR_INIT) + :"memory"); - enable_fpu(); - asm volatile("lds %2, fpscr\n\t" - "fmov.s @%0+, fr0\n\t" - "fmov.s @%0+, fr1\n\t" - "fmov.s @%0+, fr2\n\t" - "fmov.s @%0+, fr3\n\t" - "fmov.s @%0+, fr4\n\t" - "fmov.s @%0+, fr5\n\t" - "fmov.s @%0+, fr6\n\t" - "fmov.s @%0+, fr7\n\t" - "fmov.s @%0+, fr8\n\t" - "fmov.s @%0+, fr9\n\t" - "fmov.s @%0+, fr10\n\t" - "fmov.s @%0+, fr11\n\t" - "fmov.s @%0+, fr12\n\t" - "fmov.s @%0+, fr13\n\t" - "fmov.s @%0+, fr14\n\t" - "fmov.s @%0+, fr15\n\t" - "frchg\n\t" - "fmov.s @%0+, fr0\n\t" - "fmov.s @%0+, fr1\n\t" - "fmov.s @%0+, fr2\n\t" - "fmov.s @%0+, fr3\n\t" - "fmov.s @%0+, fr4\n\t" - "fmov.s @%0+, fr5\n\t" - "fmov.s @%0+, fr6\n\t" - "fmov.s @%0+, fr7\n\t" - "fmov.s @%0+, fr8\n\t" - "fmov.s @%0+, fr9\n\t" - "fmov.s @%0+, fr10\n\t" - "fmov.s @%0+, fr11\n\t" - "fmov.s @%0+, fr12\n\t" - "fmov.s @%0+, fr13\n\t" - "fmov.s @%0+, fr14\n\t" - "fmov.s @%0+, fr15\n\t" - "frchg\n\t" - "lds.l @%0+, fpscr\n\t" - "lds.l @%0+, fpul\n\t" - : "=r" (dummy) - : "0" (&tsk->thread.fpu), "r" (FPSCR_RCHG) - : "memory"); disable_fpu(); } -/* - * Load the FPU with signalling NANS. 
This bit pattern we're using - * has the property that no matter wether considered as single or as - * double precission represents signaling NANS. - */ - -static void -fpu_init(void) +void restore_fpu(struct task_struct *tsk) { + unsigned long dummy; + enable_fpu(); - asm volatile("lds %0, fpul\n\t" - "lds %1, fpscr\n\t" - "fsts fpul, fr0\n\t" - "fsts fpul, fr1\n\t" - "fsts fpul, fr2\n\t" - "fsts fpul, fr3\n\t" - "fsts fpul, fr4\n\t" - "fsts fpul, fr5\n\t" - "fsts fpul, fr6\n\t" - "fsts fpul, fr7\n\t" - "fsts fpul, fr8\n\t" - "fsts fpul, fr9\n\t" - "fsts fpul, fr10\n\t" - "fsts fpul, fr11\n\t" - "fsts fpul, fr12\n\t" - "fsts fpul, fr13\n\t" - "fsts fpul, fr14\n\t" - "fsts fpul, fr15\n\t" - "frchg\n\t" - "fsts fpul, fr0\n\t" - "fsts fpul, fr1\n\t" - "fsts fpul, fr2\n\t" - "fsts fpul, fr3\n\t" - "fsts fpul, fr4\n\t" - "fsts fpul, fr5\n\t" - "fsts fpul, fr6\n\t" - "fsts fpul, fr7\n\t" - "fsts fpul, fr8\n\t" - "fsts fpul, fr9\n\t" - "fsts fpul, fr10\n\t" - "fsts fpul, fr11\n\t" - "fsts fpul, fr12\n\t" - "fsts fpul, fr13\n\t" - "fsts fpul, fr14\n\t" - "fsts fpul, fr15\n\t" - "frchg\n\t" - "lds %2, fpscr\n\t" - : /* no output */ - : "r" (0), "r" (FPSCR_RCHG), "r" (FPSCR_INIT)); - disable_fpu(); + asm volatile ("lds %2, fpscr\n\t" + "fmov.s @%0+, fr0\n\t" + "fmov.s @%0+, fr1\n\t" + "fmov.s @%0+, fr2\n\t" + "fmov.s @%0+, fr3\n\t" + "fmov.s @%0+, fr4\n\t" + "fmov.s @%0+, fr5\n\t" + "fmov.s @%0+, fr6\n\t" + "fmov.s @%0+, fr7\n\t" + "fmov.s @%0+, fr8\n\t" + "fmov.s @%0+, fr9\n\t" + "fmov.s @%0+, fr10\n\t" + "fmov.s @%0+, fr11\n\t" + "fmov.s @%0+, fr12\n\t" + "fmov.s @%0+, fr13\n\t" + "fmov.s @%0+, fr14\n\t" + "fmov.s @%0+, fr15\n\t" + "frchg\n\t" + "fmov.s @%0+, fr0\n\t" + "fmov.s @%0+, fr1\n\t" + "fmov.s @%0+, fr2\n\t" + "fmov.s @%0+, fr3\n\t" + "fmov.s @%0+, fr4\n\t" + "fmov.s @%0+, fr5\n\t" + "fmov.s @%0+, fr6\n\t" + "fmov.s @%0+, fr7\n\t" + "fmov.s @%0+, fr8\n\t" + "fmov.s @%0+, fr9\n\t" + "fmov.s @%0+, fr10\n\t" + "fmov.s @%0+, fr11\n\t" + "fmov.s @%0+, fr12\n\t" + "fmov.s @%0+, fr13\n\t" + "fmov.s @%0+, fr14\n\t" + "fmov.s @%0+, fr15\n\t" + "frchg\n\t" + "lds.l @%0+, fpscr\n\t" + "lds.l @%0+, fpul\n\t" + :"=r" (dummy) + :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG) + :"memory"); + disable_fpu(); } /** - * denormal_to_double - Given denormalized float number, - * store double float + * denormal_to_double - Given denormalized float number, + * store double float * - * @fpu: Pointer to sh_fpu_hard structure - * @n: Index to FP register + * @fpu: Pointer to sh_fpu_hard structure + * @n: Index to FP register */ -static void -denormal_to_double (struct sh_fpu_hard_struct *fpu, int n) +static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n) { unsigned long du, dl; unsigned long x = fpu->fpul; @@ -211,7 +164,7 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n) dl = x << 29; fpu->fp_regs[n] = du; - fpu->fp_regs[n+1] = dl; + fpu->fp_regs[n + 1] = dl; } } @@ -222,68 +175,213 @@ denormal_to_double (struct sh_fpu_hard_struct *fpu, int n) * * Returns 1 when it's handled (should not cause exception). 
*/ -static int -ieee_fpe_handler (struct pt_regs *regs) +static int ieee_fpe_handler(struct pt_regs *regs) { - unsigned short insn = *(unsigned short *) regs->pc; + unsigned short insn = *(unsigned short *)regs->pc; unsigned short finsn; unsigned long nextpc; int nib[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, - insn & 0xf}; - - if (nib[0] == 0xb || - (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ - regs->pr = regs->pc + 4; - - if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ - nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); - finsn = *(unsigned short *) (regs->pc + 2); - } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ + insn & 0xf + }; + + if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) + regs->pr = regs->pc + 4; /* bsr & jsr */ + + if (nib[0] == 0xa || nib[0] == 0xb) { + /* bra & bsr */ + nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3); + finsn = *(unsigned short *)(regs->pc + 2); + } else if (nib[0] == 0x8 && nib[1] == 0xd) { + /* bt/s */ if (regs->sr & 1) - nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); + nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1); else nextpc = regs->pc + 4; - finsn = *(unsigned short *) (regs->pc + 2); - } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ + finsn = *(unsigned short *)(regs->pc + 2); + } else if (nib[0] == 0x8 && nib[1] == 0xf) { + /* bf/s */ if (regs->sr & 1) nextpc = regs->pc + 4; else - nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); - finsn = *(unsigned short *) (regs->pc + 2); + nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1); + finsn = *(unsigned short *)(regs->pc + 2); } else if (nib[0] == 0x4 && nib[3] == 0xb && - (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ + (nib[2] == 0x0 || nib[2] == 0x2)) { + /* jmp & jsr */ nextpc = regs->regs[nib[1]]; - finsn = *(unsigned short *) (regs->pc + 2); + finsn = *(unsigned short *)(regs->pc + 2); } else if (nib[0] == 0x0 && nib[3] == 0x3 && - (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ + (nib[2] == 0x0 || nib[2] == 0x2)) { + /* braf & bsrf */ nextpc = regs->pc + 4 + regs->regs[nib[1]]; - finsn = *(unsigned short *) (regs->pc + 2); - } else if (insn == 0x000b) { /* rts */ + finsn = *(unsigned short *)(regs->pc + 2); + } else if (insn == 0x000b) { + /* rts */ nextpc = regs->pr; - finsn = *(unsigned short *) (regs->pc + 2); + finsn = *(unsigned short *)(regs->pc + 2); } else { - nextpc = regs->pc + 2; + nextpc = regs->pc + instruction_size(insn); finsn = insn; } - if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ + if ((finsn & 0xf1ff) == 0xf0ad) { + /* fcnvsd */ struct task_struct *tsk = current; - save_fpu(tsk, regs); - if ((tsk->thread.fpu.hard.fpscr & (1 << 17))) { + if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)) /* FPU error */ - denormal_to_double (&tsk->thread.fpu.hard, - (finsn >> 8) & 0xf); - tsk->thread.fpu.hard.fpscr &= - ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); - grab_fpu(regs); - restore_fpu(tsk); - set_tsk_thread_flag(tsk, TIF_USEDFPU); + denormal_to_double(&tsk->thread.xstate->hardfpu, + (finsn >> 8) & 0xf); + else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf00f) == 0xf002) { + /* fmul */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & FPSCR_DBL_PRECISION; + + if 
((fpscr & FPSCR_CAUSE_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal (doubles) */ + llx = ((long long)hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; + lly = ((long long)hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + llx = float64_mul(llx, lly); + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_CAUSE_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal (floats) */ + hx = float32_mul(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; } else - force_sig(SIGFPE, tsk); + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf00e) == 0xf000) { + /* fadd, fsub */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & FPSCR_DBL_PRECISION; + + if ((fpscr & FPSCR_CAUSE_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal (doubles) */ + llx = ((long long)hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; + lly = ((long long)hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + if ((finsn & 0xf00f) == 0xf000) + llx = float64_add(llx, lly); + else + llx = float64_sub(llx, lly); + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_CAUSE_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal (floats) */ + if ((finsn & 0xf00f) == 0xf000) + hx = float32_add(hx, hy); + else + hx = float32_sub(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf003) == 0xf003) { + /* fdiv */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & FPSCR_DBL_PRECISION; + + if ((fpscr & FPSCR_CAUSE_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal (doubles) */ + llx = ((long long)hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; + lly = ((long long)hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + + llx = float64_div(llx, lly); + + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_CAUSE_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal (floats) */ + hx = float32_div(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf0bd) == 0xf0bd) { + /* fcnvds - double to single precision convert */ + struct task_struct *tsk = current; + int m; + unsigned int hx; + + m = (finsn >> 8) & 0x7; + hx = 
tsk->thread.xstate->hardfpu.fp_regs[m]; + + if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR) + && ((hx & 0x7fffffff) < 0x00100000)) { + /* subnormal double to float conversion */ + long long llx; + + llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + + tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx); + } else + return 0; regs->pc = nextpc; return 1; @@ -292,41 +390,40 @@ ieee_fpe_handler (struct pt_regs *regs) return 0; } -asmlinkage void -do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, - unsigned long r7, struct pt_regs __regs) +void float_raise(unsigned int flags) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); - struct task_struct *tsk = current; - - if (ieee_fpe_handler(regs)) - return; - - regs->pc += 2; - save_fpu(tsk, regs); - force_sig(SIGFPE, tsk); + fpu_exception_flags |= flags; } -asmlinkage void -do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6, - unsigned long r7, struct pt_regs __regs) +int float_rounding_mode(void) { - struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct task_struct *tsk = current; + int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr); + return roundingMode; +} - grab_fpu(regs); - if (!user_mode(regs)) { - printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); - return; - } +BUILD_TRAP_HANDLER(fpu_error) +{ + struct task_struct *tsk = current; + TRAP_HANDLER_DECL; - if (used_math()) { - /* Using the FPU again. */ + __unlazy_fpu(tsk, regs); + fpu_exception_flags = 0; + if (ieee_fpe_handler(regs)) { + tsk->thread.xstate->hardfpu.fpscr &= + ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); + tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags; + /* Set the FPSCR flag as well as cause bits - simply + * replicate the cause */ + tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10); + grab_fpu(regs); restore_fpu(tsk); - } else { - /* First time FPU user. */ - fpu_init(); - set_used_math(); + task_thread_info(tsk)->status |= TS_USEDFPU; + if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) & + (fpu_exception_flags >> 2)) == 0) { + return; + } } - set_tsk_thread_flag(tsk, TIF_USEDFPU); + + force_sig(SIGFPE, tsk); } diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c new file mode 100644 index 00000000000..fa4f724b295 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/perf_event.c @@ -0,0 +1,268 @@ +/* + * Performance events support for SH7750-style performance counters + * + * Copyright (C) 2009 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/perf_event.h> +#include <asm/processor.h> + +#define PM_CR_BASE 0xff000084 /* 16-bit */ +#define PM_CTR_BASE 0xff100004 /* 32-bit */ + +#define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) +#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) +#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) + +#define PMCR_PMM_MASK 0x0000003f + +#define PMCR_CLKF 0x00000100 +#define PMCR_PMCLR 0x00002000 +#define PMCR_PMST 0x00004000 +#define PMCR_PMEN 0x00008000 + +static struct sh_pmu sh7750_pmu; + +/* + * There are a number of events supported by each counter (33 in total). + * Since we have 2 counters, each counter will take the event code as it + * corresponds to the PMCR PMM setting. 
Each counter can be configured + * independently. + * + * Event Code Description + * ---------- ----------- + * + * 0x01 Operand read access + * 0x02 Operand write access + * 0x03 UTLB miss + * 0x04 Operand cache read miss + * 0x05 Operand cache write miss + * 0x06 Instruction fetch (w/ cache) + * 0x07 Instruction TLB miss + * 0x08 Instruction cache miss + * 0x09 All operand accesses + * 0x0a All instruction accesses + * 0x0b OC RAM operand access + * 0x0d On-chip I/O space access + * 0x0e Operand access (r/w) + * 0x0f Operand cache miss (r/w) + * 0x10 Branch instruction + * 0x11 Branch taken + * 0x12 BSR/BSRF/JSR + * 0x13 Instruction execution + * 0x14 Instruction execution in parallel + * 0x15 FPU Instruction execution + * 0x16 Interrupt + * 0x17 NMI + * 0x18 trapa instruction execution + * 0x19 UBCA match + * 0x1a UBCB match + * 0x21 Instruction cache fill + * 0x22 Operand cache fill + * 0x23 Elapsed time + * 0x24 Pipeline freeze by I-cache miss + * 0x25 Pipeline freeze by D-cache miss + * 0x27 Pipeline freeze by branch instruction + * 0x28 Pipeline freeze by CPU register + * 0x29 Pipeline freeze by FPU + */ + +static const int sh7750_general_events[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x0023, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010, + [PERF_COUNT_HW_BRANCH_MISSES] = -1, + [PERF_COUNT_HW_BUS_CYCLES] = -1, +}; + +#define C(x) PERF_COUNT_HW_CACHE_##x + +static const int sh7750_cache_events + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0001, + [ C(RESULT_MISS) ] = 0x0004, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0002, + [ C(RESULT_MISS) ] = 0x0005, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0006, + [ C(RESULT_MISS) ] = 0x0008, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x0003, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x0007, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ 
C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +static int sh7750_event_map(int event) +{ + return sh7750_general_events[event]; +} + +static u64 sh7750_pmu_read(int idx) +{ + return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) | + __raw_readl(PMCTRL(idx)); +} + +static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx) +{ + unsigned int tmp; + + tmp = __raw_readw(PMCR(idx)); + tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN); + __raw_writew(tmp, PMCR(idx)); +} + +static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx) +{ + __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx)); + __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx)); +} + +static void sh7750_pmu_disable_all(void) +{ + int i; + + for (i = 0; i < sh7750_pmu.num_events; i++) + __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); +} + +static void sh7750_pmu_enable_all(void) +{ + int i; + + for (i = 0; i < sh7750_pmu.num_events; i++) + __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i)); +} + +static struct sh_pmu sh7750_pmu = { + .name = "sh7750", + .num_events = 2, + .event_map = sh7750_event_map, + .max_events = ARRAY_SIZE(sh7750_general_events), + .raw_event_mask = PMCR_PMM_MASK, + .cache_events = &sh7750_cache_events, + .read = sh7750_pmu_read, + .disable = sh7750_pmu_disable, + .enable = sh7750_pmu_enable, + .disable_all = sh7750_pmu_disable_all, + .enable_all = sh7750_pmu_enable_all, +}; + +static int __init sh7750_pmu_init(void) +{ + /* + * Make sure this CPU actually has perf counters. + */ + if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { + pr_notice("HW perf events unsupported, software events only.\n"); + return -ENODEV; + } + + return register_sh_pmu(&sh7750_pmu); +} +early_initcall(sh7750_pmu_init); diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 9d28c88d2f9..a521bcf5069 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -3,7 +3,7 @@ * * CPU Subtype Probing for SH-4. * - * Copyright (C) 2001 - 2006 Paul Mundt + * Copyright (C) 2001 - 2007 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public @@ -15,7 +15,7 @@ #include <asm/processor.h> #include <asm/cache.h> -int __init detect_cpu_and_cache_system(void) +void cpu_probe(void) { unsigned long pvr, prr, cvr; unsigned long size; @@ -28,44 +28,56 @@ int __init detect_cpu_and_cache_system(void) [9] = (1 << 16) }; - pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff; - prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff; - cvr = (ctrl_inl(CCN_CVR)); + pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff; + prr = (__raw_readl(CCN_PRR) >> 4) & 0xff; + cvr = (__raw_readl(CCN_CVR)); /* * Setup some sane SH-4 defaults for the icache */ - current_cpu_data.icache.way_incr = (1 << 13); - current_cpu_data.icache.entry_shift = 5; - current_cpu_data.icache.sets = 256; - current_cpu_data.icache.ways = 1; - current_cpu_data.icache.linesz = L1_CACHE_BYTES; + boot_cpu_data.icache.way_incr = (1 << 13); + boot_cpu_data.icache.entry_shift = 5; + boot_cpu_data.icache.sets = 256; + boot_cpu_data.icache.ways = 1; + boot_cpu_data.icache.linesz = L1_CACHE_BYTES; /* * And again for the dcache .. 
*/ - current_cpu_data.dcache.way_incr = (1 << 14); - current_cpu_data.dcache.entry_shift = 5; - current_cpu_data.dcache.sets = 512; - current_cpu_data.dcache.ways = 1; - current_cpu_data.dcache.linesz = L1_CACHE_BYTES; + boot_cpu_data.dcache.way_incr = (1 << 14); + boot_cpu_data.dcache.entry_shift = 5; + boot_cpu_data.dcache.sets = 512; + boot_cpu_data.dcache.ways = 1; + boot_cpu_data.dcache.linesz = L1_CACHE_BYTES; + + /* We don't know the chip cut */ + boot_cpu_data.cut_major = boot_cpu_data.cut_minor = -1; /* - * Setup some generic flags we can probe - * (L2 and DSP detection only work on SH-4A) + * Setup some generic flags we can probe on SH-4A parts */ if (((pvr >> 16) & 0xff) == 0x10) { - if ((cvr & 0x02000000) == 0) - current_cpu_data.flags |= CPU_HAS_L2_CACHE; - if ((cvr & 0x10000000) == 0) - current_cpu_data.flags |= CPU_HAS_DSP; + boot_cpu_data.family = CPU_FAMILY_SH4A; + + if ((cvr & 0x10000000) == 0) { + boot_cpu_data.flags |= CPU_HAS_DSP; + boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP; + } - current_cpu_data.flags |= CPU_HAS_LLSC; + boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER; + boot_cpu_data.cut_major = pvr & 0x7f; + + boot_cpu_data.icache.ways = 4; + boot_cpu_data.dcache.ways = 4; + } else { + /* And some SH-4 defaults.. */ + boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU; + boot_cpu_data.family = CPU_FAMILY_SH4; } - /* FPU detection works for everyone */ - if ((cvr & 0x20000000) == 1) - current_cpu_data.flags |= CPU_HAS_FPU; + /* FPU detection works for almost everyone */ + if ((cvr & 0x20000000)) + boot_cpu_data.flags |= CPU_HAS_FPU; /* Mask off the upper chip ID */ pvr &= 0xffff; @@ -76,178 +88,176 @@ int __init detect_cpu_and_cache_system(void) */ switch (pvr) { case 0x205: - current_cpu_data.type = CPU_SH7750; - current_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | - CPU_HAS_PERF_COUNTER; + boot_cpu_data.type = CPU_SH7750; + boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | + CPU_HAS_PERF_COUNTER; break; case 0x206: - current_cpu_data.type = CPU_SH7750S; - current_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | - CPU_HAS_PERF_COUNTER; + boot_cpu_data.type = CPU_SH7750S; + boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | + CPU_HAS_PERF_COUNTER; break; case 0x1100: - current_cpu_data.type = CPU_SH7751; - current_cpu_data.flags |= CPU_HAS_FPU; - break; - case 0x2000: - current_cpu_data.type = CPU_SH73180; - current_cpu_data.icache.ways = 4; - current_cpu_data.dcache.ways = 4; - current_cpu_data.flags |= CPU_HAS_LLSC; + boot_cpu_data.type = CPU_SH7751; break; case 0x2001: case 0x2004: - current_cpu_data.type = CPU_SH7770; - current_cpu_data.icache.ways = 4; - current_cpu_data.dcache.ways = 4; - - current_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_LLSC; + boot_cpu_data.type = CPU_SH7770; break; case 0x2006: case 0x200A: if (prr == 0x61) - current_cpu_data.type = CPU_SH7781; + boot_cpu_data.type = CPU_SH7781; + else if (prr == 0xa1) + boot_cpu_data.type = CPU_SH7763; else - current_cpu_data.type = CPU_SH7780; - - current_cpu_data.icache.ways = 4; - current_cpu_data.dcache.ways = 4; + boot_cpu_data.type = CPU_SH7780; - current_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER | - CPU_HAS_LLSC; break; case 0x3000: case 0x3003: case 0x3009: - current_cpu_data.type = CPU_SH7343; - current_cpu_data.icache.ways = 4; - current_cpu_data.dcache.ways = 4; - current_cpu_data.flags |= CPU_HAS_LLSC; + boot_cpu_data.type = CPU_SH7343; + break; + case 0x3004: + case 0x3007: + boot_cpu_data.type = CPU_SH7785; + break; + case 0x4004: + case 0x4005: + 
boot_cpu_data.type = CPU_SH7786; + boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE; break; case 0x3008: - if (prr == 0xa0) { - current_cpu_data.type = CPU_SH7722; - current_cpu_data.icache.ways = 4; - current_cpu_data.dcache.ways = 4; - current_cpu_data.flags |= CPU_HAS_LLSC; + switch (prr) { + case 0x50: + case 0x51: + boot_cpu_data.type = CPU_SH7723; + boot_cpu_data.flags |= CPU_HAS_L2_CACHE; + break; + case 0x70: + boot_cpu_data.type = CPU_SH7366; + break; + case 0xa0: + case 0xa1: + boot_cpu_data.type = CPU_SH7722; + break; } break; - case 0x8000: - current_cpu_data.type = CPU_ST40RA; - current_cpu_data.flags |= CPU_HAS_FPU; + case 0x300b: + switch (prr) { + case 0x20: + boot_cpu_data.type = CPU_SH7724; + boot_cpu_data.flags |= CPU_HAS_L2_CACHE; + break; + case 0x10: + case 0x11: + boot_cpu_data.type = CPU_SH7757; + break; + case 0xd0: + case 0x40: /* yon-ten-go */ + boot_cpu_data.type = CPU_SH7372; + break; + case 0xE0: /* 0x4E0 */ + boot_cpu_data.type = CPU_SH7734; /* SH7733/SH7734 */ + break; + + } break; - case 0x8100: - current_cpu_data.type = CPU_ST40GX1; - current_cpu_data.flags |= CPU_HAS_FPU; + case 0x4000: /* 1st cut */ + case 0x4001: /* 2nd cut */ + boot_cpu_data.type = CPU_SHX3; break; case 0x700: - current_cpu_data.type = CPU_SH4_501; - current_cpu_data.icache.ways = 2; - current_cpu_data.dcache.ways = 2; + boot_cpu_data.type = CPU_SH4_501; + boot_cpu_data.flags &= ~CPU_HAS_FPU; + boot_cpu_data.icache.ways = 2; + boot_cpu_data.dcache.ways = 2; break; case 0x600: - current_cpu_data.type = CPU_SH4_202; - current_cpu_data.icache.ways = 2; - current_cpu_data.dcache.ways = 2; - current_cpu_data.flags |= CPU_HAS_FPU; + boot_cpu_data.type = CPU_SH4_202; + boot_cpu_data.icache.ways = 2; + boot_cpu_data.dcache.ways = 2; break; case 0x500 ... 0x501: switch (prr) { case 0x10: - current_cpu_data.type = CPU_SH7750R; + boot_cpu_data.type = CPU_SH7750R; break; case 0x11: - current_cpu_data.type = CPU_SH7751R; + boot_cpu_data.type = CPU_SH7751R; break; case 0x50 ... 0x5f: - current_cpu_data.type = CPU_SH7760; + boot_cpu_data.type = CPU_SH7760; break; } - current_cpu_data.icache.ways = 2; - current_cpu_data.dcache.ways = 2; + boot_cpu_data.icache.ways = 2; + boot_cpu_data.dcache.ways = 2; - current_cpu_data.flags |= CPU_HAS_FPU; - - break; - default: - current_cpu_data.type = CPU_SH_NONE; break; } -#ifdef CONFIG_SH_DIRECT_MAPPED - current_cpu_data.icache.ways = 1; - current_cpu_data.dcache.ways = 1; -#endif - -#ifdef CONFIG_CPU_HAS_PTEA - current_cpu_data.flags |= CPU_HAS_PTEA; -#endif - /* * On anything that's not a direct-mapped cache, look to the CVR * for I/D-cache specifics. 
*/ - if (current_cpu_data.icache.ways > 1) { + if (boot_cpu_data.icache.ways > 1) { size = sizes[(cvr >> 20) & 0xf]; - current_cpu_data.icache.way_incr = (size >> 1); - current_cpu_data.icache.sets = (size >> 6); + boot_cpu_data.icache.way_incr = (size >> 1); + boot_cpu_data.icache.sets = (size >> 6); } - /* Setup the rest of the I-cache info */ - current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr - - current_cpu_data.icache.linesz; - - current_cpu_data.icache.way_size = current_cpu_data.icache.sets * - current_cpu_data.icache.linesz; - /* And the rest of the D-cache */ - if (current_cpu_data.dcache.ways > 1) { + if (boot_cpu_data.dcache.ways > 1) { size = sizes[(cvr >> 16) & 0xf]; - current_cpu_data.dcache.way_incr = (size >> 1); - current_cpu_data.dcache.sets = (size >> 6); + boot_cpu_data.dcache.way_incr = (size >> 1); + boot_cpu_data.dcache.sets = (size >> 6); } - current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr - - current_cpu_data.dcache.linesz; - - current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets * - current_cpu_data.dcache.linesz; - /* - * Setup the L2 cache desc - * * SH-4A's have an optional PIPT L2. */ - if (current_cpu_data.flags & CPU_HAS_L2_CACHE) { + if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { /* - * Size calculation is much more sensible - * than it is for the L1. - * - * Sizes are 128KB, 258KB, 512KB, and 1MB. + * Verify that it really has something hooked up, this + * is the safety net for CPUs that have optional L2 + * support yet do not implement it. */ - size = (cvr & 0xf) << 17; - - BUG_ON(!size); - - current_cpu_data.scache.way_incr = (1 << 16); - current_cpu_data.scache.entry_shift = 5; - current_cpu_data.scache.ways = 4; - current_cpu_data.scache.linesz = L1_CACHE_BYTES; - - current_cpu_data.scache.entry_mask = - (current_cpu_data.scache.way_incr - - current_cpu_data.scache.linesz); - - current_cpu_data.scache.sets = size / - (current_cpu_data.scache.linesz * - current_cpu_data.scache.ways); - - current_cpu_data.scache.way_size = - (current_cpu_data.scache.sets * - current_cpu_data.scache.linesz); + if ((cvr & 0xf) == 0) + boot_cpu_data.flags &= ~CPU_HAS_L2_CACHE; + else { + /* + * Silicon and specifications have clearly never + * met.. + */ + cvr ^= 0xf; + + /* + * Size calculation is much more sensible + * than it is for the L1. + * + * Sizes are 128KB, 256KB, 512KB, and 1MB. + */ + size = (cvr & 0xf) << 17; + + boot_cpu_data.scache.way_incr = (1 << 16); + boot_cpu_data.scache.entry_shift = 5; + boot_cpu_data.scache.ways = 4; + boot_cpu_data.scache.linesz = L1_CACHE_BYTES; + + boot_cpu_data.scache.entry_mask = + (boot_cpu_data.scache.way_incr - + boot_cpu_data.scache.linesz); + + boot_cpu_data.scache.sets = size / + (boot_cpu_data.scache.linesz * + boot_cpu_data.scache.ways); + + boot_cpu_data.scache.way_size = + (boot_cpu_data.scache.sets * + boot_cpu_data.scache.linesz); + } } - - return 0; } diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 6e4e9654135..e7a7b3cdf68 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -2,6 +2,7 @@ * SH4-202 Setup * * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2009 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -10,29 +11,59 @@ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> -#include <asm/sci.h> - -static struct plat_sci_port sci_platform_data[] = { - { - .mapbase = 0xffe80000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 40, 41, 43, 42 }, - }, { - .flags = 0, - } +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/io.h> + +static struct plat_sci_port scif0_platform_data = { + .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .type = PORT_SCIF, }; -static struct platform_device sci_device = { +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe80000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), + DEFINE_RES_IRQ(evt2irq(0x720)), + DEFINE_RES_IRQ(evt2irq(0x760)), + DEFINE_RES_IRQ(evt2irq(0x740)), +}; + +static struct platform_device scif0_device = { .name = "sh-sci", - .id = -1, + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), .dev = { - .platform_data = sci_platform_data, + .platform_data = &scif0_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct platform_device *sh4202_devices[] __initdata = { - &sci_device, + &scif0_device, + &tmu0_device, }; static int __init sh4202_devices_setup(void) @@ -40,4 +71,72 @@ static int __init sh4202_devices_setup(void) return platform_add_devices(sh4202_devices, ARRAY_SIZE(sh4202_devices)); } -__initcall(sh4202_devices_setup); +arch_initcall(sh4202_devices_setup); + +static struct platform_device *sh4202_early_devices[] __initdata = { + &scif0_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + early_platform_add_devices(sh4202_early_devices, + ARRAY_SIZE(sh4202_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */ + HUDI, TMU0, TMU1, TMU2, RTC, SCIF, WDT, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDI, 0x600), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720), + INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760), + INTC_VECT(WDT, 0x560), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, 0, 0, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { 0, 0, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh4-202", vectors, NULL, + NULL, prio_registers, NULL); + +static struct intc_vect vectors_irlm[] __initdata = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +static DECLARE_INTC_DESC(intc_desc_irlm, "sh4-202_irlm", vectors_irlm, NULL, + NULL, prio_registers, NULL); + +void __init plat_irq_setup(void) +{ + 
register_intc_controller(&intc_desc); +} + +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1<<7) + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ + __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc_irlm); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 6f8f458912c..5f08c59b9f3 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -1,5 +1,5 @@ /* - * SH7750/SH7751 Setup + * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup * * Copyright (C) 2006 Paul Mundt * Copyright (C) 2006 Jamie Lenehan @@ -12,7 +12,10 @@ #include <linux/init.h> #include <linux/serial.h> #include <linux/io.h> -#include <asm/sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/serial_sci.h> +#include <generated/machtypes.h> static struct resource rtc_resources[] = { [0] = { @@ -21,18 +24,8 @@ static struct resource rtc_resources[] = { .flags = IORESOURCE_IO, }, [1] = { - /* Period IRQ */ - .start = 21, - .flags = IORESOURCE_IRQ, - }, - [2] = { - /* Carry IRQ */ - .start = 22, - .flags = IORESOURCE_IRQ, - }, - [3] = { - /* Alarm IRQ */ - .start = 20, + /* Shared Period/Carry/Alarm IRQ */ + .start = evt2irq(0x480), .flags = IORESOURCE_IRQ, }, }; @@ -44,108 +37,330 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; -static struct plat_sci_port sci_platform_data[] = { - { -#ifndef CONFIG_SH_RTS7751R2D - .mapbase = 0xffe00000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCI, - .irqs = { 23, 24, 25, 0 }, - }, { -#endif - .mapbase = 0xffe80000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 40, 41, 43, 42 }, - }, { - .flags = 0, - } +static struct plat_sci_port sci_platform_data = { + .port_reg = 0xffe0001C, + .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE, + .type = PORT_SCI, + .regshift = 2, +}; + +static struct resource sci_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x4e0)), }; static struct platform_device sci_device = { .name = "sh-sci", - .id = -1, + .id = 0, + .resource = sci_resources, + .num_resources = ARRAY_SIZE(sci_resources), + .dev = { + .platform_data = &sci_platform_data, + }, +}; + +static struct plat_sci_port scif_platform_data = { + .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_TE | SCSCR_RE | SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif_resources[] = { + DEFINE_RES_MEM(0xffe80000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), +}; + +static struct platform_device scif_device = { + .name = "sh-sci", + .id = 1, + .resource = scif_resources, + .num_resources = ARRAY_SIZE(scif_resources), .dev = { - .platform_data = sci_platform_data, + .platform_data = &scif_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +/* SH7750R, SH7751 and SH7751R all have two extra timer channels */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || 
\ + defined(CONFIG_CPU_SUBTYPE_SH7751R) + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 3, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xfe100000, 0x20), + DEFINE_RES_IRQ(evt2irq(0xb00)), + DEFINE_RES_IRQ(evt2irq(0xb80)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), }; +#endif + static struct platform_device *sh7750_devices[] __initdata = { &rtc_device, - &sci_device, + &tmu0_device, +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) + &tmu1_device, +#endif }; static int __init sh7750_devices_setup(void) { + if (mach_is_rts7751r2d()) { + platform_device_register(&scif_device); + } else { + platform_device_register(&sci_device); + platform_device_register(&scif_device); + } + return platform_add_devices(sh7750_devices, ARRAY_SIZE(sh7750_devices)); } -__initcall(sh7750_devices_setup); - -static struct ipr_data sh7750_ipr_map[] = { - /* IRQ, IPR-idx, shift, priority */ - { 16, 0, 12, 2 }, /* TMU0 TUNI*/ - { 17, 0, 12, 2 }, /* TMU1 TUNI */ - { 18, 0, 4, 2 }, /* TMU2 TUNI */ - { 19, 0, 4, 2 }, /* TMU2 TIPCI */ - { 27, 1, 12, 2 }, /* WDT ITI */ - { 20, 0, 0, 2 }, /* RTC ATI (alarm) */ - { 21, 0, 0, 2 }, /* RTC PRI (period) */ - { 22, 0, 0, 2 }, /* RTC CUI (carry) */ - { 23, 1, 4, 3 }, /* SCI ERI */ - { 24, 1, 4, 3 }, /* SCI RXI */ - { 25, 1, 4, 3 }, /* SCI TXI */ - { 40, 2, 4, 3 }, /* SCIF ERI */ - { 41, 2, 4, 3 }, /* SCIF RXI */ - { 42, 2, 4, 3 }, /* SCIF BRI */ - { 43, 2, 4, 3 }, /* SCIF TXI */ - { 34, 2, 8, 7 }, /* DMAC DMTE0 */ - { 35, 2, 8, 7 }, /* DMAC DMTE1 */ - { 36, 2, 8, 7 }, /* DMAC DMTE2 */ - { 37, 2, 8, 7 }, /* DMAC DMTE3 */ - { 38, 2, 8, 7 }, /* DMAC DMAE */ -}; - -static struct ipr_data sh7751_ipr_map[] = { - { 44, 2, 8, 7 }, /* DMAC DMTE4 */ - { 45, 2, 8, 7 }, /* DMAC DMTE5 */ - { 46, 2, 8, 7 }, /* DMAC DMTE6 */ - { 47, 2, 8, 7 }, /* DMAC DMTE7 */ - /* The following use INTC_INPRI00 for masking, which is a 32-bit - register, not a 16-bit register like the IPRx registers, so it - would need special support */ - /*{ 72, INTPRI00, 8, ? },*/ /* TMU3 TUNI */ - /*{ 76, INTPRI00, 12, ? 
},*/ /* TMU4 TUNI */ -}; - -static unsigned long ipr_offsets[] = { - 0xffd00004UL, /* 0: IPRA */ - 0xffd00008UL, /* 1: IPRB */ - 0xffd0000cUL, /* 2: IPRC */ - 0xffd00010UL, /* 3: IPRD */ -}; - -/* given the IPR index return the address of the IPR register */ -unsigned int map_ipridx_to_addr(int idx) +arch_initcall(sh7750_devices_setup); + +static struct platform_device *sh7750_early_devices[] __initdata = { + &tmu0_device, +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) + &tmu1_device, +#endif +}; + +void __init plat_early_device_setup(void) { - if (idx >= ARRAY_SIZE(ipr_offsets)) - return 0; - return ipr_offsets[idx]; + struct platform_device *dev[1]; + + if (mach_is_rts7751r2d()) { + scif_platform_data.scscr |= SCSCR_CKE1; + dev[0] = &scif_device; + early_platform_add_devices(dev, 1); + } else { + dev[0] = &sci_device; + early_platform_add_devices(dev, 1); + dev[0] = &scif_device; + early_platform_add_devices(dev, 1); + } + + early_platform_add_devices(sh7750_early_devices, + ARRAY_SIZE(sh7750_early_devices)); } -#define INTC_ICR 0xffd00000UL -#define INTC_ICR_IRLM (1<<7) +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */ + HUDI, GPIOI, DMAC, + PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, + TMU3, TMU4, TMU0, TMU1, TMU2, RTC, SCI1, SCIF, WDT, REF, -/* enable individual interrupt mode for external interupts */ -void ipr_irq_enable_irlm(void) + /* interrupt groups */ + PCIC1, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCI1, 0x4e0), INTC_VECT(SCI1, 0x500), + INTC_VECT(SCI1, 0x520), INTC_VECT(SCI1, 0x540), + INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720), + INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, + TMU4, TMU3, + PCIC1, PCIC0_PCISERR } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, NULL, + NULL, prio_registers, NULL); + +/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ + defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7091) +static struct intc_vect vectors_dma4[] __initdata = { + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x6c0), +}; + +static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4", + vectors_dma4, NULL, + NULL, prio_registers, NULL); +#endif + +/* SH7750R and SH7751R both have 8-channel DMA controllers */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R) +static struct intc_vect vectors_dma8[] __initdata = { + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + 
INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), + INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0), + INTC_VECT(DMAC, 0x6c0), +}; + +static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8", + vectors_dma8, NULL, + NULL, prio_registers, NULL); +#endif + +/* SH7750R, SH7751 and SH7751R all have two extra timer channels */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) +static struct intc_vect vectors_tmu34[] __initdata = { + INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, TMU4, TMU3, + PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, + PCIC1_PCIDMA3, PCIC0_PCISERR } }, +}; + +static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34", + vectors_tmu34, NULL, + mask_registers, prio_registers, NULL); +#endif + +/* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */ +static struct intc_vect vectors_irlm[] __initdata = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL, + NULL, prio_registers, NULL); + +/* SH7751 and SH7751R both have PCI */ +#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R) +static struct intc_vect vectors_pci[] __initdata = { + INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), + INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), + INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), + INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), +}; + +static struct intc_group groups_pci[] __initdata = { + INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), +}; + +static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci, + mask_registers, prio_registers, NULL); +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ + defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ + defined(CONFIG_CPU_SUBTYPE_SH7091) +void __init plat_irq_setup(void) +{ + /* + * same vectors for SH7750, SH7750S and SH7091 except for IRLM, + * see below.. 
+ */ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma4); +} +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma8); + register_intc_controller(&intc_desc_tmu34); +} +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7751) +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma4); + register_intc_controller(&intc_desc_tmu34); + register_intc_controller(&intc_desc_pci); +} +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7751R) +void __init plat_irq_setup(void) { - ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma8); + register_intc_controller(&intc_desc_tmu34); + register_intc_controller(&intc_desc_pci); } +#endif + +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1<<7) -void __init init_IRQ_ipr() +void __init plat_irq_setup_pins(int mode) { - make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map)); -#ifdef CONFIG_CPU_SUBTYPE_SH7751 - make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map)); +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7091) + BUG(); /* impossible to mask interrupts on SH7750 and SH7091 */ + return; #endif + + switch (mode) { + case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ + __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc_irlm); + break; + default: + BUG(); + } } diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index b7c702821e6..973b736b3b9 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -10,39 +10,250 @@ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> -#include <asm/sci.h> - -static struct plat_sci_port sci_platform_data[] = { - { - .mapbase = 0xfe600000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 52, 53, 55, 54 }, - }, { - .mapbase = 0xfe610000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 72, 73, 75, 74 }, - }, { - .mapbase = 0xfe620000, - .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCIF, - .irqs = { 76, 77, 79, 78 }, - }, { - .flags = 0, - } +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/serial_sci.h> +#include <linux/io.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0, IRL1, IRL2, IRL3, + HUDI, GPIOI, DMAC, + IRQ4, IRQ5, IRQ6, IRQ7, + HCAN20, HCAN21, + SSI0, SSI1, + HAC0, HAC1, + I2C0, I2C1, + USB, LCDC, + DMABRG0, DMABRG1, DMABRG2, + SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, + SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, + SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, + SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI, + HSPI, + MMCIF0, MMCIF1, MMCIF2, MMCIF3, + MFI, ADC, CMT, + TMU0, TMU1, TMU2, + WDT, REF, + + /* interrupt groups */ + DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), + INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0), + INTC_VECT(DMAC, 0x6c0), + INTC_VECT(IRQ4, 0x800), INTC_VECT(IRQ5, 0x820), + INTC_VECT(IRQ6, 0x840), INTC_VECT(IRQ6, 0x860), + INTC_VECT(HCAN20, 0x900), INTC_VECT(HCAN21, 0x920), + INTC_VECT(SSI0, 0x940), 
INTC_VECT(SSI1, 0x960), + INTC_VECT(HAC0, 0x980), INTC_VECT(HAC1, 0x9a0), + INTC_VECT(I2C0, 0x9c0), INTC_VECT(I2C1, 0x9e0), + INTC_VECT(USB, 0xa00), INTC_VECT(LCDC, 0xa20), + INTC_VECT(DMABRG0, 0xa80), INTC_VECT(DMABRG1, 0xaa0), + INTC_VECT(DMABRG2, 0xac0), + INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0), + INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0), + INTC_VECT(SCIF1_ERI, 0xb00), INTC_VECT(SCIF1_RXI, 0xb20), + INTC_VECT(SCIF1_BRI, 0xb40), INTC_VECT(SCIF1_TXI, 0xb60), + INTC_VECT(SCIF2_ERI, 0xb80), INTC_VECT(SCIF2_RXI, 0xba0), + INTC_VECT(SCIF2_BRI, 0xbc0), INTC_VECT(SCIF2_TXI, 0xbe0), + INTC_VECT(SIM_ERI, 0xc00), INTC_VECT(SIM_RXI, 0xc20), + INTC_VECT(SIM_TXI, 0xc40), INTC_VECT(SIM_TEI, 0xc60), + INTC_VECT(HSPI, 0xc80), + INTC_VECT(MMCIF0, 0xd00), INTC_VECT(MMCIF1, 0xd20), + INTC_VECT(MMCIF2, 0xd40), INTC_VECT(MMCIF3, 0xd60), + INTC_VECT(MFI, 0xe80), /* 0xf80 according to data sheet */ + INTC_VECT(ADC, 0xf80), INTC_VECT(CMT, 0xfa0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMABRG, DMABRG0, DMABRG1, DMABRG2), + INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), + INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), + INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), + INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI), + INTC_GROUP(MMCIF, MMCIF0, MMCIF1, MMCIF2, MMCIF3), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21, + SSI0, SSI1, HAC0, HAC1, I2C0, I2C1, USB, LCDC, + 0, DMABRG0, DMABRG1, DMABRG2, + SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, + SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, + SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, } }, + { 0xfe080044, 0xfe080064, 32, /* INTMSK04 / INTMSKCLR04 */ + { 0, 0, 0, 0, 0, 0, 0, 0, + SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI, + HSPI, MMCIF0, MMCIF1, MMCIF2, + MMCIF3, 0, 0, 0, 0, 0, 0, 0, + 0, MFI, 0, 0, 0, 0, ADC, CMT, } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, 0, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfe080004, 0, 32, 4, /* INTPRI04 */ { HCAN20, HCAN21, SSI0, SSI1, + HAC0, HAC1, I2C0, I2C1 } }, + { 0xfe080008, 0, 32, 4, /* INTPRI08 */ { USB, LCDC, DMABRG, SCIF0, + SCIF1, SCIF2, SIM, HSPI } }, + { 0xfe08000c, 0, 32, 4, /* INTPRI0C */ { 0, 0, MMCIF, 0, + MFI, 0, ADC, CMT } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xfe600000, 0x100), + 
DEFINE_RES_IRQ(evt2irq(0x880)), + DEFINE_RES_IRQ(evt2irq(0x8a0)), + DEFINE_RES_IRQ(evt2irq(0x8e0)), + DEFINE_RES_IRQ(evt2irq(0x8c0)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xfe610000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xb00)), + DEFINE_RES_IRQ(evt2irq(0xb20)), + DEFINE_RES_IRQ(evt2irq(0xb60)), + DEFINE_RES_IRQ(evt2irq(0xb40)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfe620000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xb80)), + DEFINE_RES_IRQ(evt2irq(0xba0)), + DEFINE_RES_IRQ(evt2irq(0xbe0)), + DEFINE_RES_IRQ(evt2irq(0xbc0)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .flags = UPF_BOOT_AUTOCONF, + .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, + .type = PORT_SCI, + .regshift = 2, }; -static struct platform_device sci_device = { +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfe480000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc00)), + DEFINE_RES_IRQ(evt2irq(0xc20)), + DEFINE_RES_IRQ(evt2irq(0xc40)), +}; + +static struct platform_device scif3_device = { .name = "sh-sci", - .id = -1, + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), .dev = { - .platform_data = sci_platform_data, + .platform_data = &scif3_platform_data, }, }; +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + + static struct platform_device *sh7760_devices[] __initdata = { - &sci_device, + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &tmu0_device, }; static int __init sh7760_devices_setup(void) @@ -50,103 +261,38 @@ static int __init sh7760_devices_setup(void) return platform_add_devices(sh7760_devices, ARRAY_SIZE(sh7760_devices)); } -__initcall(sh7760_devices_setup); - -static struct intc2_data intc2_irq_table[] = { - {48, 0, 28, 0, 31, 3}, /* IRQ 4 */ - {49, 0, 24, 0, 30, 3}, /* IRQ 3 */ - {50, 0, 20, 0, 29, 3}, /* IRQ 2 */ - {51, 0, 16, 0, 28, 3}, /* IRQ 1 */ - {56, 4, 28, 0, 25, 3}, /* HCAN2_CHAN0 */ - {57, 4, 24, 0, 24, 3}, /* HCAN2_CHAN1 */ - {58, 4, 20, 0, 23, 3}, /* I2S_CHAN0 */ - {59, 4, 16, 0, 22, 3}, /* 
I2S_CHAN1 */ - {60, 4, 12, 0, 21, 3}, /* AC97_CHAN0 */ - {61, 4, 8, 0, 20, 3}, /* AC97_CHAN1 */ - {62, 4, 4, 0, 19, 3}, /* I2C_CHAN0 */ - {63, 4, 0, 0, 18, 3}, /* I2C_CHAN1 */ - {52, 8, 16, 0, 11, 3}, /* SCIF0_ERI_IRQ */ - {53, 8, 16, 0, 10, 3}, /* SCIF0_RXI_IRQ */ - {54, 8, 16, 0, 9, 3}, /* SCIF0_BRI_IRQ */ - {55, 8, 16, 0, 8, 3}, /* SCIF0_TXI_IRQ */ - {64, 8, 28, 0, 17, 3}, /* USBHI_IRQ */ - {65, 8, 24, 0, 16, 3}, /* LCDC */ - {68, 8, 20, 0, 14, 13}, /* DMABRGI0_IRQ */ - {69, 8, 20, 0, 13, 13}, /* DMABRGI1_IRQ */ - {70, 8, 20, 0, 12, 13}, /* DMABRGI2_IRQ */ - {72, 8, 12, 0, 7, 3}, /* SCIF1_ERI_IRQ */ - {73, 8, 12, 0, 6, 3}, /* SCIF1_RXI_IRQ */ - {74, 8, 12, 0, 5, 3}, /* SCIF1_BRI_IRQ */ - {75, 8, 12, 0, 4, 3}, /* SCIF1_TXI_IRQ */ - {76, 8, 8, 0, 3, 3}, /* SCIF2_ERI_IRQ */ - {77, 8, 8, 0, 2, 3}, /* SCIF2_RXI_IRQ */ - {78, 8, 8, 0, 1, 3}, /* SCIF2_BRI_IRQ */ - {79, 8, 8, 0, 0, 3}, /* SCIF2_TXI_IRQ */ - {80, 8, 4, 4, 23, 3}, /* SIM_ERI */ - {81, 8, 4, 4, 22, 3}, /* SIM_RXI */ - {82, 8, 4, 4, 21, 3}, /* SIM_TXI */ - {83, 8, 4, 4, 20, 3}, /* SIM_TEI */ - {84, 8, 0, 4, 19, 3}, /* HSPII */ - {88, 12, 20, 4, 18, 3}, /* MMCI0 */ - {89, 12, 20, 4, 17, 3}, /* MMCI1 */ - {90, 12, 20, 4, 16, 3}, /* MMCI2 */ - {91, 12, 20, 4, 15, 3}, /* MMCI3 */ - {92, 12, 12, 4, 6, 3}, /* MFI */ - {108,12, 4, 4, 1, 3}, /* ADC */ - {109,12, 0, 4, 0, 3}, /* CMTI */ -}; - -static struct ipr_data sh7760_ipr_map[] = { - /* IRQ, IPR-idx, shift, priority */ - { 16, 0, 12, 2 }, /* TMU0 TUNI*/ - { 17, 0, 8, 2 }, /* TMU1 TUNI */ - { 18, 0, 4, 2 }, /* TMU2 TUNI */ - { 19, 0, 4, 2 }, /* TMU2 TIPCI */ - { 27, 1, 12, 2 }, /* WDT ITI */ - { 28, 1, 8, 2 }, /* REF RCMI */ - { 29, 1, 8, 2 }, /* REF ROVI */ - { 32, 2, 0, 7 }, /* HUDI */ - { 33, 2, 12, 7 }, /* GPIOI */ - { 34, 2, 8, 7 }, /* DMAC DMTE0 */ - { 35, 2, 8, 7 }, /* DMAC DMTE1 */ - { 36, 2, 8, 7 }, /* DMAC DMTE2 */ - { 37, 2, 8, 7 }, /* DMAC DMTE3 */ - { 38, 2, 8, 7 }, /* DMAC DMAE */ - { 44, 2, 8, 7 }, /* DMAC DMTE4 */ - { 45, 2, 8, 7 }, /* DMAC DMTE5 */ - { 46, 2, 8, 7 }, /* DMAC DMTE6 */ - { 47, 2, 8, 7 }, /* DMAC DMTE7 */ -/* these here are only valid if INTC_ICR bit 7 is set to 1! - * XXX: maybe CONFIG_SH_IRLMODE symbol? 
SH7751 could use it too */ -#if 0 - { 2, 3, 12, 3 }, /* IRL0 */ - { 5, 3, 8, 3 }, /* IRL1 */ - { 8, 3, 4, 3 }, /* IRL2 */ - { 11, 3, 0, 3 }, /* IRL3 */ -#endif -}; - -static unsigned long ipr_offsets[] = { - 0xffd00004UL, /* 0: IPRA */ - 0xffd00008UL, /* 1: IPRB */ - 0xffd0000cUL, /* 2: IPRC */ - 0xffd00010UL, /* 3: IPRD */ -}; - -/* given the IPR index return the address of the IPR register */ -unsigned int map_ipridx_to_addr(int idx) +arch_initcall(sh7760_devices_setup); + +static struct platform_device *sh7760_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) { - if (idx >= ARRAY_SIZE(ipr_offsets)) - return 0; - return ipr_offsets[idx]; + early_platform_add_devices(sh7760_early_devices, + ARRAY_SIZE(sh7760_early_devices)); } -void __init init_IRQ_intc2(void) +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1 << 7) + +void __init plat_irq_setup_pins(int mode) { - make_intc2_irq(intc2_irq_table, ARRAY_SIZE(intc2_irq_table)); + switch (mode) { + case IRQ_MODE_IRQ: + __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc_irq); + break; + default: + BUG(); + } } -void __init init_IRQ_ipr(void) +void __init plat_irq_setup(void) { - make_ipr_irq(sh7760_ipr_map, ARRAY_SIZE(sh7760_ipr_map)); + register_intc_controller(&intc_desc); } diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c new file mode 100644 index 00000000000..42edf2e54e8 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/softfloat.c @@ -0,0 +1,930 @@ +/* + * Floating point emulation support for subnormalised numbers on SH4 + * architecture This file is derived from the SoftFloat IEC/IEEE + * Floating-point Arithmetic Package, Release 2 the original license of + * which is reproduced below. + * + * ======================================================================== + * + * This C source file is part of the SoftFloat IEC/IEEE Floating-point + * Arithmetic Package, Release 2. + * + * Written by John R. Hauser. This work was made possible in part by the + * International Computer Science Institute, located at Suite 600, 1947 Center + * Street, Berkeley, California 94704. Funding was partially provided by the + * National Science Foundation under grant MIP-9311980. The original version + * of this code was written as part of a project to build a fixed-point vector + * processor in collaboration with the University of California at Berkeley, + * overseen by Profs. Nelson Morgan and John Wawrzynek. More information + * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ + * arithmetic/softfloat.html'. + * + * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort + * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT + * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO + * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY + * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + * + * Derivative works are acceptable, even for commercial purposes, so long as + * (1) they include prominent notice that the work is derivative, and (2) they + * include prominent notice akin to these three paragraphs for those parts of + * this code that are retained. 
+ * + * ======================================================================== + * + * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com> + * and Kamel Khelifi <kamel.khelifi@st.com> + */ +#include <linux/kernel.h> +#include <cpu/fpu.h> +#include <asm/div64.h> + +#define LIT64( a ) a##LL + +typedef char flag; +typedef unsigned char uint8; +typedef signed char int8; +typedef int uint16; +typedef int int16; +typedef unsigned int uint32; +typedef signed int int32; + +typedef unsigned long long int bits64; +typedef signed long long int sbits64; + +typedef unsigned char bits8; +typedef signed char sbits8; +typedef unsigned short int bits16; +typedef signed short int sbits16; +typedef unsigned int bits32; +typedef signed int sbits32; + +typedef unsigned long long int uint64; +typedef signed long long int int64; + +typedef unsigned long int float32; +typedef unsigned long long float64; + +extern void float_raise(unsigned int flags); /* in fpu.c */ +extern int float_rounding_mode(void); /* in fpu.c */ + +bits64 extractFloat64Frac(float64 a); +flag extractFloat64Sign(float64 a); +int16 extractFloat64Exp(float64 a); +int16 extractFloat32Exp(float32 a); +flag extractFloat32Sign(float32 a); +bits32 extractFloat32Frac(float32 a); +float64 packFloat64(flag zSign, int16 zExp, bits64 zSig); +void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr); +float32 packFloat32(flag zSign, int16 zExp, bits32 zSig); +void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr); +float64 float64_sub(float64 a, float64 b); +float32 float32_sub(float32 a, float32 b); +float32 float32_add(float32 a, float32 b); +float64 float64_add(float64 a, float64 b); +float64 float64_div(float64 a, float64 b); +float32 float32_div(float32 a, float32 b); +float32 float32_mul(float32 a, float32 b); +float64 float64_mul(float64 a, float64 b); +float32 float64_to_float32(float64 a); +void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr); +void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr); +void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr); + +static int8 countLeadingZeros32(bits32 a); +static int8 countLeadingZeros64(bits64 a); +static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, + bits64 zSig); +static float64 subFloat64Sigs(float64 a, float64 b, flag zSign); +static float64 addFloat64Sigs(float64 a, float64 b, flag zSign); +static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig); +static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, + bits32 zSig); +static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig); +static float32 subFloat32Sigs(float32 a, float32 b, flag zSign); +static float32 addFloat32Sigs(float32 a, float32 b, flag zSign); +static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, + bits64 * zSigPtr); +static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b); +static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr, + bits32 * zSigPtr); + +bits64 extractFloat64Frac(float64 a) +{ + return a & LIT64(0x000FFFFFFFFFFFFF); +} + +flag extractFloat64Sign(float64 a) +{ + return a >> 63; +} + +int16 extractFloat64Exp(float64 a) +{ + return (a >> 52) & 0x7FF; +} + +int16 extractFloat32Exp(float32 a) +{ + return (a >> 23) & 0xFF; +} + +flag extractFloat32Sign(float32 a) +{ + return a >> 31; +} + +bits32 extractFloat32Frac(float32 a) +{ + return a & 0x007FFFFF; +} + +float64 packFloat64(flag zSign, int16 zExp, bits64 zSig) +{ + return 
(((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig; +} + +void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr) +{ + bits64 z; + + if (count == 0) { + z = a; + } else if (count < 64) { + z = (a >> count) | ((a << ((-count) & 63)) != 0); + } else { + z = (a != 0); + } + *zPtr = z; +} + +static int8 countLeadingZeros32(bits32 a) +{ + static const int8 countLeadingZerosHigh[] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + int8 shiftCount; + + shiftCount = 0; + if (a < 0x10000) { + shiftCount += 16; + a <<= 16; + } + if (a < 0x1000000) { + shiftCount += 8; + a <<= 8; + } + shiftCount += countLeadingZerosHigh[a >> 24]; + return shiftCount; + +} + +static int8 countLeadingZeros64(bits64 a) +{ + int8 shiftCount; + + shiftCount = 0; + if (a < ((bits64) 1) << 32) { + shiftCount += 32; + } else { + a >>= 32; + } + shiftCount += countLeadingZeros32(a); + return shiftCount; + +} + +static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64(zSig) - 1; + return roundAndPackFloat64(zSign, zExp - shiftCount, + zSig << shiftCount); + +} + +static float64 subFloat64Sigs(float64 a, float64 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + expDiff = aExp - bExp; + aSig <<= 10; + bSig <<= 10; + if (0 < expDiff) + goto aExpBigger; + if (expDiff < 0) + goto bExpBigger; + if (aExp == 0) { + aExp = 1; + bExp = 1; + } + if (bSig < aSig) + goto aBigger; + if (aSig < bSig) + goto bBigger; + return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0); + bExpBigger: + if (bExp == 0x7FF) { + return packFloat64(zSign ^ 1, 0x7FF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= LIT64(0x4000000000000000); + } + shift64RightJamming(aSig, -expDiff, &aSig); + bSig |= LIT64(0x4000000000000000); + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if (aExp == 0x7FF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= LIT64(0x4000000000000000); + } + shift64RightJamming(bSig, expDiff, &bSig); + aSig |= LIT64(0x4000000000000000); + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat64(zSign, zExp, zSig); + +} +static float64 addFloat64Sigs(float64 a, float64 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + expDiff = aExp - bExp; + aSig <<= 9; + bSig <<= 9; + if (0 < expDiff) { + if (aExp == 0x7FF) { + 
return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= LIT64(0x2000000000000000); + } + shift64RightJamming(bSig, expDiff, &bSig); + zExp = aExp; + } else if (expDiff < 0) { + if (bExp == 0x7FF) { + return packFloat64(zSign, 0x7FF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= LIT64(0x2000000000000000); + } + shift64RightJamming(aSig, -expDiff, &aSig); + zExp = bExp; + } else { + if (aExp == 0x7FF) { + return a; + } + if (aExp == 0) + return packFloat64(zSign, 0, (aSig + bSig) >> 9); + zSig = LIT64(0x4000000000000000) + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= LIT64(0x2000000000000000); + zSig = (aSig + bSig) << 1; + --zExp; + if ((sbits64) zSig < 0) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat64(zSign, zExp, zSig); + +} + +float32 packFloat32(flag zSign, int16 zExp, bits32 zSig) +{ + return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig; +} + +void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr) +{ + bits32 z; + if (count == 0) { + z = a; + } else if (count < 32) { + z = (a >> count) | ((a << ((-count) & 31)) != 0); + } else { + z = (a != 0); + } + *zPtr = z; +} + +static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig) +{ + flag roundNearestEven; + int8 roundIncrement, roundBits; + flag isTiny; + + /* SH4 has only 2 rounding modes - round to nearest and round to zero */ + roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST); + roundIncrement = 0x40; + if (!roundNearestEven) { + roundIncrement = 0; + } + roundBits = zSig & 0x7F; + if (0xFD <= (bits16) zExp) { + if ((0xFD < zExp) + || ((zExp == 0xFD) + && ((sbits32) (zSig + roundIncrement) < 0)) + ) { + float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT); + return packFloat32(zSign, 0xFF, + 0) - (roundIncrement == 0); + } + if (zExp < 0) { + isTiny = (zExp < -1) + || (zSig + roundIncrement < 0x80000000); + shift32RightJamming(zSig, -zExp, &zSig); + zExp = 0; + roundBits = zSig & 0x7F; + if (isTiny && roundBits) + float_raise(FPSCR_CAUSE_UNDERFLOW); + } + } + if (roundBits) + float_raise(FPSCR_CAUSE_INEXACT); + zSig = (zSig + roundIncrement) >> 7; + zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven); + if (zSig == 0) + zExp = 0; + return packFloat32(zSign, zExp, zSig); + +} + +static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32(zSig) - 1; + return roundAndPackFloat32(zSign, zExp - shiftCount, + zSig << shiftCount); +} + +static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig) +{ + flag roundNearestEven; + int16 roundIncrement, roundBits; + flag isTiny; + + /* SH4 has only 2 rounding modes - round to nearest and round to zero */ + roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST); + roundIncrement = 0x200; + if (!roundNearestEven) { + roundIncrement = 0; + } + roundBits = zSig & 0x3FF; + if (0x7FD <= (bits16) zExp) { + if ((0x7FD < zExp) + || ((zExp == 0x7FD) + && ((sbits64) (zSig + roundIncrement) < 0)) + ) { + float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT); + return packFloat64(zSign, 0x7FF, + 0) - (roundIncrement == 0); + } + if (zExp < 0) { + isTiny = (zExp < -1) + || (zSig + roundIncrement < + LIT64(0x8000000000000000)); + shift64RightJamming(zSig, -zExp, &zSig); + zExp = 0; + roundBits = zSig & 0x3FF; + if (isTiny && roundBits) + float_raise(FPSCR_CAUSE_UNDERFLOW); + } + } + if (roundBits) + float_raise(FPSCR_CAUSE_INEXACT); + zSig = (zSig + roundIncrement) >> 10; + zSig &= 
~(((roundBits ^ 0x200) == 0) & roundNearestEven); + if (zSig == 0) + zExp = 0; + return packFloat64(zSign, zExp, zSig); + +} + +static float32 subFloat32Sigs(float32 a, float32 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + expDiff = aExp - bExp; + aSig <<= 7; + bSig <<= 7; + if (0 < expDiff) + goto aExpBigger; + if (expDiff < 0) + goto bExpBigger; + if (aExp == 0) { + aExp = 1; + bExp = 1; + } + if (bSig < aSig) + goto aBigger; + if (aSig < bSig) + goto bBigger; + return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0); + bExpBigger: + if (bExp == 0xFF) { + return packFloat32(zSign ^ 1, 0xFF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= 0x40000000; + } + shift32RightJamming(aSig, -expDiff, &aSig); + bSig |= 0x40000000; + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if (aExp == 0xFF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= 0x40000000; + } + shift32RightJamming(bSig, expDiff, &bSig); + aSig |= 0x40000000; + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat32(zSign, zExp, zSig); + +} + +static float32 addFloat32Sigs(float32 a, float32 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + expDiff = aExp - bExp; + aSig <<= 6; + bSig <<= 6; + if (0 < expDiff) { + if (aExp == 0xFF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= 0x20000000; + } + shift32RightJamming(bSig, expDiff, &bSig); + zExp = aExp; + } else if (expDiff < 0) { + if (bExp == 0xFF) { + return packFloat32(zSign, 0xFF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= 0x20000000; + } + shift32RightJamming(aSig, -expDiff, &aSig); + zExp = bExp; + } else { + if (aExp == 0xFF) { + return a; + } + if (aExp == 0) + return packFloat32(zSign, 0, (aSig + bSig) >> 6); + zSig = 0x40000000 + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= 0x20000000; + zSig = (aSig + bSig) << 1; + --zExp; + if ((sbits32) zSig < 0) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat32(zSign, zExp, zSig); + +} + +float64 float64_sub(float64 a, float64 b) +{ + flag aSign, bSign; + + aSign = extractFloat64Sign(a); + bSign = extractFloat64Sign(b); + if (aSign == bSign) { + return subFloat64Sigs(a, b, aSign); + } else { + return addFloat64Sigs(a, b, aSign); + } + +} + +float32 float32_sub(float32 a, float32 b) +{ + flag aSign, bSign; + + aSign = extractFloat32Sign(a); + bSign = extractFloat32Sign(b); + if (aSign == bSign) { + return subFloat32Sigs(a, b, aSign); + } else { + return addFloat32Sigs(a, b, aSign); + } + +} + +float32 float32_add(float32 a, float32 b) +{ + flag aSign, bSign; + + aSign = extractFloat32Sign(a); + bSign = extractFloat32Sign(b); + if (aSign == bSign) { + return addFloat32Sigs(a, b, aSign); + } else { + return subFloat32Sigs(a, b, aSign); + } + +} + +float64 float64_add(float64 a, float64 b) +{ + flag aSign, bSign; + + aSign = extractFloat64Sign(a); + bSign = extractFloat64Sign(b); + if (aSign == bSign) { + return addFloat64Sigs(a, b, aSign); + } else { + return subFloat64Sigs(a, b, aSign); + } +} + +static void +normalizeFloat64Subnormal(bits64 aSig, 
int16 * zExpPtr, bits64 * zSigPtr) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64(aSig) - 11; + *zSigPtr = aSig << shiftCount; + *zExpPtr = 1 - shiftCount; +} + +void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr) +{ + bits64 z1; + + z1 = a1 + b1; + *z1Ptr = z1; + *z0Ptr = a0 + b0 + (z1 < a1); +} + +void +sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr) +{ + *z1Ptr = a1 - b1; + *z0Ptr = a0 - b0 - (a1 < b1); +} + +static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b) +{ + bits64 b0, b1; + bits64 rem0, rem1, term0, term1; + bits64 z, tmp; + if (b <= a0) + return LIT64(0xFFFFFFFFFFFFFFFF); + b0 = b >> 32; + tmp = a0; + do_div(tmp, b0); + + z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32; + mul64To128(b, z, &term0, &term1); + sub128(a0, a1, term0, term1, &rem0, &rem1); + while (((sbits64) rem0) < 0) { + z -= LIT64(0x100000000); + b1 = b << 32; + add128(rem0, rem1, b0, b1, &rem0, &rem1); + } + rem0 = (rem0 << 32) | (rem1 >> 32); + tmp = rem0; + do_div(tmp, b0); + z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : tmp; + return z; +} + +void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr) +{ + bits32 aHigh, aLow, bHigh, bLow; + bits64 z0, zMiddleA, zMiddleB, z1; + + aLow = a; + aHigh = a >> 32; + bLow = b; + bHigh = b >> 32; + z1 = ((bits64) aLow) * bLow; + zMiddleA = ((bits64) aLow) * bHigh; + zMiddleB = ((bits64) aHigh) * bLow; + z0 = ((bits64) aHigh) * bHigh; + zMiddleA += zMiddleB; + z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32); + zMiddleA <<= 32; + z1 += zMiddleA; + z0 += (z1 < zMiddleA); + *z1Ptr = z1; + *z0Ptr = z0; + +} + +static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr, + bits32 * zSigPtr) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32(aSig) - 8; + *zSigPtr = aSig << shiftCount; + *zExpPtr = 1 - shiftCount; + +} + +float64 float64_div(float64 a, float64 b) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + bits64 rem0, rem1; + bits64 term0, term1; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + aSign = extractFloat64Sign(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + bSign = extractFloat64Sign(b); + zSign = aSign ^ bSign; + if (aExp == 0x7FF) { + if (bExp == 0x7FF) { + } + return packFloat64(zSign, 0x7FF, 0); + } + if (bExp == 0x7FF) { + return packFloat64(zSign, 0, 0); + } + if (bExp == 0) { + if (bSig == 0) { + if ((aExp | aSig) == 0) { + float_raise(FPSCR_CAUSE_INVALID); + } + return packFloat64(zSign, 0x7FF, 0); + } + normalizeFloat64Subnormal(bSig, &bExp, &bSig); + } + if (aExp == 0) { + if (aSig == 0) + return packFloat64(zSign, 0, 0); + normalizeFloat64Subnormal(aSig, &aExp, &aSig); + } + zExp = aExp - bExp + 0x3FD; + aSig = (aSig | LIT64(0x0010000000000000)) << 10; + bSig = (bSig | LIT64(0x0010000000000000)) << 11; + if (bSig <= (aSig + aSig)) { + aSig >>= 1; + ++zExp; + } + zSig = estimateDiv128To64(aSig, 0, bSig); + if ((zSig & 0x1FF) <= 2) { + mul64To128(bSig, zSig, &term0, &term1); + sub128(aSig, 0, term0, term1, &rem0, &rem1); + while ((sbits64) rem0 < 0) { + --zSig; + add128(rem0, rem1, 0, bSig, &rem0, &rem1); + } + zSig |= (rem1 != 0); + } + return roundAndPackFloat64(zSign, zExp, zSig); + +} + +float32 float32_div(float32 a, float32 b) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits32 aSig, bSig; + uint64_t zSig; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + aSign = extractFloat32Sign(a); + bSig = 
extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + bSign = extractFloat32Sign(b); + zSign = aSign ^ bSign; + if (aExp == 0xFF) { + if (bExp == 0xFF) { + } + return packFloat32(zSign, 0xFF, 0); + } + if (bExp == 0xFF) { + return packFloat32(zSign, 0, 0); + } + if (bExp == 0) { + if (bSig == 0) { + return packFloat32(zSign, 0xFF, 0); + } + normalizeFloat32Subnormal(bSig, &bExp, &bSig); + } + if (aExp == 0) { + if (aSig == 0) + return packFloat32(zSign, 0, 0); + normalizeFloat32Subnormal(aSig, &aExp, &aSig); + } + zExp = aExp - bExp + 0x7D; + aSig = (aSig | 0x00800000) << 7; + bSig = (bSig | 0x00800000) << 8; + if (bSig <= (aSig + aSig)) { + aSig >>= 1; + ++zExp; + } + zSig = (((bits64) aSig) << 32); + do_div(zSig, bSig); + + if ((zSig & 0x3F) == 0) { + zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32); + } + return roundAndPackFloat32(zSign, zExp, (bits32)zSig); + +} + +float32 float32_mul(float32 a, float32 b) +{ + char aSign, bSign, zSign; + int aExp, bExp, zExp; + unsigned int aSig, bSig; + unsigned long long zSig64; + unsigned int zSig; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + aSign = extractFloat32Sign(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + bSign = extractFloat32Sign(b); + zSign = aSign ^ bSign; + if (aExp == 0) { + if (aSig == 0) + return packFloat32(zSign, 0, 0); + normalizeFloat32Subnormal(aSig, &aExp, &aSig); + } + if (bExp == 0) { + if (bSig == 0) + return packFloat32(zSign, 0, 0); + normalizeFloat32Subnormal(bSig, &bExp, &bSig); + } + if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0)) + return roundAndPackFloat32(zSign, 0xff, 0); + + zExp = aExp + bExp - 0x7F; + aSig = (aSig | 0x00800000) << 7; + bSig = (bSig | 0x00800000) << 8; + shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64); + zSig = zSig64; + if (0 <= (signed int)(zSig << 1)) { + zSig <<= 1; + --zExp; + } + return roundAndPackFloat32(zSign, zExp, zSig); + +} + +float64 float64_mul(float64 a, float64 b) +{ + char aSign, bSign, zSign; + int aExp, bExp, zExp; + unsigned long long int aSig, bSig, zSig0, zSig1; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + aSign = extractFloat64Sign(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + bSign = extractFloat64Sign(b); + zSign = aSign ^ bSign; + + if (aExp == 0) { + if (aSig == 0) + return packFloat64(zSign, 0, 0); + normalizeFloat64Subnormal(aSig, &aExp, &aSig); + } + if (bExp == 0) { + if (bSig == 0) + return packFloat64(zSign, 0, 0); + normalizeFloat64Subnormal(bSig, &bExp, &bSig); + } + if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0)) + return roundAndPackFloat64(zSign, 0x7ff, 0); + + zExp = aExp + bExp - 0x3FF; + aSig = (aSig | 0x0010000000000000LL) << 10; + bSig = (bSig | 0x0010000000000000LL) << 11; + mul64To128(aSig, bSig, &zSig0, &zSig1); + zSig0 |= (zSig1 != 0); + if (0 <= (signed long long int)(zSig0 << 1)) { + zSig0 <<= 1; + --zExp; + } + return roundAndPackFloat64(zSign, zExp, zSig0); +} + +/* + * ------------------------------------------------------------------------------- + * Returns the result of converting the double-precision floating-point value + * `a' to the single-precision floating-point format. The conversion is + * performed according to the IEC/IEEE Standard for Binary Floating-point + * Arithmetic. 
+ * ------------------------------------------------------------------------------- + * */ +float32 float64_to_float32(float64 a) +{ + flag aSign; + int16 aExp; + bits64 aSig; + bits32 zSig; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + + shift64RightJamming( aSig, 22, &aSig ); + zSig = aSig; + if ( aExp || zSig ) { + zSig |= 0x40000000; + aExp -= 0x381; + } + return roundAndPackFloat32(aSign, aExp, zSig); +} diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index d7fff752e56..0a47bd3e7be 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -13,16 +13,17 @@ #include <linux/init.h> #include <linux/cpu.h> #include <linux/bitmap.h> -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/io.h> +#include <linux/prefetch.h> #include <asm/page.h> #include <asm/cacheflush.h> -#include <asm/cpu/sq.h> +#include <cpu/sq.h> struct sq_mapping; @@ -43,9 +44,9 @@ static unsigned long *sq_bitmap; #define store_queue_barrier() \ do { \ - (void)ctrl_inl(P4SEG_STORE_QUE); \ - ctrl_outl(0, P4SEG_STORE_QUE + 0); \ - ctrl_outl(0, P4SEG_STORE_QUE + 8); \ + (void)__raw_readl(P4SEG_STORE_QUE); \ + __raw_writel(0, P4SEG_STORE_QUE + 0); \ + __raw_writel(0, P4SEG_STORE_QUE + 8); \ } while (0); /** @@ -58,11 +59,11 @@ do { \ */ void sq_flush_range(unsigned long start, unsigned int len) { - volatile unsigned long *sq = (unsigned long *)start; + unsigned long *sq = (unsigned long *)start; /* Flush the queues */ for (len >>= 5; len--; sq += 8) - prefetchw((void *)sq); + prefetchw(sq); /* Wait for completion */ store_queue_barrier(); @@ -100,7 +101,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map) spin_unlock_irq(&sq_mapping_lock); } -static int __sq_remap(struct sq_mapping *map, unsigned long flags) +static int __sq_remap(struct sq_mapping *map, pgprot_t prot) { #if defined(CONFIG_MMU) struct vm_struct *vma; @@ -113,7 +114,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) if (ioremap_page_range((unsigned long)vma->addr, (unsigned long)vma->addr + map->size, - vma->phys_addr, __pgprot(flags))) { + vma->phys_addr, prot)) { vunmap(vma->addr); return -EAGAIN; } @@ -123,8 +124,8 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) * straightforward, as we can just load up each queue's QACR with * the physical address appropriately masked. */ - ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); - ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); + __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); + __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); #endif return 0; @@ -135,14 +136,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) * @phys: Physical address of mapping. * @size: Length of mapping. * @name: User invoking mapping. - * @flags: Protection flags. + * @prot: Protection bits. * * Remaps the physical address @phys through the next available store queue * address of @size length. @name is logged at boot time as well as through * the sysfs interface. 
*/ unsigned long sq_remap(unsigned long phys, unsigned int size, - const char *name, unsigned long flags) + const char *name, pgprot_t prot) { struct sq_mapping *map; unsigned long end; @@ -177,7 +178,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); - ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); + ret = __sq_remap(map, prot); if (unlikely(ret != 0)) goto out; @@ -199,7 +200,7 @@ EXPORT_SYMBOL(sq_remap); /** * sq_unmap - Unmap a Store Queue allocation - * @map: Pre-allocated Store Queue mapping. + * @vaddr: Pre-allocated Store Queue mapping. * * Unmaps the store queue allocation @map that was previously created by * sq_remap(). Also frees up the pte that was previously inserted into @@ -208,7 +209,6 @@ EXPORT_SYMBOL(sq_remap); void sq_unmap(unsigned long vaddr) { struct sq_mapping **p, *map; - struct vm_struct *vma; int page; for (p = &sq_mapping_list; (map = *p); p = &map->next) @@ -217,7 +217,7 @@ void sq_unmap(unsigned long vaddr) if (unlikely(!map)) { printk("%s: bad store queue address 0x%08lx\n", - __FUNCTION__, vaddr); + __func__, vaddr); return; } @@ -225,11 +225,18 @@ void sq_unmap(unsigned long vaddr) bitmap_release_region(sq_bitmap, page, get_order(map->size)); #ifdef CONFIG_MMU - vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK)); - if (!vma) { - printk(KERN_ERR "%s: bad address 0x%08lx\n", - __FUNCTION__, map->sq_addr); - return; + { + /* + * Tear down the VMA in the MMU case. + */ + struct vm_struct *vma; + + vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK)); + if (!vma) { + printk(KERN_ERR "%s: bad address 0x%08lx\n", + __func__, map->sq_addr); + return; + } } #endif @@ -257,7 +264,7 @@ struct sq_sysfs_attr { ssize_t (*store)(const char *buf, size_t count); }; -#define to_sq_sysfs_attr(attr) container_of(attr, struct sq_sysfs_attr, attr) +#define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr) static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -303,8 +310,7 @@ static ssize_t mapping_store(const char *buf, size_t count) return -EIO; if (likely(len)) { - int ret = sq_remap(base, len, "Userspace", - pgprot_val(PAGE_SHARED)); + int ret = sq_remap(base, len, "Userspace", PAGE_SHARED); if (ret < 0) return ret; } else @@ -321,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = { NULL, }; -static struct sysfs_ops sq_sysfs_ops = { +static const struct sysfs_ops sq_sysfs_ops = { .show = sq_sysfs_show, .store = sq_sysfs_store, }; @@ -331,35 +337,38 @@ static struct kobj_type ktype_percpu_entry = { .default_attrs = sq_sysfs_attrs, }; -static int __devinit sq_sysdev_add(struct sys_device *sysdev) +static int sq_dev_add(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = sysdev->id; + unsigned int cpu = dev->id; struct kobject *kobj; + int error; sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); if (unlikely(!sq_kobject[cpu])) return -ENOMEM; kobj = sq_kobject[cpu]; - kobj->parent = &sysdev->kobj; - kobject_set_name(kobj, "%s", "sq"); - kobj->ktype = &ktype_percpu_entry; - - return kobject_register(kobj); + error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj, + "%s", "sq"); + if (!error) + kobject_uevent(kobj, KOBJ_ADD); + return error; } -static int __devexit sq_sysdev_remove(struct sys_device *sysdev) +static int sq_dev_remove(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = sysdev->id; + unsigned int cpu = dev->id; struct kobject *kobj = sq_kobject[cpu]; 
- kobject_unregister(kobj); + kobject_put(kobj); return 0; } -static struct sysdev_driver sq_sysdev_driver = { - .add = sq_sysdev_add, - .remove = __devexit_p(sq_sysdev_remove), +static struct subsys_interface sq_interface = { + .name = "sq", + .subsys = &cpu_subsys, + .add_dev = sq_dev_add, + .remove_dev = sq_dev_remove, }; static int __init sq_api_init(void) @@ -371,8 +380,7 @@ static int __init sq_api_init(void) printk(KERN_NOTICE "sq: Registering store queue API.\n"); sq_cache = kmem_cache_create("store_queue_cache", - sizeof(struct sq_mapping), 0, 0, - NULL, NULL); + sizeof(struct sq_mapping), 0, 0, NULL); if (unlikely(!sq_cache)) return ret; @@ -380,7 +388,7 @@ static int __init sq_api_init(void) if (unlikely(!sq_bitmap)) goto out; - ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver); + ret = subsys_interface_register(&sq_interface); if (unlikely(ret != 0)) goto out; @@ -395,7 +403,7 @@ out: static void __exit sq_api_exit(void) { - sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver); + subsys_interface_unregister(&sq_interface); kfree(sq_bitmap); kmem_cache_destroy(sq_cache); } |
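Usage note on the Store Queue hunk above: sq_remap() now takes a pgprot_t rather than a raw flags value, and __sq_remap() no longer ORs in PAGE_KERNEL_NOCACHE for the caller, so the complete protection must be passed explicitly. The sketch below is illustrative only and is not part of the patch; the physical address, length, mapping name, driver state and header choices are assumptions made for the example.

/*
 * Illustrative sketch only (not part of the patch): mapping and unmapping
 * a region through the SH-4 Store Queues with the pgprot_t-based sq_remap().
 * The physical address, size and "example" name are made-up placeholders.
 */
#include <linux/kernel.h>
#include <asm/pgtable.h>	/* PAGE_KERNEL_NOCACHE */
#include <cpu/sq.h>		/* sq_remap(), sq_unmap() */

static unsigned long example_sq_addr;	/* hypothetical driver state */

static void example_map(void)
{
	/*
	 * Map 64KiB of device memory uncached; the caller now supplies the
	 * full pgprot_t instead of extra flags ORed onto a default.
	 * Return-value checking is omitted here; the "Userspace" mapping in
	 * sq.c's sysfs store hook shows the in-tree call pattern.
	 */
	example_sq_addr = sq_remap(0x10000000UL, 0x10000, "example",
				   PAGE_KERNEL_NOCACHE);
}

static void example_unmap(void)
{
	/* Release the store queue window and its VMA (on MMU parts). */
	sq_unmap(example_sq_addr);
}

For comparison, the patched sysfs interface in sq.c performs the same call with PAGE_SHARED for its "Userspace" mapping.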
