Diffstat (limited to 'arch/sh/kernel')
57 files changed, 2851 insertions(+), 714 deletions(-)
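Most of this series is mechanical, but one API change threads through many of the files below: clk_get() now takes a struct device * as its first argument and, as the new comment in arch/sh/kernel/cpu/clock.c explains, first tries to match the platform device id plus clock name before falling back to a name-only lookup. The fragment below is a minimal illustrative sketch of how a driver might call the updated interface; it is not part of the commit. The probe function, the pdev pointer, and the printk message are assumptions made for the example, while clk_get(), clk_get_rate(), clk_put() and the "bus_clk" name all appear in the diff itself.

/*
 * Illustrative sketch only -- not part of this commit.
 * Shows a consumer of the two-argument clk_get() added in
 * arch/sh/kernel/cpu/clock.c. The probe function, pdev and
 * message text are assumptions made for the example.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/clock.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Tries pdev->id + "bus_clk" first, then falls back to a name-only match */
	clk = clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	printk(KERN_INFO "bus_clk runs at %lu Hz\n", clk_get_rate(clk));

	clk_put(clk);
	return 0;
}

With a NULL device the lookup degenerates to the old name-only behaviour, which is why the existing in-tree callers in this diff are simply converted from clk_get("bus_clk") to clk_get(NULL, "bus_clk").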
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index 5da88a43d35..99c7e5249f7 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -4,7 +4,7 @@ extra-y := head.o init_task.o vmlinux.lds -obj-y := process.o signal.o entry.o traps.o irq.o \ +obj-y := process.o signal.o traps.o irq.o \ ptrace.o setup.o time.o sys_sh.o semaphore.o \ io.o io_generic.o sh_ksyms.o syscalls.o @@ -21,3 +21,4 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_APM) += apm.o obj-$(CONFIG_PM) += pm.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index fb5dac06938..d055a3ea6b4 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile @@ -2,11 +2,13 @@ # Makefile for the Linux/SuperH CPU-specifc backends. # -obj-y += irq/ init.o clock.o - -obj-$(CONFIG_CPU_SH2) += sh2/ -obj-$(CONFIG_CPU_SH3) += sh3/ -obj-$(CONFIG_CPU_SH4) += sh4/ +obj-$(CONFIG_CPU_SH2) = sh2/ +obj-$(CONFIG_CPU_SH2A) = sh2a/ +obj-$(CONFIG_CPU_SH3) = sh3/ +obj-$(CONFIG_CPU_SH4) = sh4/ +obj-$(CONFIG_CPU_SH4A) += sh4a/ obj-$(CONFIG_UBC_WAKEUP) += ubc.o obj-$(CONFIG_SH_ADC) += adc.o + +obj-y += irq/ init.o clock.o diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index 51ec64cdf34..abb586b1256 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c @@ -5,9 +5,11 @@ * * This clock framework is derived from the OMAP version by: * - * Copyright (C) 2004 Nokia Corporation + * Copyright (C) 2004 - 2005 Nokia Corporation * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> * + * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> + * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. @@ -20,6 +22,7 @@ #include <linux/kref.h> #include <linux/seq_file.h> #include <linux/err.h> +#include <linux/platform_device.h> #include <asm/clock.h> #include <asm/timer.h> @@ -195,17 +198,37 @@ void clk_recalc_rate(struct clk *clk) propagate_rate(clk); } -struct clk *clk_get(const char *id) +/* + * Returns a clock. Note that we first try to use device id on the bus + * and clock name. If this fails, we try to use clock name only. + */ +struct clk *clk_get(struct device *dev, const char *id) { struct clk *p, *clk = ERR_PTR(-ENOENT); + int idno; + + if (dev == NULL || dev->bus != &platform_bus_type) + idno = -1; + else + idno = to_platform_device(dev)->id; mutex_lock(&clock_list_sem); list_for_each_entry(p, &clock_list, node) { + if (p->id == idno && + strcmp(id, p->name) == 0 && try_module_get(p->owner)) { + clk = p; + goto found; + } + } + + list_for_each_entry(p, &clock_list, node) { if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { clk = p; break; } } + +found: mutex_unlock(&clock_list_sem); return clk; diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index bfb90eb0b7a..48121766e8d 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -68,12 +68,14 @@ static void __init cache_init(void) waysize = cpu_data->dcache.sets; +#ifdef CCR_CACHE_ORA /* * If the OC is already in RAM mode, we only have * half of the entries to flush.. 
*/ if (ccr & CCR_CACHE_ORA) waysize >>= 1; +#endif waysize <<= cpu_data->dcache.entry_shift; diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile index 1c034c283f5..0049d217561 100644 --- a/arch/sh/kernel/cpu/irq/Makefile +++ b/arch/sh/kernel/cpu/irq/Makefile @@ -1,8 +1,9 @@ # # Makefile for the Linux/SuperH CPU-specifc IRQ handlers. # -obj-y += ipr.o imask.o +obj-y += imask.o +obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c index a33ae3e0a5a..301b505c427 100644 --- a/arch/sh/kernel/cpu/irq/imask.c +++ b/arch/sh/kernel/cpu/irq/imask.c @@ -53,7 +53,10 @@ void static inline set_interrupt_registers(int ip) { unsigned long __dummy; - asm volatile("ldc %2, r6_bank\n\t" + asm volatile( +#ifdef CONFIG_CPU_HAS_SR_RB + "ldc %2, r6_bank\n\t" +#endif "stc sr, %0\n\t" "and #0xf0, %0\n\t" "shlr2 %0\n\t" diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c index 74ca576a7ce..74defe76a05 100644 --- a/arch/sh/kernel/cpu/irq/intc2.c +++ b/arch/sh/kernel/cpu/irq/intc2.c @@ -11,22 +11,29 @@ * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780. */ #include <linux/kernel.h> -#include <linux/irq.h> +#include <linux/interrupt.h> #include <linux/io.h> -#include <asm/system.h> + +#if defined(CONFIG_CPU_SUBTYPE_SH7760) +#define INTC2_BASE 0xfe080000 +#define INTC2_INTMSK (INTC2_BASE + 0x40) +#define INTC2_INTMSKCLR (INTC2_BASE + 0x60) +#elif defined(CONFIG_CPU_SUBTYPE_SH7780) +#define INTC2_BASE 0xffd40000 +#define INTC2_INTMSK (INTC2_BASE + 0x38) +#define INTC2_INTMSKCLR (INTC2_BASE + 0x3c) +#endif static void disable_intc2_irq(unsigned int irq) { struct intc2_data *p = get_irq_chip_data(irq); - ctrl_outl(1 << p->msk_shift, - INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset); + ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset); } static void enable_intc2_irq(unsigned int irq) { struct intc2_data *p = get_irq_chip_data(irq); - ctrl_outl(1 << p->msk_shift, - INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset); + ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset); } static struct irq_chip intc2_irq_chip = { @@ -61,12 +68,10 @@ void make_intc2_irq(struct intc2_data *table, unsigned int nr_irqs) /* Set the priority level */ local_irq_save(flags); - ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET + - p->ipr_offset); + ipr = ctrl_inl(INTC2_BASE + p->ipr_offset); ipr &= ~(0xf << p->ipr_shift); ipr |= p->priority << p->ipr_shift; - ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET + - p->ipr_offset); + ctrl_outl(ipr, INTC2_BASE + p->ipr_offset); local_irq_restore(flags); diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c index a0089563cbf..35eb5751a3a 100644 --- a/arch/sh/kernel/cpu/irq/ipr.c +++ b/arch/sh/kernel/cpu/irq/ipr.c @@ -19,25 +19,21 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/module.h> -#include <asm/system.h> -#include <asm/io.h> -#include <asm/machvec.h> - +#include <linux/io.h> +#include <linux/interrupt.h> static void disable_ipr_irq(unsigned int irq) { struct ipr_data *p = get_irq_chip_data(irq); - int shift = p->shift*4; /* Set the priority in IPR to 0 */ - ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr); + ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr); } static void enable_ipr_irq(unsigned int irq) { struct ipr_data *p = get_irq_chip_data(irq); - int shift = 
p->shift*4; /* Set priority in IPR back to original value */ - ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr); + ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr); } static struct irq_chip ipr_irq_chip = { @@ -53,6 +49,10 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) for (i = 0; i < nr_irqs; i++) { unsigned int irq = table[i].irq; + table[i].addr = map_ipridx_to_addr(table[i].ipr_idx); + /* could the IPR index be mapped, if not we ignore this */ + if (table[i].addr == 0) + continue; disable_irq_nosync(irq); set_irq_chip_and_handler_name(irq, &ipr_irq_chip, handle_level_irq, "level"); @@ -62,83 +62,6 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) } EXPORT_SYMBOL(make_ipr_irq); -static struct ipr_data sys_ipr_map[] = { -#ifndef CONFIG_CPU_SUBTYPE_SH7780 - { TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY }, - { TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY }, -#ifdef RTC_IRQ - { RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY }, -#endif -#ifdef SCI_ERI_IRQ - { SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, - { SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, - { SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, -#endif -#ifdef SCIF1_ERI_IRQ - { SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, - { SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, - { SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, - { SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, -#endif -#if defined(CONFIG_CPU_SUBTYPE_SH7300) - { SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY }, - { DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY }, - { DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY }, - { VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY }, -#endif -#ifdef SCIF_ERI_IRQ - { SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, - { SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, - { SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, - { SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, -#endif -#ifdef IRDA_ERI_IRQ - { IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, - { IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, - { IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, - { IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, -#endif -#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \ - defined(CONFIG_CPU_SUBTYPE_SH7706) || \ - defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705) - /* - * Initialize the Interrupt Controller (INTC) - * registers to their power on values - */ - - /* - * Enable external irq (INTC IRQ mode). - * You should set corresponding bits of PFC to "00" - * to enable these interrupts. 
- */ - { IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY }, - { IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY }, - { IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY }, - { IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY }, - { IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY }, - { IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY }, -#endif -#endif -}; - -void __init init_IRQ(void) -{ - make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map)); - -#ifdef CONFIG_CPU_HAS_PINT_IRQ - init_IRQ_pint(); -#endif - -#ifdef CONFIG_CPU_HAS_INTC2_IRQ - init_IRQ_intc2(); -#endif - /* Perform the machine specific initialisation */ - if (sh_mv.mv_init_irq != NULL) - sh_mv.mv_init_irq(); - - irq_ctx_init(smp_processor_id()); -} - #if !defined(CONFIG_CPU_HAS_PINT_IRQ) int ipr_irq_demux(int irq) { diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile index 389353fba60..f0f059acfcf 100644 --- a/arch/sh/kernel/cpu/sh2/Makefile +++ b/arch/sh/kernel/cpu/sh2/Makefile @@ -2,5 +2,6 @@ # Makefile for the Linux/SuperH SH-2 backends. # -obj-y := probe.o +obj-y := ex.o probe.o entry.o +obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c new file mode 100644 index 00000000000..d0440b26970 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c @@ -0,0 +1,81 @@ +/* + * arch/sh/kernel/cpu/sh2/clock-sh7619.c + * + * SH7619 support for the clock framework + * + * Copyright (C) 2006 Yoshinori Sato + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +const static int pll1rate[]={1,2}; +const static int pfc_divisors[]={1,2,0,4}; + +#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2) +#define PLL2 (4) +#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6) +#define PLL2 (2) +#else +#error "Illigal Clock Mode!" 
+#endif + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; +} + +static struct clk_ops sh7619_master_clk_ops = { + .init = master_clk_init, +}; + +static void module_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(FREQCR) & 0x0007); + clk->rate = clk->parent->rate / pfc_divisors[idx]; +} + +static struct clk_ops sh7619_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static void bus_clk_recalc(struct clk *clk) +{ + clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; +} + +static struct clk_ops sh7619_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_recalc(struct clk *clk) +{ + clk->rate = clk->parent->rate; +} + +static struct clk_ops sh7619_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh7619_clk_ops[] = { + &sh7619_master_clk_ops, + &sh7619_module_clk_ops, + &sh7619_bus_clk_ops, + &sh7619_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7619_clk_ops)) + *ops = sh7619_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S new file mode 100644 index 00000000000..d51fa5e9904 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/entry.S @@ -0,0 +1,337 @@ +/* + * arch/sh/kernel/cpu/sh2/entry.S + * + * The SH-2 exception entry + * + * Copyright (C) 2005,2006 Yoshinori Sato + * Copyright (C) 2005 AXE,Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <asm/cpu/mmu_context.h> +#include <asm/unistd.h> +#include <asm/errno.h> +#include <asm/page.h> + +/* Offsets to the stack */ +OFF_R0 = 0 /* Return value. New ABI also arg4 */ +OFF_R1 = 4 /* New ABI: arg5 */ +OFF_R2 = 8 /* New ABI: arg6 */ +OFF_R3 = 12 /* New ABI: syscall_nr */ +OFF_R4 = 16 /* New ABI: arg0 */ +OFF_R5 = 20 /* New ABI: arg1 */ +OFF_R6 = 24 /* New ABI: arg2 */ +OFF_R7 = 28 /* New ABI: arg3 */ +OFF_SP = (15*4) +OFF_PC = (16*4) +OFF_SR = (16*4+2*4) +OFF_TRA = (16*4+6*4) + +#include <asm/entry-macros.S> + +ENTRY(exception_handler) + ! already saved r0/r1 + mov.l r2,@-sp + mov.l r3,@-sp + mov r0,r1 + cli + mov.l $cpu_mode,r2 + mov.l @r2,r0 + mov.l @(5*4,r15),r3 ! previous SR + shll2 r3 ! set "S" flag + rotl r0 ! T <- "S" flag + rotl r0 ! "S" flag is LSB + rotcr r3 ! T -> r3:b30 + shlr r3 + shlr r0 + bt/s 1f + mov.l r3,@(5*4,r15) ! copy cpu mode to SR + ! switch to kernel mode + mov #1,r0 + rotr r0 + rotr r0 + mov.l r0,@r2 ! enter kernel mode + mov.l $current_thread_info,r2 + mov.l @r2,r2 + mov #0x20,r0 + shll8 r0 + add r2,r0 + mov r15,r2 ! r2 = user stack top + mov r0,r15 ! switch kernel stack + add #-4,r15 ! dummy + mov.l r1,@-r15 ! TRA + sts.l macl, @-r15 + sts.l mach, @-r15 + stc.l gbr, @-r15 + mov.l @(4*4,r2),r0 + mov.l @(5*4,r2),r1 + mov.l r1,@-r15 ! original SR + sts.l pr,@-r15 + mov.l r0,@-r15 ! original PC + mov r2,r3 + add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame + mov.l r3,@-r15 ! original SP + mov.l r14,@-r15 + mov.l r13,@-r15 + mov.l r12,@-r15 + mov.l r11,@-r15 + mov.l r10,@-r15 + mov.l r9,@-r15 + mov.l r8,@-r15 + mov.l r7,@-r15 + mov.l r6,@-r15 + mov.l r5,@-r15 + mov.l r4,@-r15 + mov r2,r8 ! 
copy user -> kernel stack + mov.l @r8+,r3 + mov.l r3,@-r15 + mov.l @r8+,r2 + mov.l r2,@-r15 + mov.l @r8+,r1 + mov.l r1,@-r15 + mov.l @r8+,r0 + bra 2f + mov.l r0,@-r15 +1: + ! in kernel exception + mov #(22-4-4-1)*4+4,r0 + mov r15,r2 + sub r0,r15 + mov.l @r2+,r0 ! old R3 + mov.l r0,@-r15 + mov.l @r2+,r0 ! old R2 + mov.l r0,@-r15 + mov.l @r2+,r0 ! old R1 + mov.l r0,@-r15 + mov.l @r2+,r0 ! old R0 + mov.l r0,@-r15 + mov.l @r2+,r3 ! old PC + mov.l @r2+,r0 ! old SR + add #-4,r2 ! exception frame stub (sr) + mov.l r1,@-r2 ! TRA + sts.l macl, @-r2 + sts.l mach, @-r2 + stc.l gbr, @-r2 + mov.l r0,@-r2 ! save old SR + sts.l pr,@-r2 + mov.l r3,@-r2 ! save old PC + mov r2,r0 + add #8*4,r0 + mov.l r0,@-r2 ! save old SP + mov.l r14,@-r2 + mov.l r13,@-r2 + mov.l r12,@-r2 + mov.l r11,@-r2 + mov.l r10,@-r2 + mov.l r9,@-r2 + mov.l r8,@-r2 + mov.l r7,@-r2 + mov.l r6,@-r2 + mov.l r5,@-r2 + mov.l r4,@-r2 + mov.l @(OFF_R0,r15),r0 + mov.l @(OFF_R1,r15),r1 + mov.l @(OFF_R2,r15),r2 + mov.l @(OFF_R3,r15),r3 +2: + mov #OFF_TRA,r8 + add r15,r8 + mov.l @r8,r9 + mov #64,r8 + cmp/hs r8,r9 + bt interrupt_entry ! vec >= 64 is interrupt + mov #32,r8 + cmp/hs r8,r9 + bt trap_entry ! 64 > vec >= 32 is trap + mov.l 4f,r8 + mov r9,r4 + shll2 r9 + add r9,r8 + mov.l @r8,r8 + mov #0,r9 + cmp/eq r9,r8 + bf 3f + mov.l 8f,r8 ! unhandled exception +3: + mov.l 5f,r10 + jmp @r8 + lds r10,pr + +interrupt_entry: + mov r9,r4 + mov.l 6f,r9 + mov.l 7f,r8 + jmp @r8 + lds r9,pr + + .align 2 +4: .long exception_handling_table +5: .long ret_from_exception +6: .long ret_from_irq +7: .long do_IRQ +8: .long do_exception_error + +trap_entry: + /* verbose BUG trapa entry check */ + mov #0x3e,r8 + cmp/ge r8,r9 + bf/s 1f + add #-0x10,r9 + add #0x10,r9 +1: + shll2 r9 ! TRA + mov #OFF_TRA,r8 + add r15,r8 + mov.l r9,@r8 + mov r9,r8 +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 2f, r9 + jsr @r9 + nop +#endif + sti + bra system_call + nop + + .align 2 +#ifdef CONFIG_TRACE_IRQFLAGS +2: .long trace_hardirqs_on +#endif + +#if defined(CONFIG_SH_STANDARD_BIOS) + /* Unwind the stack and jmp to the debug entry */ +debug_kernel_fw: + mov r15,r0 + add #(22-4)*4-4,r0 + ldc.l @r0+,gbr + lds.l @r0+,mach + lds.l @r0+,macl + mov r15,r0 + mov.l @(OFF_SP,r0),r1 + mov #OFF_SR,r2 + mov.l @(r0,r2),r3 + mov.l r3,@-r1 + mov #OFF_SP,r2 + mov.l @(r0,r2),r3 + mov.l r3,@-r1 + mov r15,r0 + add #(22-4)*4-8,r0 + mov.l 1f,r2 + mov.l @r2,r2 + stc sr,r3 + mov.l r2,@r0 + mov.l r3,@r0 + mov.l r1,@(8,r0) + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + mov.l @r15+, r8 + mov.l @r15+, r9 + mov.l @r15+, r10 + mov.l @r15+, r11 + mov.l @r15+, r12 + mov.l @r15+, r13 + mov.l @r15+, r14 + add #8,r15 + lds.l @r15+, pr + rte + mov.l @r15+,r15 + .align 2 +1: .long gdb_vbr_vector +#endif /* CONFIG_SH_STANDARD_BIOS */ + +ENTRY(address_error_handler) + mov r15,r4 ! regs + add #4,r4 + mov #OFF_PC,r0 + mov.l @(r0,r15),r6 ! pc + mov.l 1f,r0 + jmp @r0 + mov #0,r5 ! writeaccess is unknown + .align 2 + +1: .long do_address_error + +restore_all: + cli +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 1f, r0 + jsr @r0 + nop +#endif + mov r15,r0 + mov.l $cpu_mode,r2 + mov #OFF_SR,r3 + mov.l @(r0,r3),r1 + mov.l r1,@r2 + shll2 r1 ! clear MD bit + shlr2 r1 + mov.l @(OFF_SP,r0),r2 + add #-8,r2 + mov.l r2,@(OFF_SP,r0) ! point exception frame top + mov.l r1,@(4,r2) ! set sr + mov #OFF_PC,r3 + mov.l @(r0,r3),r1 + mov.l r1,@r2 ! set pc + add #4*16+4,r0 + lds.l @r0+,pr + add #4,r0 ! 
skip sr + ldc.l @r0+,gbr + lds.l @r0+,mach + lds.l @r0+,macl + get_current_thread_info r0, r1 + mov.l $current_thread_info,r1 + mov.l r0,@r1 + mov.l @r15+,r0 + mov.l @r15+,r1 + mov.l @r15+,r2 + mov.l @r15+,r3 + mov.l @r15+,r4 + mov.l @r15+,r5 + mov.l @r15+,r6 + mov.l @r15+,r7 + mov.l @r15+,r8 + mov.l @r15+,r9 + mov.l @r15+,r10 + mov.l @r15+,r11 + mov.l @r15+,r12 + mov.l @r15+,r13 + mov.l @r15+,r14 + mov.l @r15,r15 + rte + nop + +#ifdef CONFIG_TRACE_IRQFLAGS +1: .long trace_hardirqs_off +#endif +$current_thread_info: + .long __current_thread_info +$cpu_mode: + .long __cpu_mode + +! common exception handler +#include "../../entry-common.S" + + .data +! cpu operation mode +! bit30 = MD (compatible SH3/4) +__cpu_mode: + .long 0x40000000 + + .section .bss +__current_thread_info: + .long 0 + +ENTRY(exception_handling_table) + .space 4*32 diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S new file mode 100644 index 00000000000..6d285af7846 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/ex.S @@ -0,0 +1,46 @@ +/* + * arch/sh/kernel/cpu/sh2/ex.S + * + * The SH-2 exception vector table + * + * Copyright (C) 2005 Yoshinori Sato + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/linkage.h> + +! +! convert Exception Vector to Exception Number +! +exception_entry: +no = 0 + .rept 256 + mov.l r0,@-sp + mov #no,r0 + bra exception_trampoline + and #0xff,r0 +no = no + 1 + .endr +exception_trampoline: + mov.l r1,@-sp + mov.l $exception_handler,r1 + jmp @r1 + + .align 2 +$exception_entry: + .long exception_entry +$exception_handler: + .long exception_handler +! +! Exception Vector Base +! + .align 2 +ENTRY(vbr_base) +vector = 0 + .rept 256 + .long exception_entry + vector * 8 +vector = vector + 1 + .endr diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index f17a2a0d588..ba527d9b502 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c @@ -17,17 +17,23 @@ int __init detect_cpu_and_cache_system(void) { - /* - * For now, assume SH7604 .. fix this later. - */ +#if defined(CONFIG_CPU_SUBTYPE_SH7604) cpu_data->type = CPU_SH7604; cpu_data->dcache.ways = 4; - cpu_data->dcache.way_shift = 6; + cpu_data->dcache.way_incr = (1<<10); cpu_data->dcache.sets = 64; cpu_data->dcache.entry_shift = 4; cpu_data->dcache.linesz = L1_CACHE_BYTES; cpu_data->dcache.flags = 0; - +#elif defined(CONFIG_CPU_SUBTYPE_SH7619) + cpu_data->type = CPU_SH7619; + cpu_data->dcache.ways = 4; + cpu_data->dcache.way_incr = (1<<12); + cpu_data->dcache.sets = 256; + cpu_data->dcache.entry_shift = 4; + cpu_data->dcache.linesz = L1_CACHE_BYTES; + cpu_data->dcache.flags = 0; +#endif /* * SH-2 doesn't have separate caches */ diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c new file mode 100644 index 00000000000..79283e6c1d8 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -0,0 +1,94 @@ +/* + * SH7619 Setup + * + * Copyright (C) 2006 Yoshinori Sato + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xf8400000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 88, 89, 91, 90}, + }, { + .mapbase = 0xf8410000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 92, 93, 95, 94}, + }, { + .mapbase = 0xf8420000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 96, 97, 99, 98}, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7619_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7619_devices_setup(void) +{ + return platform_add_devices(sh7619_devices, + ARRAY_SIZE(sh7619_devices)); +} +__initcall(sh7619_devices_setup); + +#define INTC_IPRC 0xf8080000UL +#define INTC_IPRD 0xf8080002UL + +#define CMI0_IRQ 86 + +#define SCIF0_ERI_IRQ 88 +#define SCIF0_RXI_IRQ 89 +#define SCIF0_BRI_IRQ 90 +#define SCIF0_TXI_IRQ 91 + +#define SCIF1_ERI_IRQ 92 +#define SCIF1_RXI_IRQ 93 +#define SCIF1_BRI_IRQ 94 +#define SCIF1_TXI_IRQ 95 + +#define SCIF2_BRI_IRQ 96 +#define SCIF2_ERI_IRQ 97 +#define SCIF2_RXI_IRQ 98 +#define SCIF2_TXI_IRQ 99 + +static struct ipr_data sh7619_ipr_map[] = { + { CMI0_IRQ, INTC_IPRC, 1, 2 }, + { SCIF0_ERI_IRQ, INTC_IPRD, 3, 3 }, + { SCIF0_RXI_IRQ, INTC_IPRD, 3, 3 }, + { SCIF0_BRI_IRQ, INTC_IPRD, 3, 3 }, + { SCIF0_TXI_IRQ, INTC_IPRD, 3, 3 }, + { SCIF1_ERI_IRQ, INTC_IPRD, 2, 3 }, + { SCIF1_RXI_IRQ, INTC_IPRD, 2, 3 }, + { SCIF1_BRI_IRQ, INTC_IPRD, 2, 3 }, + { SCIF1_TXI_IRQ, INTC_IPRD, 2, 3 }, + { SCIF2_ERI_IRQ, INTC_IPRD, 1, 3 }, + { SCIF2_RXI_IRQ, INTC_IPRD, 1, 3 }, + { SCIF2_BRI_IRQ, INTC_IPRD, 1, 3 }, + { SCIF2_TXI_IRQ, INTC_IPRD, 1, 3 }, +}; + +void __init init_IRQ_ipr(void) +{ + make_ipr_irq(sh7619_ipr_map, ARRAY_SIZE(sh7619_ipr_map)); +} diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile new file mode 100644 index 00000000000..350972ae941 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Linux/SuperH SH-2A backends. +# + +obj-y := common.o probe.o + +common-y += $(addprefix ../sh2/, ex.o) +common-y += $(addprefix ../sh2/, entry.o) + +obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c new file mode 100644 index 00000000000..a9ad309c6a3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c @@ -0,0 +1,85 @@ +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7206.c + * + * SH7206 support for the clock framework + * + * Copyright (C) 2006 Yoshinori Sato + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +const static int pll1rate[]={1,2,3,4,6,8}; +const static int pfc_divisors[]={1,2,3,4,6,8,12}; +#define ifc_divisors pfc_divisors + +#if (CONFIG_SH_CLK_MD == 2) +#define PLL2 (4) +#elif (CONFIG_SH_CLK_MD == 6) +#define PLL2 (2) +#elif (CONFIG_SH_CLK_MD == 7) +#define PLL2 (1) +#else +#error "Illigal Clock Mode!" 
+#endif + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; +} + +static struct clk_ops sh7206_master_clk_ops = { + .init = master_clk_init, +}; + +static void module_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(FREQCR) & 0x0007); + clk->rate = clk->parent->rate / pfc_divisors[idx]; +} + +static struct clk_ops sh7206_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static void bus_clk_recalc(struct clk *clk) +{ + clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; +} + +static struct clk_ops sh7206_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inw(FREQCR) & 0x0007); + clk->rate = clk->parent->rate / ifc_divisors[idx]; +} + +static struct clk_ops sh7206_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh7206_clk_ops[] = { + &sh7206_master_clk_ops, + &sh7206_module_clk_ops, + &sh7206_bus_clk_ops, + &sh7206_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7206_clk_ops)) + *ops = sh7206_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c new file mode 100644 index 00000000000..87c6c054208 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/probe.c @@ -0,0 +1,39 @@ +/* + * arch/sh/kernel/cpu/sh2a/probe.c + * + * CPU Subtype Probing for SH-2A. + * + * Copyright (C) 2004, 2005 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/init.h> +#include <asm/processor.h> +#include <asm/cache.h> + +int __init detect_cpu_and_cache_system(void) +{ + /* Just SH7206 for now .. */ + cpu_data->type = CPU_SH7206; + + cpu_data->dcache.ways = 4; + cpu_data->dcache.way_incr = (1 << 11); + cpu_data->dcache.sets = 128; + cpu_data->dcache.entry_shift = 4; + cpu_data->dcache.linesz = L1_CACHE_BYTES; + cpu_data->dcache.flags = 0; + + /* + * The icache is the same as the dcache as far as this setup is + * concerned. The only real difference in hardware is that the icache + * lacks the U bit that the dcache has, none of this has any bearing + * on the cache info. + */ + cpu_data->icache = cpu_data->dcache; + + return 0; +} + diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c new file mode 100644 index 00000000000..4b60fcc7d66 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -0,0 +1,112 @@ +/* + * SH7206 Setup + * + * Copyright (C) 2006 Yoshinori Sato + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xfffe8000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 241, 242, 243, 240}, + }, { + .mapbase = 0xfffe8800, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 247, 244, 245, 246}, + }, { + .mapbase = 0xfffe9000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 249, 250, 251, 248}, + }, { + .mapbase = 0xfffe9800, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 253, 254, 255, 252}, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7206_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7206_devices_setup(void) +{ + return platform_add_devices(sh7206_devices, + ARRAY_SIZE(sh7206_devices)); +} +__initcall(sh7206_devices_setup); + +#define INTC_IPR08 0xfffe0c04UL +#define INTC_IPR09 0xfffe0c06UL +#define INTC_IPR14 0xfffe0c10UL + +#define CMI0_IRQ 140 + +#define MTU1_TGI1A 164 + +#define SCIF0_BRI_IRQ 240 +#define SCIF0_ERI_IRQ 241 +#define SCIF0_RXI_IRQ 242 +#define SCIF0_TXI_IRQ 243 + +#define SCIF1_BRI_IRQ 244 +#define SCIF1_ERI_IRQ 245 +#define SCIF1_RXI_IRQ 246 +#define SCIF1_TXI_IRQ 247 + +#define SCIF2_BRI_IRQ 248 +#define SCIF2_ERI_IRQ 249 +#define SCIF2_RXI_IRQ 250 +#define SCIF2_TXI_IRQ 251 + +#define SCIF3_BRI_IRQ 252 +#define SCIF3_ERI_IRQ 253 +#define SCIF3_RXI_IRQ 254 +#define SCIF3_TXI_IRQ 255 + +static struct ipr_data sh7206_ipr_map[] = { + { CMI0_IRQ, INTC_IPR08, 3, 2 }, + { MTU2_TGI1A, INTC_IPR09, 1, 2 }, + { SCIF0_ERI_IRQ, INTC_IPR14, 3, 3 }, + { SCIF0_RXI_IRQ, INTC_IPR14, 3, 3 }, + { SCIF0_BRI_IRQ, INTC_IPR14, 3, 3 }, + { SCIF0_TXI_IRQ, INTC_IPR14, 3, 3 }, + { SCIF1_ERI_IRQ, INTC_IPR14, 2, 3 }, + { SCIF1_RXI_IRQ, INTC_IPR14, 2, 3 }, + { SCIF1_BRI_IRQ, INTC_IPR14, 2, 3 }, + { SCIF1_TXI_IRQ, INTC_IPR14, 2, 3 }, + { SCIF2_ERI_IRQ, INTC_IPR14, 1, 3 }, + { SCIF2_RXI_IRQ, INTC_IPR14, 1, 3 }, + { SCIF2_BRI_IRQ, INTC_IPR14, 1, 3 }, + { SCIF2_TXI_IRQ, INTC_IPR14, 1, 3 }, + { SCIF3_ERI_IRQ, INTC_IPR14, 0, 3 }, + { SCIF3_RXI_IRQ, INTC_IPR14, 0, 3 }, + { SCIF3_BRI_IRQ, INTC_IPR14, 0, 3 }, + { SCIF3_TXI_IRQ, INTC_IPR14, 0, 3 }, +}; + +void __init init_IRQ_ipr(void) +{ + make_ipr_irq(sh7206_ipr_map, ARRAY_SIZE(sh7206_ipr_map)); +} diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile index 58d3815695f..83905e4e438 100644 --- a/arch/sh/kernel/cpu/sh3/Makefile +++ b/arch/sh/kernel/cpu/sh3/Makefile @@ -2,7 +2,7 @@ # Makefile for the Linux/SuperH SH-3 backends. 
# -obj-y := ex.o probe.o +obj-y := ex.o probe.o entry.o # CPU subtype setup obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c index 10461a745e5..b791a29fdb6 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c @@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; static void set_bus_parent(struct clk *clk) { - struct clk *bus_clk = clk_get("bus_clk"); + struct clk *bus_clk = clk_get(NULL, "bus_clk"); clk->parent = bus_clk; clk_put(bus_clk); } diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 39aaefb2d83..8c0dc2700c6 100644 --- a/arch/sh/kernel/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -1,5 +1,5 @@ /* - * linux/arch/sh/entry.S + * arch/sh/kernel/entry.S * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 2003 - 2006 Paul Mundt @@ -7,15 +7,16 @@ * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. - * */ #include <linux/sys.h> #include <linux/errno.h> #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> -#include <asm/cpu/mmu_context.h> #include <asm/unistd.h> +#include <asm/cpu/mmu_context.h> +#include <asm/pgtable.h> +#include <asm/page.h> ! NOTE: ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address @@ -81,6 +82,8 @@ OFF_TRA = (16*4+6*4) #define k_g_imask r6_bank /* r6_bank1 */ #define current r7 /* r7_bank1 */ +#include <asm/entry-macros.S> + /* * Kernel mode register usage: * k0 scratch @@ -107,26 +110,6 @@ OFF_TRA = (16*4+6*4) ! this first version depends *much* on C implementation. ! -#define CLI() \ - stc sr, r0; \ - or #0xf0, r0; \ - ldc r0, sr - -#define STI() \ - mov.l __INV_IMASK, r11; \ - stc sr, r10; \ - and r11, r10; \ - stc k_g_imask, r11; \ - or r11, r10; \ - ldc r10, sr - -#if defined(CONFIG_PREEMPT) -# define preempt_stop() CLI() -#else -# define preempt_stop() -# define resume_kernel restore_all -#endif - #if defined(CONFIG_MMU) .align 2 ENTRY(tlb_miss_load) @@ -155,29 +138,14 @@ ENTRY(tlb_protection_violation_store) call_dpf: mov.l 1f, r0 - mov r5, r8 - mov.l @r0, r6 - mov r6, r9 - mov.l 2f, r0 - sts pr, r10 - jsr @r0 - mov r15, r4 - ! - tst r0, r0 - bf/s 0f - lds r10, pr - rts - nop -0: STI() + mov.l @r0, r6 ! address mov.l 3f, r0 - mov r9, r6 - mov r8, r5 + jmp @r0 - mov r15, r4 + mov r15, r4 ! regs .align 2 1: .long MMU_TEA -2: .long __do_page_fault 3: .long do_page_fault .align 2 @@ -203,32 +171,6 @@ call_dae: 2: .long do_address_error #endif /* CONFIG_MMU */ -#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) -! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. -! If both are configured, handle the debug traps (breakpoints) in SW, -! but still allow BIOS traps to FW. 
- - .align 2 -debug_kernel: -#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) - /* Force BIOS call to FW (debug_trap put TRA in r8) */ - mov r8,r0 - shlr2 r0 - cmp/eq #0x3f,r0 - bt debug_kernel_fw -#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ - -debug_enter: -#if defined(CONFIG_SH_KGDB) - /* Jump to kgdb, pass stacked regs as arg */ -debug_kernel_sw: - mov.l 3f, r0 - jmp @r0 - mov r15, r4 - .align 2 -3: .long kgdb_handle_exception -#endif /* CONFIG_SH_KGDB */ - #if defined(CONFIG_SH_STANDARD_BIOS) /* Unwind the stack and jmp to the debug entry */ debug_kernel_fw: @@ -269,276 +211,6 @@ debug_kernel_fw: 2: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ -#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ - - - .align 2 -debug_trap: -#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register - shll r0 - shll r0 ! kernel space? - bt/s debug_kernel -#endif - mov.l @r15, r0 ! Restore R0 value - mov.l 1f, r8 - jmp @r8 - nop - - .align 2 -ENTRY(exception_error) - ! - STI() - mov.l 2f, r0 - jmp @r0 - nop - -! - .align 2 -1: .long break_point_trap_software -2: .long do_exception_error - - .align 2 -ret_from_exception: - preempt_stop() -ENTRY(ret_from_irq) - ! - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register - shll r0 - shll r0 ! kernel space? - bt/s resume_kernel ! Yes, it's from kernel, go back soon - GET_THREAD_INFO(r8) - -#ifdef CONFIG_PREEMPT - bra resume_userspace - nop -ENTRY(resume_kernel) - mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count - tst r0, r0 - bf noresched -need_resched: - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_NEED_RESCHED, r0 ! need_resched set? - bt noresched - - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register - and #0xf0, r0 ! interrupts off (exception path)? - cmp/eq #0xf0, r0 - bt noresched - - mov.l 1f, r0 - mov.l r0, @(TI_PRE_COUNT,r8) - - STI() - mov.l 2f, r0 - jsr @r0 - nop - mov #0, r0 - mov.l r0, @(TI_PRE_COUNT,r8) - CLI() - - bra need_resched - nop -noresched: - bra restore_all - nop - - .align 2 -1: .long PREEMPT_ACTIVE -2: .long schedule -#endif - -ENTRY(resume_userspace) - ! r8: current_thread_info - CLI() - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 - bt/s restore_all - tst #_TIF_NEED_RESCHED, r0 - - .align 2 -work_pending: - ! r0: current_thread_info->flags - ! r8: current_thread_info - ! t: result of "tst #_TIF_NEED_RESCHED, r0" - bf/s work_resched - tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 -work_notifysig: - bt/s restore_all - mov r15, r4 - mov r12, r5 ! set arg1(save_r0) - mov r0, r6 - mov.l 2f, r1 - mova restore_all, r0 - jmp @r1 - lds r0, pr -work_resched: -#ifndef CONFIG_PREEMPT - ! gUSA handling - mov.l @(OFF_SP,r15), r0 ! get user space stack pointer - mov r0, r1 - shll r0 - bf/s 1f - shll r0 - bf/s 1f - mov #OFF_PC, r0 - ! SP >= 0xc0000000 : gUSA mark - mov.l @(r0,r15), r2 ! get user space PC (program counter) - mov.l @(OFF_R0,r15), r3 ! end point - cmp/hs r3, r2 ! r2 >= r3? - bt 1f - add r3, r1 ! rewind point #2 - mov.l r1, @(r0,r15) ! reset PC to rewind point #2 - ! -1: -#endif - mov.l 1f, r1 - jsr @r1 ! schedule - nop - CLI() - ! - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 - bt restore_all - bra work_pending - tst #_TIF_NEED_RESCHED, r0 - - .align 2 -1: .long schedule -2: .long do_notify_resume - - .align 2 -syscall_exit_work: - ! r0: current_thread_info->flags - ! 
r8: current_thread_info - tst #_TIF_SYSCALL_TRACE, r0 - bt/s work_pending - tst #_TIF_NEED_RESCHED, r0 - STI() - ! XXX setup arguments... - mov.l 4f, r0 ! do_syscall_trace - jsr @r0 - nop - bra resume_userspace - nop - - .align 2 -syscall_trace_entry: - ! Yes it is traced. - ! XXX setup arguments... - mov.l 4f, r11 ! Call do_syscall_trace which notifies - jsr @r11 ! superior (will chomp R[0-7]) - nop - ! Reload R0-R4 from kernel stack, where the - ! parent may have modified them using - ! ptrace(POKEUSR). (Note that R0-R2 are - ! used by the system call handler directly - ! from the kernel stack anyway, so don't need - ! to be reloaded here.) This allows the parent - ! to rewrite system calls and args on the fly. - mov.l @(OFF_R4,r15), r4 ! arg0 - mov.l @(OFF_R5,r15), r5 - mov.l @(OFF_R6,r15), r6 - mov.l @(OFF_R7,r15), r7 ! arg3 - mov.l @(OFF_R3,r15), r3 ! syscall_nr - ! Arrange for do_syscall_trace to be called - ! again as the system call returns. - mov.l 2f, r10 ! Number of syscalls - cmp/hs r10, r3 - bf syscall_call - mov #-ENOSYS, r0 - bra syscall_exit - mov.l r0, @(OFF_R0,r15) ! Return value - -/* - * Syscall interface: - * - * Syscall #: R3 - * Arguments #0 to #3: R4--R7 - * Arguments #4 to #6: R0, R1, R2 - * TRA: (number of arguments + 0x10) x 4 - * - * This code also handles delegating other traps to the BIOS/gdb stub - * according to: - * - * Trap number - * (TRA>>2) Purpose - * -------- ------- - * 0x0-0xf old syscall ABI - * 0x10-0x1f new syscall ABI - * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. - * - * Note: When we're first called, the TRA value must be shifted - * right 2 bits in order to get the value that was used as the "trapa" - * argument. - */ - - .align 2 - .globl ret_from_fork -ret_from_fork: - mov.l 1f, r8 - jsr @r8 - mov r0, r4 - bra syscall_exit - nop - .align 2 -1: .long schedule_tail - ! -ENTRY(system_call) - mov.l 1f, r9 - mov.l @r9, r8 ! Read from TRA (Trap Address) Register - ! - ! Is the trap argument >= 0x20? (TRA will be >= 0x80) - mov #0x7f, r9 - cmp/hi r9, r8 - bt/s 0f - mov #OFF_TRA, r9 - add r15, r9 - ! - mov.l r8, @r9 ! set TRA value to tra - STI() - ! Call the system call handler through the table. - ! First check for bad syscall number - mov r3, r9 - mov.l 2f, r8 ! Number of syscalls - cmp/hs r8, r9 - bf/s good_system_call - GET_THREAD_INFO(r8) -syscall_badsys: ! Bad syscall number - mov #-ENOSYS, r0 - bra resume_userspace - mov.l r0, @(OFF_R0,r15) ! Return value - ! -0: - bra debug_trap - nop - ! -good_system_call: ! Good syscall number - mov.l @(TI_FLAGS,r8), r8 - mov #_TIF_SYSCALL_TRACE, r10 - tst r10, r8 - bf syscall_trace_entry - ! -syscall_call: - shll2 r9 ! x4 - mov.l 3f, r8 ! Load the address of sys_call_table - add r8, r9 - mov.l @r9, r8 - jsr @r8 ! jump to specific syscall handler - nop - mov.l @(OFF_R0,r15), r12 ! save r0 - mov.l r0, @(OFF_R0,r15) ! save the return value - ! -syscall_exit: - CLI() - ! - GET_THREAD_INFO(r8) - mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_ALLWORK_MASK, r0 - bf syscall_exit_work restore_all: mov.l @r15+, r0 mov.l @r15+, r1 @@ -606,7 +278,9 @@ skip_restore: ! ! Calculate new SR value mov k3, k2 ! original SR value - mov.l 9f, k1 + mov #0xf0, k1 + extu.b k1, k1 + not k1, k1 and k1, k2 ! Mask orignal SR value ! mov k3, k0 ! Calculate IMASK-bits @@ -632,16 +306,12 @@ skip_restore: nop .align 2 -1: .long TRA -2: .long NR_syscalls -3: .long sys_call_table -4: .long do_syscall_trace 5: .long 0x00001000 ! DSP 7: .long 0x30000000 -9: -__INV_IMASK: - .long 0xffffff0f ! ~(IMASK) +! 
common exception handler +#include "../../entry-common.S" + ! Exception Vector Base ! ! Should be aligned page boundary. @@ -661,9 +331,176 @@ general_exception: 2: .long ret_from_exception ! ! + +/* This code makes some assumptions to improve performance. + * Make sure they are stil true. */ +#if PTRS_PER_PGD != PTRS_PER_PTE +#error PGD and PTE sizes don't match +#endif + +/* gas doesn't flag impossible values for mov #immediate as an error */ +#if (_PAGE_PRESENT >> 2) > 0x7f +#error cannot load PAGE_PRESENT as an immediate +#endif +#if _PAGE_DIRTY > 0x7f +#error cannot load PAGE_DIRTY as an immediate +#endif +#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED +#error cannot derive PAGE_ACCESSED from PAGE_PRESENT +#endif + +#if defined(CONFIG_CPU_SH4) +#define ldmmupteh(r) mov.l 8f, r +#else +#define ldmmupteh(r) mov #MMU_PTEH, r +#endif + .balign 1024,0,1024 tlb_miss: - mov.l 1f, k2 +#ifdef COUNT_EXCEPTIONS + ! Increment the counts + mov.l 9f, k1 + mov.l @k1, k2 + add #1, k2 + mov.l k2, @k1 +#endif + + ! k0 scratch + ! k1 pgd and pte pointers + ! k2 faulting address + ! k3 pgd and pte index masks + ! k4 shift + + ! Load up the pgd entry (k1) + + ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH + + mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2 + mov #-(PGDIR_SHIFT-2), k4 ! 6 EX + + mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2) + + mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2) + + mov k2, k0 ! 5 MT (latency=0) + shld k4, k0 ! 99 EX + + and k3, k0 ! 78 EX + + mov.l @(k0, k1), k1 ! 21 LS (latency=2) + mov #-(PAGE_SHIFT-2), k4 ! 6 EX + + ! Load up the pte entry (k2) + + mov k2, k0 ! 5 MT (latency=0) + shld k4, k0 ! 99 EX + + tst k1, k1 ! 86 MT + + bt 20f ! 110 BR + + and k3, k0 ! 78 EX + mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT + + mov.l @(k0, k1), k2 ! 21 LS (latency=2) + add k0, k1 ! 49 EX + +#ifdef CONFIG_CPU_HAS_PTEA + ! Test the entry for present and _PAGE_ACCESSED + + mov #-28, k3 ! 6 EX + mov k2, k0 ! 5 MT (latency=0) + + tst k4, k2 ! 68 MT + shld k3, k0 ! 99 EX + + bt 20f ! 110 BR + + ! Set PTEA register + ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1) + ! + ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT + + and #0xe, k0 ! 79 EX + + mov k0, k3 ! 5 MT (latency=0) + mov k2, k0 ! 5 MT (latency=0) + + and #1, k0 ! 79 EX + + or k0, k3 ! 82 EX + + ldmmupteh(k0) ! 9 LS (latency=2) + shll2 k4 ! 101 EX _PAGE_ACCESSED + + tst k4, k2 ! 68 MT + + mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS + + mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK + + ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED +#else + + ! Test the entry for present and _PAGE_ACCESSED + + mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK + tst k4, k2 ! 68 MT + + shll2 k4 ! 101 EX _PAGE_ACCESSED + ldmmupteh(k0) ! 9 LS (latency=2) + + bt 20f ! 110 BR + tst k4, k2 ! 68 MT + + ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED + +#endif + + ! Set up the entry + + and k2, k3 ! 78 EX + bt/s 10f ! 108 BR + + mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS + + ldtlb ! 128 CO + + ! At least one instruction between ldtlb and rte + nop ! 119 NOP + + rte ! 126 CO + + nop ! 119 NOP + + +10: or k4, k2 ! 82 EX + + ldtlb ! 128 CO + + ! At least one instruction between ldtlb and rte + mov.l k2, @k1 ! 27 LS + + rte ! 126 CO + + ! Note we cannot execute mov here, because it is executed after + ! restoring SSR, so would be executed in user space. + nop ! 119 NOP + + + .align 5 + ! Once cache line if possible... 
+1: .long swapper_pg_dir +4: .short (PTRS_PER_PGD-1) << 2 +5: .short _PAGE_PRESENT +7: .long _PAGE_FLAGS_HARDWARE_MASK +8: .long MMU_PTEH +#ifdef COUNT_EXCEPTIONS +9: .long exception_count_miss +#endif + + ! Either pgd or pte not present +20: mov.l 1f, k2 mov.l 4f, k3 bra handle_exception mov.l @k2, k2 @@ -710,8 +547,9 @@ ENTRY(handle_exception) bt/s 1f ! It's a kernel to kernel transition. mov r15, k0 ! save original stack to k0 /* User space to kernel */ - mov #(THREAD_SIZE >> 8), k1 + mov #(THREAD_SIZE >> 10), k1 shll8 k1 ! k1 := THREAD_SIZE + shll2 k1 add current, k1 mov k1, r15 ! change to kernel stack ! @@ -761,7 +599,7 @@ skip_save: ! Save the user registers on the stack. mov.l k2, @-r15 ! EXPEVT - mov #-1, k4 + mov #-1, k4 mov.l k4, @-r15 ! set TRA (default: -1) ! sts.l macl, @-r15 @@ -813,6 +651,15 @@ skip_save: bf interrupt_exception shlr2 r8 shlr r8 + +#ifdef COUNT_EXCEPTIONS + mov.l 5f, r9 + add r8, r9 + mov.l @r9, r10 + add #1, r10 + mov.l r10, @r9 +#endif + mov.l 4f, r9 add r8, r9 mov.l @r9, r9 @@ -826,6 +673,9 @@ skip_save: 2: .long 0x000080f0 ! FD=1, IMASK=15 3: .long 0xcfffffff ! RB=0, BL=0 4: .long exception_handling_table +#ifdef COUNT_EXCEPTIONS +5: .long exception_count_table +#endif interrupt_exception: mov.l 1f, r9 diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index 8dbf3895ece..19ca68c7188 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile @@ -2,7 +2,8 @@ # Makefile for the Linux/SuperH SH-4 backends. # -obj-y := ex.o probe.o +obj-y := ex.o probe.o common.o +common-y += $(addprefix ../sh3/, entry.o) obj-$(CONFIG_SH_FPU) += fpu.o obj-$(CONFIG_SH_STORE_QUEUES) += sq.o @@ -11,17 +12,12 @@ obj-$(CONFIG_SH_STORE_QUEUES) += sq.o obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o -obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o -obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o -obj-$(CONFIG_CPU_SUBTYPE_SH73180) += setup-sh73180.o -obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o # Primary on-chip clocks (common) +ifndef CONFIG_CPU_SH4A clock-$(CONFIG_CPU_SH4) := clock-sh4.o -clock-$(CONFIG_CPU_SUBTYPE_SH73180) := clock-sh73180.o -clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o -clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o +endif # Additional clocks by subtype clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index bfdf5fe8d94..fa2019aabd7 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c @@ -97,7 +97,7 @@ static void shoc_clk_recalc(struct clk *clk) static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) { - struct clk *bclk = clk_get("bus_clk"); + struct clk *bclk = clk_get(NULL, "bus_clk"); unsigned long bclk_rate = clk_get_rate(bclk); clk_put(bclk); @@ -151,7 +151,7 @@ static struct clk *sh4202_onchip_clocks[] = { static int __init sh4202_clk_init(void) { - struct clk *clk = clk_get("master_clk"); + struct clk *clk = clk_get(NULL, "master_clk"); int i; for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) { diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index f486c07e10e..7624677f662 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -282,11 +282,8 @@ ieee_fpe_handler (struct pt_regs *regs) grab_fpu(regs); restore_fpu(tsk); 
set_tsk_thread_flag(tsk, TIF_USEDFPU); - } else { - tsk->thread.trap_no = 11; - tsk->thread.error_code = 0; + } else force_sig(SIGFPE, tsk); - } regs->pc = nextpc; return 1; @@ -296,29 +293,29 @@ ieee_fpe_handler (struct pt_regs *regs) } asmlinkage void -do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) +do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, + unsigned long r7, struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct task_struct *tsk = current; - if (ieee_fpe_handler (®s)) + if (ieee_fpe_handler(regs)) return; - regs.pc += 2; - save_fpu(tsk, ®s); - tsk->thread.trap_no = 11; - tsk->thread.error_code = 0; + regs->pc += 2; + save_fpu(tsk, regs); force_sig(SIGFPE, tsk); } asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6, - unsigned long r7, struct pt_regs regs) + unsigned long r7, struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); struct task_struct *tsk = current; - grab_fpu(®s); - if (!user_mode(®s)) { + grab_fpu(regs); + if (!user_mode(regs)) { printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); return; } diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index c294de1e14a..9031a22a2ce 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -79,16 +79,16 @@ int __init detect_cpu_and_cache_system(void) case 0x205: cpu_data->type = CPU_SH7750; cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | - CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; + CPU_HAS_PERF_COUNTER; break; case 0x206: cpu_data->type = CPU_SH7750S; cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | - CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; + CPU_HAS_PERF_COUNTER; break; case 0x1100: cpu_data->type = CPU_SH7751; - cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; + cpu_data->flags |= CPU_HAS_FPU; break; case 0x2000: cpu_data->type = CPU_SH73180; @@ -119,30 +119,38 @@ int __init detect_cpu_and_cache_system(void) break; case 0x3000: case 0x3003: + case 0x3009: cpu_data->type = CPU_SH7343; cpu_data->icache.ways = 4; cpu_data->dcache.ways = 4; cpu_data->flags |= CPU_HAS_LLSC; break; + case 0x3008: + if (prr == 0xa0) { + cpu_data->type = CPU_SH7722; + cpu_data->icache.ways = 4; + cpu_data->dcache.ways = 4; + cpu_data->flags |= CPU_HAS_LLSC; + } + break; case 0x8000: cpu_data->type = CPU_ST40RA; - cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; + cpu_data->flags |= CPU_HAS_FPU; break; case 0x8100: cpu_data->type = CPU_ST40GX1; - cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; + cpu_data->flags |= CPU_HAS_FPU; break; case 0x700: cpu_data->type = CPU_SH4_501; cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; - cpu_data->flags |= CPU_HAS_PTEA; break; case 0x600: cpu_data->type = CPU_SH4_202; cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; - cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; + cpu_data->flags |= CPU_HAS_FPU; break; case 0x500 ... 0x501: switch (prr) { @@ -160,7 +168,7 @@ int __init detect_cpu_and_cache_system(void) cpu_data->icache.ways = 2; cpu_data->dcache.ways = 2; - cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; + cpu_data->flags |= CPU_HAS_FPU; break; default: @@ -173,6 +181,10 @@ int __init detect_cpu_and_cache_system(void) cpu_data->dcache.ways = 1; #endif +#ifdef CONFIG_CPU_HAS_PTEA + cpu_data->flags |= CPU_HAS_PTEA; +#endif + /* * On anything that's not a direct-mapped cache, look to the CVR * for I/D-cache specifics. 
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 50812d57c1c..cbac27634c0 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -2,6 +2,7 @@ * SH7750/SH7751 Setup * * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2006 Jamie Lenehan * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -10,8 +11,39 @@ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> +#include <linux/io.h> #include <asm/sci.h> +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffc80000, + .end = 0xffc80000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Period IRQ */ + .start = 21, + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* Carry IRQ */ + .start = 22, + .flags = IORESOURCE_IRQ, + }, + [3] = { + /* Alarm IRQ */ + .start = 20, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + static struct plat_sci_port sci_platform_data[] = { { .mapbase = 0xffe00000, @@ -37,6 +69,7 @@ static struct platform_device sci_device = { }; static struct platform_device *sh7750_devices[] __initdata = { + &rtc_device, &sci_device, }; @@ -46,3 +79,71 @@ static int __init sh7750_devices_setup(void) ARRAY_SIZE(sh7750_devices)); } __initcall(sh7750_devices_setup); + +static struct ipr_data sh7750_ipr_map[] = { + /* IRQ, IPR-idx, shift, priority */ + { 16, 0, 12, 2 }, /* TMU0 TUNI*/ + { 17, 0, 12, 2 }, /* TMU1 TUNI */ + { 18, 0, 4, 2 }, /* TMU2 TUNI */ + { 19, 0, 4, 2 }, /* TMU2 TIPCI */ + { 27, 1, 12, 2 }, /* WDT ITI */ + { 20, 0, 0, 2 }, /* RTC ATI (alarm) */ + { 21, 0, 0, 2 }, /* RTC PRI (period) */ + { 22, 0, 0, 2 }, /* RTC CUI (carry) */ + { 23, 1, 4, 3 }, /* SCI ERI */ + { 24, 1, 4, 3 }, /* SCI RXI */ + { 25, 1, 4, 3 }, /* SCI TXI */ + { 40, 2, 4, 3 }, /* SCIF ERI */ + { 41, 2, 4, 3 }, /* SCIF RXI */ + { 42, 2, 4, 3 }, /* SCIF BRI */ + { 43, 2, 4, 3 }, /* SCIF TXI */ + { 34, 2, 8, 7 }, /* DMAC DMTE0 */ + { 35, 2, 8, 7 }, /* DMAC DMTE1 */ + { 36, 2, 8, 7 }, /* DMAC DMTE2 */ + { 37, 2, 8, 7 }, /* DMAC DMTE3 */ + { 28, 2, 8, 7 }, /* DMAC DMAE */ +}; + +static struct ipr_data sh7751_ipr_map[] = { + { 44, 2, 8, 7 }, /* DMAC DMTE4 */ + { 45, 2, 8, 7 }, /* DMAC DMTE5 */ + { 46, 2, 8, 7 }, /* DMAC DMTE6 */ + { 47, 2, 8, 7 }, /* DMAC DMTE7 */ + /* The following use INTC_INPRI00 for masking, which is a 32-bit + register, not a 16-bit register like the IPRx registers, so it + would need special support */ + /*{ 72, INTPRI00, 8, ? },*/ /* TMU3 TUNI */ + /*{ 76, INTPRI00, 12, ? 
},*/ /* TMU4 TUNI */ +}; + +static unsigned long ipr_offsets[] = { + 0xffd00004UL, /* 0: IPRA */ + 0xffd00008UL, /* 1: IPRB */ + 0xffd0000cUL, /* 2: IPRC */ + 0xffd00010UL, /* 3: IPRD */ +}; + +/* given the IPR index return the address of the IPR register */ +unsigned int map_ipridx_to_addr(int idx) +{ + if (idx >= ARRAY_SIZE(ipr_offsets)) + return 0; + return ipr_offsets[idx]; +} + +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1<<7) + +/* enable individual interrupt mode for external interupts */ +void ipr_irq_enable_irlm(void) +{ + ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); +} + +void __init init_IRQ_ipr() +{ + make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map)); +#ifdef CONFIG_CPU_SUBTYPE_SH7751 + make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map)); +#endif +} diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 7bcc73f9b8d..d7fff752e56 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -19,7 +19,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> -#include <asm/io.h> +#include <linux/io.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/cpu/sq.h> @@ -38,7 +38,7 @@ struct sq_mapping { static struct sq_mapping *sq_mapping_list; static DEFINE_SPINLOCK(sq_mapping_lock); -static kmem_cache_t *sq_cache; +static struct kmem_cache *sq_cache; static unsigned long *sq_bitmap; #define store_queue_barrier() \ @@ -67,6 +67,7 @@ void sq_flush_range(unsigned long start, unsigned int len) /* Wait for completion */ store_queue_barrier(); } +EXPORT_SYMBOL(sq_flush_range); static inline void sq_mapping_list_add(struct sq_mapping *map) { @@ -110,8 +111,9 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags) vma->phys_addr = map->addr; - if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr, - map->size, flags)) { + if (ioremap_page_range((unsigned long)vma->addr, + (unsigned long)vma->addr + map->size, + vma->phys_addr, __pgprot(flags))) { vunmap(vma->addr); return -EAGAIN; } @@ -166,7 +168,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, map->size = size; map->name = name; - page = bitmap_find_free_region(sq_bitmap, 0x04000000, + page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT, get_order(map->size)); if (unlikely(page < 0)) { ret = -ENOSPC; @@ -175,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); - ret = __sq_remap(map, flags); + ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags); if (unlikely(ret != 0)) goto out; @@ -193,6 +195,7 @@ out: kmem_cache_free(sq_cache, map); return ret; } +EXPORT_SYMBOL(sq_remap); /** * sq_unmap - Unmap a Store Queue allocation @@ -234,6 +237,7 @@ void sq_unmap(unsigned long vaddr) kmem_cache_free(sq_cache, map); } +EXPORT_SYMBOL(sq_unmap); /* * Needlessly complex sysfs interface. Unfortunately it doesn't seem like @@ -402,7 +406,3 @@ module_exit(sq_api_exit); MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>"); MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(sq_remap); -EXPORT_SYMBOL(sq_unmap); -EXPORT_SYMBOL(sq_flush_range); diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile new file mode 100644 index 00000000000..a8f493f2f21 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Linux/SuperH SH-4 backends. 
+# + +# CPU subtype setup +obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o +obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o +obj-$(CONFIG_CPU_SUBTYPE_SH73180) += setup-sh73180.o +obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o +obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o + +# Primary on-chip clocks (common) +clock-$(CONFIG_CPU_SUBTYPE_SH73180) := clock-sh73180.o +clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o +clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o +clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o +clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7343.o + +obj-y += $(clock-y) diff --git a/arch/sh/kernel/cpu/sh4/clock-sh73180.c b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c index 2fa5cb2ae68..2fa5cb2ae68 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh73180.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh73180.c diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c new file mode 100644 index 00000000000..1707a213f0c --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c @@ -0,0 +1,99 @@ +/* + * arch/sh/kernel/cpu/sh4/clock-sh7343.c + * + * SH7343/SH7722 support for the clock framework + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <asm/clock.h> +#include <asm/freq.h> + +/* + * SH7343/SH7722 uses a common set of multipliers and divisors, so this + * is quite simple.. + */ +static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; +static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; + +#define pll_calc() (((ctrl_inl(FRQCR) >> 24) & 0x1f) + 1) + +static void master_clk_init(struct clk *clk) +{ + clk->parent = clk_get(NULL, "cpu_clk"); +} + +static void master_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQCR) & 0x000f); + clk->rate *= clk->parent->rate * multipliers[idx] / divisors[idx]; +} + +static struct clk_ops sh7343_master_clk_ops = { + .init = master_clk_init, + .recalc = master_clk_recalc, +}; + +static void module_clk_init(struct clk *clk) +{ + clk->parent = NULL; + clk->rate = CONFIG_SH_PCLK_FREQ; +} + +static struct clk_ops sh7343_module_clk_ops = { + .init = module_clk_init, +}; + +static void bus_clk_init(struct clk *clk) +{ + clk->parent = clk_get(NULL, "cpu_clk"); +} + +static void bus_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQCR) >> 8) & 0x000f; + clk->rate = clk->parent->rate * multipliers[idx] / divisors[idx]; +} + +static struct clk_ops sh7343_bus_clk_ops = { + .init = bus_clk_init, + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_init(struct clk *clk) +{ + clk->parent = clk_get(NULL, "module_clk"); + clk->flags |= CLK_RATE_PROPAGATES; + clk_set_rate(clk, clk_get_rate(clk)); +} + +static void cpu_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQCR) >> 20) & 0x000f; + clk->rate = clk->parent->rate * pll_calc() * + multipliers[idx] / divisors[idx]; +} + +static struct clk_ops sh7343_cpu_clk_ops = { + .init = cpu_clk_init, + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh7343_clk_ops[] = { + &sh7343_master_clk_ops, + &sh7343_module_clk_ops, + &sh7343_bus_clk_ops, + &sh7343_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7343_clk_ops)) + *ops = sh7343_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7770.c 
b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c index c8694bac647..c8694bac647 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c index 93ad367342c..9e6a216750c 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c @@ -98,7 +98,7 @@ static struct clk *sh7780_onchip_clocks[] = { static int __init sh7780_clk_init(void) { - struct clk *clk = clk_get("master_clk"); + struct clk *clk = clk_get(NULL, "master_clk"); int i; for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) { diff --git a/arch/sh/kernel/cpu/sh4/setup-sh73180.c b/arch/sh/kernel/cpu/sh4a/setup-sh73180.c index cc9ea1e2e5d..cc9ea1e2e5d 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh73180.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh73180.c diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 91d61cf91ba..91d61cf91ba 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c new file mode 100644 index 00000000000..1143fbf65fa --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -0,0 +1,80 @@ +/* + * SH7722 Setup + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <asm/sci.h> + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xffe00000, + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 80, 81, 83, 82 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7722_devices[] __initdata = { + &sci_device, +}; + +static int __init sh7722_devices_setup(void) +{ + return platform_add_devices(sh7722_devices, + ARRAY_SIZE(sh7722_devices)); +} +__initcall(sh7722_devices_setup); + +static struct ipr_data sh7722_ipr_map[] = { + /* IRQ, IPR-idx, shift, prio */ + { 16, 0, 12, 2 }, /* TMU0 */ + { 17, 0, 8, 2 }, /* TMU1 */ + { 80, 6, 12, 3 }, /* SCIF ERI */ + { 81, 6, 12, 3 }, /* SCIF RXI */ + { 82, 6, 12, 3 }, /* SCIF BRI */ + { 83, 6, 12, 3 }, /* SCIF TXI */ +}; + +static unsigned long ipr_offsets[] = { + 0xa4080000, /* 0: IPRA */ + 0xa4080004, /* 1: IPRB */ + 0xa4080008, /* 2: IPRC */ + 0xa408000c, /* 3: IPRD */ + 0xa4080010, /* 4: IPRE */ + 0xa4080014, /* 5: IPRF */ + 0xa4080018, /* 6: IPRG */ + 0xa408001c, /* 7: IPRH */ + 0xa4080020, /* 8: IPRI */ + 0xa4080024, /* 9: IPRJ */ + 0xa4080028, /* 10: IPRK */ + 0xa408002c, /* 11: IPRL */ +}; + +unsigned int map_ipridx_to_addr(int idx) +{ + if (unlikely(idx >= ARRAY_SIZE(ipr_offsets))) + return 0; + return ipr_offsets[idx]; +} + +void __init init_IRQ_ipr(void) +{ + make_ipr_irq(sh7722_ipr_map, ARRAY_SIZE(sh7722_ipr_map)); +} diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 6a04cc5f5ac..6a04cc5f5ac 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 814ddb22653..9aeaa2ddaa2 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7780.c +++ 
b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -79,25 +79,27 @@ static int __init sh7780_devices_setup(void) __initcall(sh7780_devices_setup); static struct intc2_data intc2_irq_table[] = { - { TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 }, - { 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY }, - { 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY }, - { 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY }, - { SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, - { SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, - { SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, - { SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, + { 28, 0, 24, 0, 0, 2 }, /* TMU0 */ - { SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, - { SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, - { SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, - { SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, + { 21, 1, 0, 0, 2, 2 }, + { 22, 1, 1, 0, 2, 2 }, + { 23, 1, 2, 0, 2, 2 }, - { PCIC0_IRQ, 0x10, 8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY }, - { PCIC1_IRQ, 0x10, 0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY }, - { PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY }, - { PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY }, - { PCIC4_IRQ, 0x14, 8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY }, + { 40, 8, 24, 0, 3, 3 }, /* SCIF0 ERI */ + { 41, 8, 24, 0, 3, 3 }, /* SCIF0 RXI */ + { 42, 8, 24, 0, 3, 3 }, /* SCIF0 BRI */ + { 43, 8, 24, 0, 3, 3 }, /* SCIF0 TXI */ + + { 76, 8, 16, 0, 4, 3 }, /* SCIF1 ERI */ + { 77, 8, 16, 0, 4, 3 }, /* SCIF1 RXI */ + { 78, 8, 16, 0, 4, 3 }, /* SCIF1 BRI */ + { 79, 8, 16, 0, 4, 3 }, /* SCIF1 TXI */ + + { 64, 0x10, 8, 0, 14, 2 }, /* PCIC0 */ + { 65, 0x10, 0, 0, 15, 2 }, /* PCIC1 */ + { 66, 0x14, 24, 0, 16, 2 }, /* PCIC2 */ + { 67, 0x14, 16, 0, 17, 2 }, /* PCIC3 */ + { 68, 0x14, 8, 0, 18, 2 }, /* PCIC4 */ }; void __init init_IRQ_intc2(void) diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index a00022722e9..560b91cdd15 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -12,7 +12,7 @@ #include <linux/console.h> #include <linux/tty.h> #include <linux/init.h> -#include <asm/io.h> +#include <linux/io.h> #ifdef CONFIG_SH_STANDARD_BIOS #include <asm/sh_bios.h> @@ -62,17 +62,9 @@ static struct console bios_console = { #include <linux/serial_core.h> #include "../../../drivers/serial/sh-sci.h" -#ifdef CONFIG_CPU_SH4 -#define SCIF_REG 0xffe80000 -#elif defined(CONFIG_CPU_SUBTYPE_SH72060) -#define SCIF_REG 0xfffe9800 -#else -#error "Undefined SCIF for this subtype" -#endif - static struct uart_port scif_port = { - .mapbase = SCIF_REG, - .membase = (char __iomem *)SCIF_REG, + .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT, + .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT, }; static void scif_sercon_putc(int c) @@ -113,23 +105,29 @@ static struct console scif_console = { .index = -1, }; +#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) +/* + * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 + * devices that aren't using sh-ipl+g. 
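(For reference on the init routine that follows: it programs the bit-rate register at mapbase + 4 with (PCLK + 16 * baud) / (32 * baud) - 1, which is just PCLK / (32 * baud) - 1 rounded to the nearest divider. A quick illustrative check is sketched below; the 33.33 MHz peripheral clock is an assumed figure for the example, not something fixed by this patch.)

/* Illustration only: mirrors the SCBRR expression used in scif_sercon_init(). */
static inline int example_scbrr(unsigned long pclk, unsigned long baud)
{
	/* nearest-integer form of pclk / (32 * baud) - 1 */
	return (pclk + 16 * baud) / (32 * baud) - 1;
}
/* example_scbrr(33333333, 115200) == 8, giving roughly 115.7 kbaud on the wire. */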
+ */ static void scif_sercon_init(int baud) { - ctrl_outw(0, SCIF_REG + 8); - ctrl_outw(0, SCIF_REG); + ctrl_outw(0, scif_port.mapbase + 8); + ctrl_outw(0, scif_port.mapbase); /* Set baud rate */ ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) / - (32 * baud) - 1, SCIF_REG + 4); - - ctrl_outw(12, SCIF_REG + 24); - ctrl_outw(8, SCIF_REG + 24); - ctrl_outw(0, SCIF_REG + 32); - ctrl_outw(0x60, SCIF_REG + 16); - ctrl_outw(0, SCIF_REG + 36); - ctrl_outw(0x30, SCIF_REG + 8); + (32 * baud) - 1, scif_port.mapbase + 4); + + ctrl_outw(12, scif_port.mapbase + 24); + ctrl_outw(8, scif_port.mapbase + 24); + ctrl_outw(0, scif_port.mapbase + 32); + ctrl_outw(0x60, scif_port.mapbase + 16); + ctrl_outw(0, scif_port.mapbase + 36); + ctrl_outw(0x30, scif_port.mapbase + 8); } -#endif +#endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */ +#endif /* CONFIG_EARLY_SCIF_CONSOLE */ /* * Setup a default console, if more than one is compiled in, rely on the @@ -146,16 +144,16 @@ static struct console *early_console = ; static int __initdata keep_early; +static int early_console_initialized; -int __init setup_early_printk(char *opt) +int __init setup_early_printk(char *buf) { - char *space; - char buf[256]; + if (!buf) + return 0; - strlcpy(buf, opt, sizeof(buf)); - space = strchr(buf, ' '); - if (space) - *space = 0; + if (early_console_initialized) + return 0; + early_console_initialized = 1; if (strstr(buf, "keep")) keep_early = 1; @@ -168,7 +166,7 @@ int __init setup_early_printk(char *opt) if (!strncmp(buf, "serial", 6)) { early_console = &scif_console; -#ifdef CONFIG_CPU_SH4 +#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) scif_sercon_init(115200); #endif } @@ -177,12 +175,14 @@ int __init setup_early_printk(char *opt) if (likely(early_console)) register_console(early_console); - return 1; + return 0; } -__setup("earlyprintk=", setup_early_printk); +early_param("earlyprintk", setup_early_printk); void __init disable_early_printk(void) { + if (!early_console_initialized || !early_console) + return; if (!keep_early) { printk("disabling early console\n"); unregister_console(early_console); diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S new file mode 100644 index 00000000000..fc279aeb73a --- /dev/null +++ b/arch/sh/kernel/entry-common.S @@ -0,0 +1,444 @@ +/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $ + * + * linux/arch/sh/entry.S + * + * Copyright (C) 1999, 2000, 2002 Niibe Yutaka + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ + +! NOTE: +! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address +! to be jumped is too far, but it causes illegal slot exception. + +/* + * entry.S contains the system-call and fault low-level handling routines. + * This also contains the timer-interrupt handler, as well as all interrupts + * and faults that can result in a task-switch. + * + * NOTE: This code handles signal-recognition, which happens every time + * after a timer-interrupt and after each system call. + * + * NOTE: This code uses a convention that instructions in the delay slot + * of a transfer-control instruction are indented by an extra space, thus: + * + * jmp @k0 ! control-transfer instruction + * ldc k1, ssr ! delay slot + * + * Stack layout in 'ret_from_syscall': + * ptrace needs to have all regs on the stack. 
+ * if the order here is changed, it needs to be + * updated in ptrace.c and ptrace.h + * + * r0 + * ... + * r15 = stack pointer + * spc + * pr + * ssr + * gbr + * mach + * macl + * syscall # + * + */ + +#if defined(CONFIG_PREEMPT) +# define preempt_stop() cli +#else +# define preempt_stop() +# define resume_kernel __restore_all +#endif + +#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) +! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. +! If both are configured, handle the debug traps (breakpoints) in SW, +! but still allow BIOS traps to FW. + + .align 2 +debug_kernel: +#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) + /* Force BIOS call to FW (debug_trap put TRA in r8) */ + mov r8,r0 + shlr2 r0 + cmp/eq #0x3f,r0 + bt debug_kernel_fw +#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ + +debug_enter: +#if defined(CONFIG_SH_KGDB) + /* Jump to kgdb, pass stacked regs as arg */ +debug_kernel_sw: + mov.l 3f, r0 + jmp @r0 + mov r15, r4 + .align 2 +3: .long kgdb_handle_exception +#endif /* CONFIG_SH_KGDB */ +#ifdef CONFIG_SH_STANDARD_BIOS + bra debug_kernel_fw + nop +#endif +#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ + + .align 2 +debug_trap: +#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) + mov r8, r0 + shlr2 r0 + cmp/eq #0x3f, r0 ! sh_bios() trap + bf 1f +#ifdef CONFIG_SH_KGDB + cmp/eq #0xff, r0 ! XXX: KGDB trap, fix for SH-2. + bf 1f +#endif + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + shll r0 + shll r0 ! kernel space? + bt/s debug_kernel +1: +#endif + mov.l @r15, r0 ! Restore R0 value + mov.l 1f, r8 + jmp @r8 + nop + + .align 2 +ENTRY(exception_error) + ! +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 3f, r0 + jsr @r0 + nop +#endif + sti + mov.l 2f, r0 + jmp @r0 + nop + +! + .align 2 +1: .long break_point_trap_software +2: .long do_exception_error +#ifdef CONFIG_TRACE_IRQFLAGS +3: .long trace_hardirqs_on +#endif + + .align 2 +ret_from_exception: + preempt_stop() +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 4f, r0 + jsr @r0 + nop +#endif +ENTRY(ret_from_irq) + ! + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + shll r0 + shll r0 ! kernel space? + get_current_thread_info r8, r0 + bt resume_kernel ! Yes, it's from kernel, go back soon + +#ifdef CONFIG_PREEMPT + bra resume_userspace + nop +ENTRY(resume_kernel) + mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count + tst r0, r0 + bf noresched +need_resched: + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_NEED_RESCHED, r0 ! need_resched set? + bt noresched + + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + and #0xf0, r0 ! interrupts off (exception path)? + cmp/eq #0xf0, r0 + bt noresched + + mov.l 1f, r0 + mov.l r0, @(TI_PRE_COUNT,r8) + +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 3f, r0 + jsr @r0 + nop +#endif + sti + mov.l 2f, r0 + jsr @r0 + nop + mov #0, r0 + mov.l r0, @(TI_PRE_COUNT,r8) + cli +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 4f, r0 + jsr @r0 + nop +#endif + + bra need_resched + nop + +noresched: + bra __restore_all + nop + + .align 2 +1: .long PREEMPT_ACTIVE +2: .long schedule +#ifdef CONFIG_TRACE_IRQFLAGS +3: .long trace_hardirqs_on +4: .long trace_hardirqs_off +#endif +#endif + +ENTRY(resume_userspace) + ! r8: current_thread_info + cli +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 5f, r0 + jsr @r0 + nop +#endif + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_WORK_MASK, r0 + bt/s __restore_all + tst #_TIF_NEED_RESCHED, r0 + + .align 2 +work_pending: + ! 
r0: current_thread_info->flags + ! r8: current_thread_info + ! t: result of "tst #_TIF_NEED_RESCHED, r0" + bf/s work_resched + tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 +work_notifysig: + bt/s __restore_all + mov r15, r4 + mov r12, r5 ! set arg1(save_r0) + mov r0, r6 + mov.l 2f, r1 + mov.l 3f, r0 + jmp @r1 + lds r0, pr +work_resched: +#ifndef CONFIG_PREEMPT + ! gUSA handling + mov.l @(OFF_SP,r15), r0 ! get user space stack pointer + mov r0, r1 + shll r0 + bf/s 1f + shll r0 + bf/s 1f + mov #OFF_PC, r0 + ! SP >= 0xc0000000 : gUSA mark + mov.l @(r0,r15), r2 ! get user space PC (program counter) + mov.l @(OFF_R0,r15), r3 ! end point + cmp/hs r3, r2 ! r2 >= r3? + bt 1f + add r3, r1 ! rewind point #2 + mov.l r1, @(r0,r15) ! reset PC to rewind point #2 + ! +1: +#endif + mov.l 1f, r1 + jsr @r1 ! schedule + nop + cli +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 5f, r0 + jsr @r0 + nop +#endif + ! + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_WORK_MASK, r0 + bt __restore_all + bra work_pending + tst #_TIF_NEED_RESCHED, r0 + + .align 2 +1: .long schedule +2: .long do_notify_resume +3: .long restore_all +#ifdef CONFIG_TRACE_IRQFLAGS +4: .long trace_hardirqs_on +5: .long trace_hardirqs_off +#endif + + .align 2 +syscall_exit_work: + ! r0: current_thread_info->flags + ! r8: current_thread_info + tst #_TIF_SYSCALL_TRACE, r0 + bt/s work_pending + tst #_TIF_NEED_RESCHED, r0 +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 5f, r0 + jsr @r0 + nop +#endif + sti + ! XXX setup arguments... + mov.l 4f, r0 ! do_syscall_trace + jsr @r0 + nop + bra resume_userspace + nop + + .align 2 +syscall_trace_entry: + ! Yes it is traced. + ! XXX setup arguments... + mov.l 4f, r11 ! Call do_syscall_trace which notifies + jsr @r11 ! superior (will chomp R[0-7]) + nop + ! Reload R0-R4 from kernel stack, where the + ! parent may have modified them using + ! ptrace(POKEUSR). (Note that R0-R2 are + ! used by the system call handler directly + ! from the kernel stack anyway, so don't need + ! to be reloaded here.) This allows the parent + ! to rewrite system calls and args on the fly. + mov.l @(OFF_R4,r15), r4 ! arg0 + mov.l @(OFF_R5,r15), r5 + mov.l @(OFF_R6,r15), r6 + mov.l @(OFF_R7,r15), r7 ! arg3 + mov.l @(OFF_R3,r15), r3 ! syscall_nr + ! + mov.l 2f, r10 ! Number of syscalls + cmp/hs r10, r3 + bf syscall_call + mov #-ENOSYS, r0 + bra syscall_exit + mov.l r0, @(OFF_R0,r15) ! Return value + +__restore_all: + mov.l 1f, r0 + jmp @r0 + nop + + .align 2 +1: .long restore_all + + .align 2 +not_syscall_tra: + bra debug_trap + nop + + .align 2 +syscall_badsys: ! Bad syscall number + mov #-ENOSYS, r0 + bra resume_userspace + mov.l r0, @(OFF_R0,r15) ! Return value + + +/* + * Syscall interface: + * + * Syscall #: R3 + * Arguments #0 to #3: R4--R7 + * Arguments #4 to #6: R0, R1, R2 + * TRA: (number of arguments + 0x10) x 4 + * + * This code also handles delegating other traps to the BIOS/gdb stub + * according to: + * + * Trap number + * (TRA>>2) Purpose + * -------- ------- + * 0x0-0xf old syscall ABI + * 0x10-0x1f new syscall ABI + * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. + * + * Note: When we're first called, the TRA value must be shifted + * right 2 bits in order to get the value that was used as the "trapa" + * argument. + */ + + .align 2 + .globl ret_from_fork +ret_from_fork: + mov.l 1f, r8 + jsr @r8 + mov r0, r4 + bra syscall_exit + nop + .align 2 +1: .long schedule_tail + ! +ENTRY(system_call) +#if !defined(CONFIG_CPU_SH2) + mov.l 1f, r9 + mov.l @r9, r8 ! Read from TRA (Trap Address) Register +#endif + ! 
+ ! Is the trap argument >= 0x20? (TRA will be >= 0x80) + mov #0x7f, r9 + cmp/hi r9, r8 + bt/s not_syscall_tra + mov #OFF_TRA, r9 + add r15, r9 + mov.l r8, @r9 ! set TRA value to tra +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 5f, r10 + jsr @r10 + nop +#endif + sti + + ! + get_current_thread_info r8, r10 + mov.l @(TI_FLAGS,r8), r8 + mov #_TIF_SYSCALL_TRACE, r10 + tst r10, r8 + bf syscall_trace_entry + ! + mov.l 2f, r8 ! Number of syscalls + cmp/hs r8, r3 + bt syscall_badsys + ! +syscall_call: + shll2 r3 ! x4 + mov.l 3f, r8 ! Load the address of sys_call_table + add r8, r3 + mov.l @r3, r8 + jsr @r8 ! jump to specific syscall handler + nop + mov.l @(OFF_R0,r15), r12 ! save r0 + mov.l r0, @(OFF_R0,r15) ! save the return value + ! +syscall_exit: + cli +#ifdef CONFIG_TRACE_IRQFLAGS + mov.l 6f, r0 + jsr @r0 + nop +#endif + ! + get_current_thread_info r8, r0 + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_ALLWORK_MASK, r0 + bf syscall_exit_work + bra __restore_all + nop + .align 2 +#if !defined(CONFIG_CPU_SH2) +1: .long TRA +#endif +2: .long NR_syscalls +3: .long sys_call_table +4: .long do_syscall_trace +#ifdef CONFIG_TRACE_IRQFLAGS +5: .long trace_hardirqs_on +6: .long trace_hardirqs_off +#endif diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S index f5f53d14f24..71a3ad7d283 100644 --- a/arch/sh/kernel/head.S +++ b/arch/sh/kernel/head.S @@ -33,7 +33,8 @@ ENTRY(empty_zero_page) .long 0x00360000 /* INITRD_START */ .long 0x000a0000 /* INITRD_SIZE */ .long 0 - .balign 4096,0,4096 +1: + .skip PAGE_SIZE - empty_zero_page - 1b .text /* @@ -53,8 +54,10 @@ ENTRY(_stext) ldc r0, sr ! Initialize global interrupt mask mov #0, r0 +#ifdef CONFIG_CPU_HAS_SR_RB ldc r0, r6_bank - +#endif + /* * Prefetch if possible to reduce cache miss penalty. * @@ -68,11 +71,14 @@ ENTRY(_stext) ! mov.l 2f, r0 mov r0, r15 ! Set initial r15 (stack pointer) - mov #(THREAD_SIZE >> 8), r1 + mov #(THREAD_SIZE >> 10), r1 shll8 r1 ! r1 = THREAD_SIZE + shll2 r1 sub r1, r0 ! +#ifdef CONFIG_CPU_HAS_SR_RB ldc r0, r7_bank ! ... and initial thread_info - +#endif + ! Clear BSS area mov.l 3f, r1 add #4, r1 @@ -95,7 +101,11 @@ ENTRY(_stext) nop .balign 4 +#if defined(CONFIG_CPU_SH2) +1: .long 0x000000F0 ! IMASK=0xF +#else 1: .long 0x400080F0 ! 
MD=1, RB=0, BL=0, FD=1, IMASK=0xF +#endif 2: .long init_thread_union+THREAD_SIZE 3: .long __bss_start 4: .long _end diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 944128ce970..67be2b6e8cd 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -12,7 +12,7 @@ #include <linux/kernel_stat.h> #include <linux/seq_file.h> #include <linux/io.h> -#include <asm/irq.h> +#include <linux/irq.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/thread_info.h> @@ -78,15 +78,16 @@ union irq_ctx { u32 stack[THREAD_SIZE/sizeof(u32)]; }; -static union irq_ctx *hardirq_ctx[NR_CPUS]; -static union irq_ctx *softirq_ctx[NR_CPUS]; +static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; +static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; #endif asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { - struct pt_regs *old_regs = set_irq_regs(&regs); + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + struct pt_regs *old_regs = set_irq_regs(regs); int irq; #ifdef CONFIG_4KSTACKS union irq_ctx *curctx, *irqctx; @@ -111,7 +112,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, #endif #ifdef CONFIG_CPU_HAS_INTEVT - irq = (ctrl_inl(INTEVT) >> 5) - 16; + irq = evt2irq(ctrl_inl(INTEVT)); #else irq = r4; #endif @@ -135,17 +136,24 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, irqctx->tinfo.task = curctx->tinfo.task; irqctx->tinfo.previous_sp = current_stack_pointer; + /* + * Copy the softirq bits in preempt_count so that the + * softirq checks work in the hardirq context. + */ + irqctx->tinfo.preempt_count = + (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | + (curctx->tinfo.preempt_count & SOFTIRQ_MASK); + __asm__ __volatile__ ( "mov %0, r4 \n" - "mov r15, r9 \n" + "mov r15, r8 \n" "jsr @%1 \n" /* switch to the irq stack */ " mov %2, r15 \n" /* restore the stack (ring zero) */ - "mov r9, r15 \n" + "mov r8, r15 \n" : /* no outputs */ : "r" (irq), "r" (generic_handle_irq), "r" (isp) - /* XXX: A somewhat excessive clobber list? -PFM */ : "memory", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "t", "pr" ); @@ -193,7 +201,7 @@ void irq_ctx_init(int cpu) irqctx->tinfo.task = NULL; irqctx->tinfo.exec_domain = NULL; irqctx->tinfo.cpu = cpu; - irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET; + irqctx->tinfo.preempt_count = 0; irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); softirq_ctx[cpu] = irqctx; @@ -239,13 +247,38 @@ asmlinkage void do_softirq(void) "mov r9, r15 \n" : /* no outputs */ : "r" (__do_softirq), "r" (isp) - /* XXX: A somewhat excessive clobber list?
-PFM */ : "memory", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" ); + + /* + * Shouldn't happen, we returned above if in_interrupt(): + */ + WARN_ON_ONCE(softirq_count()); } local_irq_restore(flags); } EXPORT_SYMBOL(do_softirq); #endif + +void __init init_IRQ(void) +{ +#ifdef CONFIG_CPU_HAS_PINT_IRQ + init_IRQ_pint(); +#endif + +#ifdef CONFIG_CPU_HAS_INTC2_IRQ + init_IRQ_intc2(); +#endif + +#ifdef CONFIG_CPU_HAS_IPR_IRQ + init_IRQ_ipr(); +#endif + + /* Perform the machine specific initialisation */ + if (sh_mv.mv_init_irq) + sh_mv.mv_init_irq(); + + irq_ctx_init(smp_processor_id()); +} diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index a52b13ac6b7..486c06e1803 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c @@ -385,10 +385,11 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne asmlinkage int sys_fork(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); #ifdef CONFIG_MMU - return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL); + return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL); #else /* fork almost works, enough to trick you into looking elsewhere :-( */ return -EINVAL; @@ -398,11 +399,12 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5, asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, unsigned long parent_tidptr, unsigned long child_tidptr, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); if (!newsp) - newsp = regs.regs[15]; - return do_fork(clone_flags, newsp, &regs, 0, + newsp = regs->regs[15]; + return do_fork(clone_flags, newsp, regs, 0, (int __user *)parent_tidptr, (int __user *)child_tidptr); } @@ -418,9 +420,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, */ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs, + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs, 0, NULL, NULL); } @@ -429,8 +432,9 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, */ asmlinkage int sys_execve(char *ufilename, char **uargv, char **uenvp, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); int error; char *filename; @@ -442,7 +446,7 @@ asmlinkage int sys_execve(char *ufilename, char **uargv, error = do_execve(filename, (char __user * __user *)uargv, (char __user * __user *)uenvp, - &regs); + regs); if (error == 0) { task_lock(current); current->ptrace &= ~PT_DTRACE; @@ -466,15 +470,14 @@ unsigned long get_wchan(struct task_struct *p) */ pc = thread_saved_pc(p); if (in_sched_functions(pc)) { - schedule_frame = ((unsigned long *)(long)p->thread.sp)[1]; - return (unsigned long)((unsigned long *)schedule_frame)[1]; + schedule_frame = (unsigned long)p->thread.sp; + return ((unsigned long *)schedule_frame)[21]; } + return pc; } -asmlinkage void break_point_trap(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, - struct pt_regs regs) +asmlinkage void break_point_trap(void) { /* Clear tracing.
*/ #if defined(CONFIG_CPU_SH4A) @@ -492,8 +495,20 @@ asmlinkage void break_point_trap(unsigned long r4, unsigned long r5, asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { - regs.pc -= 2; + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + + /* Rewind */ + regs->pc -= 2; + +#ifdef CONFIG_BUG + if (__kernel_text_address(instruction_pointer(regs))) { + u16 insn = *(u16 *)instruction_pointer(regs); + if (insn == TRAPA_BUG_OPCODE) + handle_BUG(regs); + } +#endif + force_sig(SIGTRAP, current); } diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S index 8221b37c977..c66cb3209db 100644 --- a/arch/sh/kernel/relocate_kernel.S +++ b/arch/sh/kernel/relocate_kernel.S @@ -7,11 +7,9 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ - #include <linux/linkage.h> - -#define PAGE_SIZE 4096 /* must be same value as in <asm/page.h> */ - +#include <asm/addrspace.h> +#include <asm/page.h> .globl relocate_new_kernel relocate_new_kernel: @@ -20,8 +18,8 @@ relocate_new_kernel: /* r6 = start_address */ /* r7 = vbr_reg */ - mov.l 10f,r8 /* 4096 */ - mov.l 11f,r9 /* 0xa0000000 */ + mov.l 10f,r8 /* PAGE_SIZE */ + mov.l 11f,r9 /* P2SEG */ /* stack setting */ add r8,r5 @@ -32,7 +30,7 @@ relocate_new_kernel: 0: mov.l @r4+,r0 /* cmd = *ind++ */ -1: /* addr = (cmd | 0xa0000000) & 0xfffffff0 */ +1: /* addr = (cmd | P2SEG) & 0xfffffff0 */ mov r0,r2 or r9,r2 mov #-16,r1 @@ -92,7 +90,7 @@ relocate_new_kernel: 10: .long PAGE_SIZE 11: - .long 0xa0000000 + .long P2SEG relocate_new_kernel_end: diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 36d86f9ac38..225f9ea5cdd 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -84,8 +84,7 @@ unsigned long memory_start, memory_end; static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE], struct sh_machine_vector** mvp, - unsigned long *mv_io_base, - int *mv_mmio_enable) + unsigned long *mv_io_base) { char c = ' ', *to = command_line, *from = COMMAND_LINE; int len = 0; @@ -112,23 +111,6 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE], } } -#ifdef CONFIG_EARLY_PRINTK - if (c == ' ' && !memcmp(from, "earlyprintk=", 12)) { - char *ep_end; - - if (to != command_line) - to--; - - from += 12; - ep_end = strchr(from, ' '); - - setup_early_printk(from); - printk("early console enabled\n"); - - from = ep_end; - } -#endif - if (c == ' ' && !memcmp(from, "sh_mv=", 6)) { char* mv_end; char* mv_comma; @@ -145,7 +127,6 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE], int ints[3]; get_options(mv_comma+1, ARRAY_SIZE(ints), ints); *mv_io_base = ints[1]; - *mv_mmio_enable = ints[2]; mv_len = mv_comma - from; } else { mv_len = mv_end - from; @@ -158,6 +139,7 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE], *mvp = get_mv_byname(mv_name); } + c = *(from++); if (!c) break; @@ -177,9 +159,8 @@ static int __init sh_mv_setup(char **cmdline_p) struct sh_machine_vector *mv = NULL; char mv_name[MV_NAME_SIZE] = ""; unsigned long mv_io_base = 0; - int mv_mmio_enable = 0; - parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base, &mv_mmio_enable); + parse_cmdline(cmdline_p, mv_name, &mv, &mv_io_base); #ifdef CONFIG_SH_UNKNOWN if (mv == NULL) { @@ -258,6 +239,7 @@ void __init setup_arch(char **cmdline_p) sh_mv_setup(cmdline_p); + /* * Find the highest 
page frame number we have available */ @@ -305,6 +287,7 @@ void __init setup_arch(char **cmdline_p) PFN_PHYS(pages)); } + /* * Reserve the kernel text and * Reserve the bootmem bitmap. We do this in two steps (first step @@ -325,15 +308,18 @@ void __init setup_arch(char **cmdline_p) ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0); if (&__rd_start != &__rd_end) { LOADER_TYPE = 1; - INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START; - INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start; + INITRD_START = PHYSADDR((unsigned long)&__rd_start) - + __MEMORY_START; + INITRD_SIZE = (unsigned long)&__rd_end - + (unsigned long)&__rd_start; } if (LOADER_TYPE && INITRD_START) { if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) { - reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE); - initrd_start = - INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0; + reserve_bootmem_node(NODE_DATA(0), INITRD_START + + __MEMORY_START, INITRD_SIZE); + initrd_start = INITRD_START + PAGE_OFFSET + + __MEMORY_START; initrd_end = initrd_start + INITRD_SIZE; } else { printk("initrd extends beyond end of memory " @@ -392,6 +378,7 @@ static int __init topology_init(void) subsys_initcall(topology_init); static const char *cpu_name[] = { + [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619", [CPU_SH7604] = "SH7604", [CPU_SH7300] = "SH7300", [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708", @@ -404,6 +391,7 @@ static const char *cpu_name[] = { [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", + [CPU_SH7785] = "SH7785", [CPU_SH7722] = "SH7722", [CPU_SH_NONE] = "Unknown" }; diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c index 9daad70bc30..e6106239a0f 100644 --- a/arch/sh/kernel/sh_ksyms.c +++ b/arch/sh/kernel/sh_ksyms.c @@ -18,7 +18,6 @@ #include <asm/delay.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> -#include <asm/checksum.h> extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); extern struct hw_interrupt_type no_irq_type; @@ -71,15 +70,26 @@ DECLARE_EXPORT(__sdivsi3); DECLARE_EXPORT(__ashrdi3); DECLARE_EXPORT(__ashldi3); DECLARE_EXPORT(__lshrdi3); -DECLARE_EXPORT(__movstr); DECLARE_EXPORT(__movstrSI16); - -EXPORT_SYMBOL(strcpy); +#if __GNUC__ == 4 +DECLARE_EXPORT(__movmem); +#else +DECLARE_EXPORT(__movstr); +#endif #ifdef CONFIG_CPU_SH4 +#if __GNUC__ == 4 +DECLARE_EXPORT(__movmem_i4_even); +DECLARE_EXPORT(__movmem_i4_odd); +DECLARE_EXPORT(__movmemSI12_i4); +DECLARE_EXPORT(__sdivsi3_i4i); +DECLARE_EXPORT(__udiv_qrnnd_16); +DECLARE_EXPORT(__udivsi3_i4i); +#else /* GCC 3.x */ DECLARE_EXPORT(__movstr_i4_even); DECLARE_EXPORT(__movstr_i4_odd); DECLARE_EXPORT(__movstrSI12_i4); +#endif /* __GNUC__ == 4 */ #endif #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) @@ -102,10 +112,6 @@ EXPORT_SYMBOL(__down_trylock); EXPORT_SYMBOL(synchronize_irq); #endif -#ifdef CONFIG_PM -EXPORT_SYMBOL(pm_suspend); -#endif - EXPORT_SYMBOL(csum_partial); #ifdef CONFIG_IPV6 EXPORT_SYMBOL(csum_ipv6_magic); diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c index 5213f5bc6ce..379c88bf5d9 100644 --- a/arch/sh/kernel/signal.c +++ b/arch/sh/kernel/signal.c @@ -23,6 +23,7 @@ #include <linux/elf.h> #include <linux/personality.h> #include <linux/binfmts.h> +#include <linux/freezer.h> #include <asm/ucontext.h> #include <asm/uaccess.h> @@ -37,7 +38,7 @@ asmlinkage int 
sys_sigsuspend(old_sigset_t mask, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); @@ -52,7 +53,7 @@ sys_sigsuspend(old_sigset_t mask, return -ERESTARTNOHAND; } -asmlinkage int +asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { @@ -87,9 +88,11 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { - return do_sigaltstack(uss, uoss, regs.regs[15]); + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + + return do_sigaltstack(uss, uoss, regs->regs[15]); } @@ -98,7 +101,11 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, */ #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ -#define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */ +#if defined(CONFIG_CPU_SH2) +#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */ +#else +#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */ +#endif #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ struct sigframe @@ -194,9 +201,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { - struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15]; + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; sigset_t set; int r0; @@ -216,7 +224,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - if (restore_sigcontext(&regs, &frame->sc, &r0)) + if (restore_sigcontext(regs, &frame->sc, &r0)) goto badframe; return r0; @@ -227,9 +235,10 @@ badframe: asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { - struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15]; + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; sigset_t set; stack_t st; int r0; @@ -246,14 +255,14 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0)) + if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) goto badframe; if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) goto badframe; /* It is more difficult to avoid calling this function than to call it and ignore errors.
*/ - do_sigaltstack(&st, NULL, regs.regs[15]); + do_sigaltstack(&st, NULL, regs->regs[15]); return r0; @@ -350,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, } else { /* Generate return code (system call to sigreturn) */ err |= __put_user(MOVW(7), &frame->retcode[0]); - err |= __put_user(TRAP16, &frame->retcode[1]); + err |= __put_user(TRAP_NOARG, &frame->retcode[1]); err |= __put_user(OR_R0_R0, &frame->retcode[2]); err |= __put_user(OR_R0_R0, &frame->retcode[3]); err |= __put_user(OR_R0_R0, &frame->retcode[4]); @@ -430,7 +439,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, } else { /* Generate return code (system call to rt_sigreturn) */ err |= __put_user(MOVW(7), &frame->retcode[0]); - err |= __put_user(TRAP16, &frame->retcode[1]); + err |= __put_user(TRAP_NOARG, &frame->retcode[1]); err |= __put_user(OR_R0_R0, &frame->retcode[2]); err |= __put_user(OR_R0_R0, &frame->retcode[3]); err |= __put_user(OR_R0_R0, &frame->retcode[4]); diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c new file mode 100644 index 00000000000..0d5268afe80 --- /dev/null +++ b/arch/sh/kernel/stacktrace.c @@ -0,0 +1,43 @@ +/* + * arch/sh/kernel/stacktrace.c + * + * Stack trace management functions + * + * Copyright (C) 2006 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/sched.h> +#include <linux/stacktrace.h> +#include <linux/thread_info.h> +#include <asm/ptrace.h> + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +void save_stack_trace(struct stack_trace *trace, struct task_struct *task) +{ + unsigned long *sp; + + if (!task) + task = current; + if (task == current) + sp = (unsigned long *)current_stack_pointer; + else + sp = (unsigned long *)task->thread.sp; + + while (!kstack_end(sp)) { + unsigned long addr = *sp++; + + if (__kernel_text_address(addr)) { + if (trace->skip > 0) + trace->skip--; + else + trace->entries[trace->nr_entries++] = addr; + if (trace->nr_entries >= trace->max_entries) + break; + } + } +} diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 8fde95001c3..e18f183e103 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -33,14 +33,15 @@ */ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); int fd[2]; int error; error = do_pipe(fd); if (!error) { - regs.regs[1] = fd[1]; + regs->regs[1] = fd[1]; return fd[0]; } return error; @@ -50,6 +51,7 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); +#ifdef CONFIG_MMU /* * To avoid cache aliases, we map the shared page with same color. */ @@ -135,6 +137,7 @@ full_search: addr = COLOUR_ALIGN(addr, pgoff); } } +#endif /* CONFIG_MMU */ static inline long do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, @@ -311,6 +314,12 @@ asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1, #endif } +#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A) +#define SYSCALL_ARG3 "trapa #0x23" +#else +#define SYSCALL_ARG3 "trapa #0x13" +#endif + /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. 
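(A side note on SYSCALL_ARG3 above: per the syscall notes in entry-common.S, the trapa immediate encodes the argument count on top of a per-family base, 0x10 on SH-3/SH-4 and apparently 0x20 on SH-2/SH-2A, which is why a three-argument call is "trapa #0x13" on the former and "#0x23" on the latter. The sketch below simply restates the kernel_execve() pattern that follows for a generic three-argument call; it is illustrative only and example_syscall3() is not a function added by this patch.)

static inline long example_syscall3(long nr, long arg0, long arg1, long arg2)
{
	register long __sc0 __asm__ ("r3") = nr;	/* syscall number goes in r3 */
	register long __sc4 __asm__ ("r4") = arg0;
	register long __sc5 __asm__ ("r5") = arg1;
	register long __sc6 __asm__ ("r6") = arg2;

	__asm__ __volatile__ (SYSCALL_ARG3		/* "trapa #0x13" or "trapa #0x23" */
		: "=z" (__sc0)
		: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
		: "memory");

	return __sc0;					/* return value comes back in r0 */
}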
@@ -321,7 +330,7 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]) register long __sc4 __asm__ ("r4") = (long) filename; register long __sc5 __asm__ ("r5") = (long) argv; register long __sc6 __asm__ ("r6") = (long) envp; - __asm__ __volatile__ ("trapa #0x13" : "=z" (__sc0) + __asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0) : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) : "memory"); return __sc0; diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 57e708d7b52..c206c9504c4 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -13,6 +13,8 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/profile.h> +#include <linux/timex.h> +#include <linux/sched.h> #include <asm/clock.h> #include <asm/rtc.h> #include <asm/timer.h> @@ -50,15 +52,20 @@ unsigned long long __attribute__ ((weak)) sched_clock(void) #ifndef CONFIG_GENERIC_TIME void do_gettimeofday(struct timeval *tv) { + unsigned long flags; unsigned long seq; unsigned long usec, sec; do { - seq = read_seqbegin(&xtime_lock); + /* + * Turn off IRQs when grabbing xtime_lock, so that + * the sys_timer get_offset code doesn't have to handle it. + */ + seq = read_seqbegin_irqsave(&xtime_lock, flags); usec = get_timer_offset(); sec = xtime.tv_sec; - usec += xtime.tv_nsec / 1000; - } while (read_seqretry(&xtime_lock, seq)); + usec += xtime.tv_nsec / NSEC_PER_USEC; + } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); while (usec >= 1000000) { usec -= 1000000; @@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv) * wall time. Discover what correction gettimeofday() would have * made, and then undo it! */ - nsec -= 1000 * get_timer_offset(); + nsec -= get_timer_offset() * NSEC_PER_USEC; wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); @@ -169,6 +176,108 @@ static struct sysdev_class timer_sysclass = { .resume = timer_resume, }; +#ifdef CONFIG_NO_IDLE_HZ +static int timer_dyn_tick_enable(void) +{ + struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; + unsigned long flags; + int ret = -ENODEV; + + if (dyn_tick) { + spin_lock_irqsave(&dyn_tick->lock, flags); + ret = 0; + if (!(dyn_tick->state & DYN_TICK_ENABLED)) { + ret = dyn_tick->enable(); + + if (ret == 0) + dyn_tick->state |= DYN_TICK_ENABLED; + } + spin_unlock_irqrestore(&dyn_tick->lock, flags); + } + + return ret; +} + +static int timer_dyn_tick_disable(void) +{ + struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; + unsigned long flags; + int ret = -ENODEV; + + if (dyn_tick) { + spin_lock_irqsave(&dyn_tick->lock, flags); + ret = 0; + if (dyn_tick->state & DYN_TICK_ENABLED) { + ret = dyn_tick->disable(); + + if (ret == 0) + dyn_tick->state &= ~DYN_TICK_ENABLED; + } + spin_unlock_irqrestore(&dyn_tick->lock, flags); + } + + return ret; +} + +/* + * Reprogram the system timer for at least the calculated time interval. + * This function should be called from the idle thread with IRQs disabled, + * immediately before sleeping. 
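(To make that calling convention concrete, an idle loop would use the function roughly as sketched below. This is illustrative only: example_dyn_tick_idle() is not part of the patch, and the actual sleep instruction is left out because it is CPU-specific.)

static void example_dyn_tick_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		timer_dyn_reprogram();	/* stretch the next tick while idle */
	/* the CPU-specific sleep instruction would go here */
	local_irq_enable();
}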
+ */ +void timer_dyn_reprogram(void) +{ + struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; + unsigned long next, seq, flags; + + if (!dyn_tick) + return; + + spin_lock_irqsave(&dyn_tick->lock, flags); + if (dyn_tick->state & DYN_TICK_ENABLED) { + next = next_timer_interrupt(); + do { + seq = read_seqbegin(&xtime_lock); + dyn_tick->reprogram(next - jiffies); + } while (read_seqretry(&xtime_lock, seq)); + } + spin_unlock_irqrestore(&dyn_tick->lock, flags); +} + +static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf) +{ + return sprintf(buf, "%i\n", + (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1); +} + +static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf, + size_t count) +{ + unsigned int enable = simple_strtoul(buf, NULL, 2); + + if (enable) + timer_dyn_tick_enable(); + else + timer_dyn_tick_disable(); + + return count; +} +static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick); + +/* + * dyntick=enable|disable + */ +static char dyntick_str[4] __initdata = ""; + +static int __init dyntick_setup(char *str) +{ + if (str) + strlcpy(dyntick_str, str, sizeof(dyntick_str)); + return 1; +} + +__setup("dyntick=", dyntick_setup); +#endif + static int __init timer_init_sysfs(void) { int ret = sysdev_class_register(&timer_sysclass); @@ -176,7 +285,22 @@ static int __init timer_init_sysfs(void) return ret; sys_timer->dev.cls = &timer_sysclass; - return sysdev_register(&sys_timer->dev); + ret = sysdev_register(&sys_timer->dev); + +#ifdef CONFIG_NO_IDLE_HZ + if (ret == 0 && sys_timer->dyn_tick) { + ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick); + + /* + * Turn on dynamic tick after calibrate delay + * for correct bogomips + */ + if (ret == 0 && dyntick_str[0] == 'e') + ret = timer_dyn_tick_enable(); + } +#endif + + return ret; } device_initcall(timer_init_sysfs); @@ -200,6 +324,11 @@ void __init time_init(void) sys_timer = get_sys_timer(); printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); +#ifdef CONFIG_NO_IDLE_HZ + if (sys_timer->dyn_tick) + spin_lock_init(&sys_timer->dyn_tick->lock); +#endif + #if defined(CONFIG_SH_KGDB) /* * Set up kgdb as requested. We do it here because the serial diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile index 151a6a304ce..bcf244ff6a1 100644 --- a/arch/sh/kernel/timers/Makefile +++ b/arch/sh/kernel/timers/Makefile @@ -5,4 +5,6 @@ obj-y := timer.o obj-$(CONFIG_SH_TMU) += timer-tmu.o +obj-$(CONFIG_SH_MTU2) += timer-mtu2.o +obj-$(CONFIG_SH_CMT) += timer-cmt.o diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c new file mode 100644 index 00000000000..a574b93a4e7 --- /dev/null +++ b/arch/sh/kernel/timers/timer-cmt.c @@ -0,0 +1,196 @@ +/* + * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support + * + * Copyright (C) 2005 Yoshinori Sato + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/seqlock.h> +#include <asm/timer.h> +#include <asm/rtc.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/clock.h> + +#if defined(CONFIG_CPU_SUBTYPE_SH7619) +#define CMT_CMSTR 0xf84a0070 +#define CMT_CMCSR_0 0xf84a0072 +#define CMT_CMCNT_0 0xf84a0074 +#define CMT_CMCOR_0 0xf84a0076 +#define CMT_CMCSR_1 0xf84a0078 +#define CMT_CMCNT_1 0xf84a007a +#define CMT_CMCOR_1 0xf84a007c + +#define STBCR3 0xf80a0000 +#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0) +#define CMT_CMCSR_INIT 0x0040 +#define CMT_CMCSR_CALIB 0x0000 +#elif defined(CONFIG_CPU_SUBTYPE_SH7206) +#define CMT_CMSTR 0xfffec000 +#define CMT_CMCSR_0 0xfffec002 +#define CMT_CMCNT_0 0xfffec004 +#define CMT_CMCOR_0 0xfffec006 + +#define STBCR4 0xfffe040c +#define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0) +#define CMT_CMCSR_INIT 0x0040 +#define CMT_CMCSR_CALIB 0x0000 +#else +#error "Unknown CPU SUBTYPE" +#endif + +static unsigned long cmt_timer_get_offset(void) +{ + int count; + static unsigned short count_p = 0xffff; /* for the first call after boot */ + static unsigned long jiffies_p = 0; + + /* + * cache volatile jiffies temporarily; we have IRQs turned off. + */ + unsigned long jiffies_t; + + /* timer count may underflow right here */ + count = ctrl_inw(CMT_CMCOR_0); + count -= ctrl_inw(CMT_CMCNT_0); + + jiffies_t = jiffies; + + /* + * avoiding timer inconsistencies (they are rare, but they happen)... + * there is one kind of problem that must be avoided here: + * 1. the timer counter underflows + */ + + if (jiffies_t == jiffies_p) { + if (count > count_p) { + /* the nutcase */ + if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */ + count -= LATCH; + } else { + printk("%s (): hardware timer problem?\n", + __FUNCTION__); + } + } + } else + jiffies_p = jiffies_t; + + count_p = count; + + count = ((LATCH-1) - count) * TICK_SIZE; + count = (count + LATCH/2) / LATCH; + + return count; +} + +static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id) +{ + unsigned long timer_status; + + /* Clear CMF bit */ + timer_status = ctrl_inw(CMT_CMCSR_0); + timer_status &= ~0x80; + ctrl_outw(timer_status, CMT_CMCSR_0); + + /* + * Here we are in the timer irq handler. We just have irqs locally + * disabled but we don't know if the timer_bh is running on the other + * CPU. We need to avoid to SMP race with it. NOTE: we don' t need + * the irq version of write_lock because as just said we have irq + * locally disabled. 
-arca + */ + write_seqlock(&xtime_lock); + handle_timer_tick(); + write_sequnlock(&xtime_lock); + + return IRQ_HANDLED; +} + +static struct irqaction cmt_irq = { + .name = "timer", + .handler = cmt_timer_interrupt, + .flags = IRQF_DISABLED | IRQF_TIMER, + .mask = CPU_MASK_NONE, +}; + +static void cmt_clk_init(struct clk *clk) +{ + u8 divisor = CMT_CMCSR_INIT & 0x3; + ctrl_inw(CMT_CMCSR_0); + ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0); + clk->parent = clk_get(NULL, "module_clk"); + clk->rate = clk->parent->rate / (8 << (divisor << 1)); +} + +static void cmt_clk_recalc(struct clk *clk) +{ + u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3; + clk->rate = clk->parent->rate / (8 << (divisor << 1)); +} + +static struct clk_ops cmt_clk_ops = { + .init = cmt_clk_init, + .recalc = cmt_clk_recalc, +}; + +static struct clk cmt0_clk = { + .name = "cmt0_clk", + .ops = &cmt_clk_ops, +}; + +static int cmt_timer_start(void) +{ + ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR); + return 0; +} + +static int cmt_timer_stop(void) +{ + ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR); + return 0; +} + +static int cmt_timer_init(void) +{ + unsigned long interval; + + cmt_clock_enable(); + + setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq); + + cmt0_clk.parent = clk_get(NULL, "module_clk"); + + cmt_timer_stop(); + + interval = cmt0_clk.parent->rate / 8 / HZ; + printk(KERN_INFO "Interval = %ld\n", interval); + + ctrl_outw(interval, CMT_CMCOR_0); + + clk_register(&cmt0_clk); + clk_enable(&cmt0_clk); + + cmt_timer_start(); + + return 0; +} + +struct sys_timer_ops cmt_timer_ops = { + .init = cmt_timer_init, + .start = cmt_timer_start, + .stop = cmt_timer_stop, +#ifndef CONFIG_GENERIC_TIME + .get_offset = cmt_timer_get_offset, +#endif +}; + +struct sys_timer cmt_timer = { + .name = "cmt", + .ops = &cmt_timer_ops, +}; diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c new file mode 100644 index 00000000000..fffcd1c0987 --- /dev/null +++ b/arch/sh/kernel/timers/timer-mtu2.c @@ -0,0 +1,200 @@ +/* + * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support + * + * Copyright (C) 2005 Paul Mundt + * + * Based off of arch/sh/kernel/timers/timer-tmu.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/seqlock.h> +#include <asm/timer.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/clock.h> + +/* + * We use channel 1 for our lowly system timer. Channel 2 would be the other + * likely candidate, but we leave it alone as it has higher divisors that + * would be of more use to other more interesting applications. + * + * TODO: Presently we only implement a 16-bit single-channel system timer. + * However, we can implement channel cascade if we go the overflow route and + * get away with using 2 MTU2 channels as a 32-bit timer. 
+ */ +#define MTU2_TSTR 0xfffe4280 +#define MTU2_TCR_1 0xfffe4380 +#define MTU2_TMDR_1 0xfffe4381 +#define MTU2_TIOR_1 0xfffe4382 +#define MTU2_TIER_1 0xfffe4384 +#define MTU2_TSR_1 0xfffe4385 +#define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */ +#define MTU2_TGRA_1 0xfffe438a + +#define STBCR3 0xfffe0408 + +#define MTU2_TSTR_CST1 (1 << 1) /* Counter Start 1 */ + +#define MTU2_TSR_TGFA (1 << 0) /* GRA compare match */ + +#define MTU2_TIER_TGIEA (1 << 0) /* GRA compare match interrupt enable */ + +#define MTU2_TCR_INIT 0x22 + +#define MTU2_TCR_CALIB 0x00 + +static unsigned long mtu2_timer_get_offset(void) +{ + int count; + static int count_p = 0x7fff; /* for the first call after boot */ + static unsigned long jiffies_p = 0; + + /* + * cache volatile jiffies temporarily; we have IRQs turned off. + */ + unsigned long jiffies_t; + + /* timer count may underflow right here */ + count = ctrl_inw(MTU2_TCNT_1); /* read the latched count */ + + jiffies_t = jiffies; + + /* + * avoiding timer inconsistencies (they are rare, but they happen)... + * there is one kind of problem that must be avoided here: + * 1. the timer counter underflows + */ + + if (jiffies_t == jiffies_p) { + if (count > count_p) { + if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) { + count -= LATCH; + } else { + printk("%s (): hardware timer problem?\n", + __FUNCTION__); + } + } + } else + jiffies_p = jiffies_t; + + count_p = count; + + count = ((LATCH-1) - count) * TICK_SIZE; + count = (count + LATCH/2) / LATCH; + + return count; +} + +static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id) +{ + unsigned long timer_status; + + /* Clear TGFA bit */ + timer_status = ctrl_inb(MTU2_TSR_1); + timer_status &= ~MTU2_TSR_TGFA; + ctrl_outb(timer_status, MTU2_TSR_1); + + /* Do timer tick */ + write_seqlock(&xtime_lock); + handle_timer_tick(); + write_sequnlock(&xtime_lock); + + return IRQ_HANDLED; +} + +static struct irqaction mtu2_irq = { + .name = "timer", + .handler = mtu2_timer_interrupt, + .flags = IRQF_DISABLED | IRQF_TIMER, + .mask = CPU_MASK_NONE, +}; + +static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 }; + +static void mtu2_clk_init(struct clk *clk) +{ + u8 idx = MTU2_TCR_INIT & 0x7; + + clk->rate = clk->parent->rate / divisors[idx]; + /* Start TCNT counting */ + ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR); + +} + +static void mtu2_clk_recalc(struct clk *clk) +{ + u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7; + clk->rate = clk->parent->rate / divisors[idx]; +} + +static struct clk_ops mtu2_clk_ops = { + .init = mtu2_clk_init, + .recalc = mtu2_clk_recalc, +}; + +static struct clk mtu2_clk1 = { + .name = "mtu2_clk1", + .ops = &mtu2_clk_ops, +}; + +static int mtu2_timer_start(void) +{ + ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR); + return 0; +} + +static int mtu2_timer_stop(void) +{ + ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR); + return 0; +} + +static int mtu2_timer_init(void) +{ + u8 tmp; + unsigned long interval; + + setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq); + + mtu2_clk1.parent = clk_get(NULL, "module_clk"); + + ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3); + + /* Normal operation */ + ctrl_outb(0, MTU2_TMDR_1); + ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1); + ctrl_outb(0x01, MTU2_TIOR_1); + + /* Enable underflow interrupt */ + ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1); + + interval = CONFIG_SH_PCLK_FREQ / 16 / HZ; + printk(KERN_INFO "Interval = %ld\n", interval); + + ctrl_outw(interval, MTU2_TGRA_1); + ctrl_outw(0, MTU2_TCNT_1); + + clk_register(&mtu2_clk1); + 
clk_enable(&mtu2_clk1); + + return 0; +} + +struct sys_timer_ops mtu2_timer_ops = { + .init = mtu2_timer_init, + .start = mtu2_timer_start, + .stop = mtu2_timer_stop, +#ifndef CONFIG_GENERIC_TIME + .get_offset = mtu2_timer_get_offset, +#endif +}; + +struct sys_timer mtu2_timer = { + .name = "mtu2", + .ops = &mtu2_timer_ops, +}; diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 24927015dc3..e060e71d078 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -17,7 +17,6 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> -#include <linux/spinlock.h> #include <linux/seqlock.h> #include <asm/timer.h> #include <asm/rtc.h> @@ -31,13 +30,9 @@ #define TMU0_TCR_CALIB 0x0000 -static DEFINE_SPINLOCK(tmu0_lock); - static unsigned long tmu_timer_get_offset(void) { int count; - unsigned long flags; - static int count_p = 0x7fffffff; /* for the first call after boot */ static unsigned long jiffies_p = 0; @@ -46,7 +41,6 @@ static unsigned long tmu_timer_get_offset(void) */ unsigned long jiffies_t; - spin_lock_irqsave(&tmu0_lock, flags); /* timer count may underflow right here */ count = ctrl_inl(TMU0_TCNT); /* read the latched count */ @@ -72,7 +66,6 @@ static unsigned long tmu_timer_get_offset(void) jiffies_p = jiffies_t; count_p = count; - spin_unlock_irqrestore(&tmu0_lock, flags); count = ((LATCH-1) - count) * TICK_SIZE; count = (count + LATCH/2) / LATCH; @@ -106,7 +99,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) static struct irqaction tmu_irq = { .name = "timer", .handler = tmu_timer_interrupt, - .flags = IRQF_DISABLED, + .flags = IRQF_DISABLED | IRQF_TIMER, .mask = CPU_MASK_NONE, }; @@ -149,9 +142,9 @@ static int tmu_timer_init(void) { unsigned long interval; - setup_irq(TIMER_IRQ, &tmu_irq); + setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq); - tmu0_clk.parent = clk_get("module_clk"); + tmu0_clk.parent = clk_get(NULL, "module_clk"); /* Start TMU0 */ tmu_timer_stop(); diff --git a/arch/sh/kernel/timers/timer.c b/arch/sh/kernel/timers/timer.c index dc1f631053a..a6bcc913d25 100644 --- a/arch/sh/kernel/timers/timer.c +++ b/arch/sh/kernel/timers/timer.c @@ -17,6 +17,12 @@ static struct sys_timer *sys_timers[] __initdata = { #ifdef CONFIG_SH_TMU &tmu_timer, #endif +#ifdef CONFIG_SH_MTU2 + &mtu2_timer, +#endif +#ifdef CONFIG_SH_CMT + &cmt_timer, +#endif NULL, }; diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 53dfa55f315..ec110157992 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -18,13 +18,15 @@ #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/io.h> +#include <linux/debug_locks.h> +#include <linux/limits.h> #include <asm/system.h> #include <asm/uaccess.h> #ifdef CONFIG_SH_KGDB #include <asm/kgdb.h> -#define CHK_REMOTE_DEBUG(regs) \ -{ \ +#define CHK_REMOTE_DEBUG(regs) \ +{ \ if (kgdb_debug_hook && !user_mode(regs))\ (*kgdb_debug_hook)(regs); \ } @@ -33,8 +35,13 @@ #endif #ifdef CONFIG_CPU_SH2 -#define TRAP_RESERVED_INST 4 -#define TRAP_ILLEGAL_SLOT_INST 6 +# define TRAP_RESERVED_INST 4 +# define TRAP_ILLEGAL_SLOT_INST 6 +# define TRAP_ADDRESS_ERROR 9 +# ifdef CONFIG_CPU_SH2A +# define TRAP_DIVZERO_ERROR 17 +# define TRAP_DIVOVF_ERROR 18 +# endif #else #define TRAP_RESERVED_INST 12 #define TRAP_ILLEGAL_SLOT_INST 13 @@ -88,7 +95,7 @@ void die(const char * str, struct pt_regs * regs, long err) if (!user_mode(regs) || in_interrupt()) dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + - (unsigned long)task_stack_page(current)); + 
(unsigned long)task_stack_page(current)); bust_spinlocks(0); spin_unlock_irq(&die_lock); @@ -102,8 +109,6 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs, die(str, regs, err); } -static int handle_unaligned_notify_count = 10; - /* * try and fix up kernelspace address errors * - userspace errors just cause EFAULT to be returned, resulting in SEGV @@ -125,6 +130,40 @@ static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) return -EFAULT; } +#ifdef CONFIG_BUG +#ifdef CONFIG_DEBUG_BUGVERBOSE +static inline void do_bug_verbose(struct pt_regs *regs) +{ + struct bug_frame f; + long len; + + if (__copy_from_user(&f, (const void __user *)regs->pc, + sizeof(struct bug_frame))) + return; + + len = __strnlen_user(f.file, PATH_MAX) - 1; + if (unlikely(len < 0 || len >= PATH_MAX)) + f.file = "<bad filename>"; + len = __strnlen_user(f.func, PATH_MAX) - 1; + if (unlikely(len < 0 || len >= PATH_MAX)) + f.func = "<bad function>"; + + printk(KERN_ALERT "kernel BUG in %s() at %s:%d!\n", + f.func, f.file, f.line); +} +#else +static inline void do_bug_verbose(struct pt_regs *regs) +{ +} +#endif /* CONFIG_DEBUG_BUGVERBOSE */ +#endif /* CONFIG_BUG */ + +void handle_BUG(struct pt_regs *regs) +{ + do_bug_verbose(regs); + die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); +} + /* * handle an instruction that does an unaligned memory access by emulating the * desired behaviour @@ -198,7 +237,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) if (copy_to_user(dst,src,4)) goto fetch_fault; ret = 0; - break; + break; case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ if (instruction & 4) @@ -222,7 +261,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) if (copy_from_user(dst,src,4)) goto fetch_fault; ret = 0; - break; + break; case 6: /* mov.[bwl] from memory, possibly with post-increment */ src = (unsigned char*) *rm; @@ -230,7 +269,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) *rm += count; dst = (unsigned char*) rn; *(unsigned long*)dst = 0; - + #ifdef __LITTLE_ENDIAN__ if (copy_from_user(dst, src, count)) goto fetch_fault; @@ -241,7 +280,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) } #else dst += 4-count; - + if (copy_from_user(dst, src, count)) goto fetch_fault; @@ -320,7 +359,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) return -EFAULT; /* kernel */ - die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0); + die("delay-slot-insn faulting in handle_unaligned_delayslot", + regs, 0); } return handle_unaligned_ins(instruction,regs); @@ -342,6 +382,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) +/* + * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit + * opcodes.. 
+ */ +#ifndef CONFIG_CPU_SH2A +static int handle_unaligned_notify_count = 10; + static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) { u_int rm; @@ -354,7 +401,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) if (user_mode(regs) && handle_unaligned_notify_count>0) { handle_unaligned_notify_count--; - printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", + printk(KERN_NOTICE "Fixing up unaligned userspace access " + "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", current->comm,current->pid,(u16*)regs->pc,instruction); } @@ -478,32 +526,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) regs->pc += 2; return ret; } +#endif /* CONFIG_CPU_SH2A */ + +#ifdef CONFIG_CPU_HAS_SR_RB +#define lookup_exception_vector(x) \ + __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) +#else +#define lookup_exception_vector(x) \ + __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) +#endif /* - * Handle various address error exceptions + * Handle various address error exceptions: + * - instruction address error: + * misaligned PC + * PC >= 0x80000000 in user mode + * - data address error (read and write) + * misaligned data access + * access to >= 0x80000000 is user mode + * Unfortuntaly we can't distinguish between instruction address error + * and data address errors caused by read acceses. */ -asmlinkage void do_address_error(struct pt_regs *regs, +asmlinkage void do_address_error(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) { - unsigned long error_code; + unsigned long error_code = 0; mm_segment_t oldfs; + siginfo_t info; +#ifndef CONFIG_CPU_SH2A u16 instruction; int tmp; +#endif - asm volatile("stc r2_bank,%0": "=r" (error_code)); + /* Intentional ifdef */ +#ifdef CONFIG_CPU_HAS_SR_RB + lookup_exception_vector(error_code); +#endif oldfs = get_fs(); if (user_mode(regs)) { + int si_code = BUS_ADRERR; + local_irq_enable(); - current->thread.error_code = error_code; - current->thread.trap_no = (writeaccess) ? 8 : 7; /* bad PC is not something we can fix */ - if (regs->pc & 1) + if (regs->pc & 1) { + si_code = BUS_ADRALN; goto uspace_segv; + } +#ifndef CONFIG_CPU_SH2A set_fs(USER_DS); if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { /* Argh. Fault on the instruction itself. @@ -518,14 +592,23 @@ asmlinkage void do_address_error(struct pt_regs *regs, if (tmp==0) return; /* sorted */ +#endif - uspace_segv: - printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm); - force_sig(SIGSEGV, current); +uspace_segv: + printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " + "access (PC %lx PR %lx)\n", current->comm, regs->pc, + regs->pr); + + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = si_code; + info.si_addr = (void *) address; + force_sig_info(SIGBUS, &info, current); } else { if (regs->pc & 1) die("unaligned program counter", regs, error_code); +#ifndef CONFIG_CPU_SH2A set_fs(KERNEL_DS); if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { /* Argh. Fault on the instruction itself. 
@@ -537,6 +620,12 @@ asmlinkage void do_address_error(struct pt_regs *regs, handle_unaligned_access(instruction, regs); set_fs(oldfs); +#else + printk(KERN_NOTICE "Killing process \"%s\" due to unaligned " + "access\n", current->comm); + + force_sig(SIGSEGV, current); +#endif } } @@ -548,7 +637,7 @@ int is_dsp_inst(struct pt_regs *regs) { unsigned short inst; - /* + /* * Safe guard if DSP mode is already enabled or we're lacking * the DSP altogether. */ @@ -569,27 +658,49 @@ int is_dsp_inst(struct pt_regs *regs) #define is_dsp_inst(regs) (0) #endif /* CONFIG_SH_DSP */ +#ifdef CONFIG_CPU_SH2A +asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs) +{ + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); + siginfo_t info; + + switch (r4) { + case TRAP_DIVZERO_ERROR: + info.si_code = FPE_INTDIV; + break; + case TRAP_DIVOVF_ERROR: + info.si_code = FPE_INTOVF; + break; + } + + force_sig_info(SIGFPE, &info, current); +} +#endif + /* arch/sh/kernel/cpu/sh4/fpu.c */ extern int do_fpu_inst(unsigned short, struct pt_regs *); extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, - unsigned long r6, unsigned long r7, struct pt_regs regs); + unsigned long r6, unsigned long r7, struct pt_regs __regs); asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); unsigned long error_code; struct task_struct *tsk = current; #ifdef CONFIG_SH_FPU_EMU - unsigned short inst; + unsigned short inst = 0; int err; - get_user(inst, (unsigned short*)regs.pc); + get_user(inst, (unsigned short*)regs->pc); - err = do_fpu_inst(inst, ®s); + err = do_fpu_inst(inst, regs); if (!err) { - regs.pc += 2; + regs->pc += 2; return; } /* not a FPU inst. */ @@ -597,20 +708,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, #ifdef CONFIG_SH_DSP /* Check if it's a DSP instruction */ - if (is_dsp_inst(®s)) { + if (is_dsp_inst(regs)) { /* Enable DSP mode, and restart instruction. */ - regs.sr |= SR_DSP; + regs->sr |= SR_DSP; return; } #endif - asm volatile("stc r2_bank, %0": "=r" (error_code)); + lookup_exception_vector(error_code); + local_irq_enable(); - tsk->thread.error_code = error_code; - tsk->thread.trap_no = TRAP_RESERVED_INST; - CHK_REMOTE_DEBUG(®s); + CHK_REMOTE_DEBUG(regs); force_sig(SIGILL, tsk); - die_if_no_fixup("reserved instruction", ®s, error_code); + die_if_no_fixup("reserved instruction", regs, error_code); } #ifdef CONFIG_SH_FPU_EMU @@ -658,39 +768,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs) asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); unsigned long error_code; struct task_struct *tsk = current; #ifdef CONFIG_SH_FPU_EMU - unsigned short inst; + unsigned short inst = 0; - get_user(inst, (unsigned short *)regs.pc + 1); - if (!do_fpu_inst(inst, ®s)) { - get_user(inst, (unsigned short *)regs.pc); - if (!emulate_branch(inst, ®s)) + get_user(inst, (unsigned short *)regs->pc + 1); + if (!do_fpu_inst(inst, regs)) { + get_user(inst, (unsigned short *)regs->pc); + if (!emulate_branch(inst, regs)) return; /* fault in branch.*/ } /* not a FPU inst. 
*/ #endif - asm volatile("stc r2_bank, %0": "=r" (error_code)); + lookup_exception_vector(error_code); + local_irq_enable(); - tsk->thread.error_code = error_code; - tsk->thread.trap_no = TRAP_RESERVED_INST; - CHK_REMOTE_DEBUG(®s); + CHK_REMOTE_DEBUG(regs); force_sig(SIGILL, tsk); - die_if_no_fixup("illegal slot instruction", ®s, error_code); + die_if_no_fixup("illegal slot instruction", regs, error_code); } asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, - struct pt_regs regs) + struct pt_regs __regs) { + struct pt_regs *regs = RELOC_HIDE(&__regs, 0); long ex; - asm volatile("stc r2_bank, %0" : "=r" (ex)); - die_if_kernel("exception", ®s, ex); + + lookup_exception_vector(ex); + die_if_kernel("exception", regs, ex); } #if defined(CONFIG_SH_STANDARD_BIOS) @@ -735,12 +847,16 @@ void *set_exception_table_vec(unsigned int vec, void *handler) { extern void *exception_handling_table[]; void *old_handler; - + old_handler = exception_handling_table[vec]; exception_handling_table[vec] = handler; return old_handler; } +extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs __regs); + void __init trap_init(void) { set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); @@ -759,7 +875,15 @@ void __init trap_init(void) set_exception_table_evt(0x800, do_fpu_state_restore); set_exception_table_evt(0x820, do_fpu_state_restore); #endif - + +#ifdef CONFIG_CPU_SH2 + set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler); +#endif +#ifdef CONFIG_CPU_SH2A + set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); + set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); +#endif + /* Setup VBR for boot cpu */ per_cpu_trap_init(); } @@ -784,6 +908,11 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, } printk("\n"); + + if (!tsk) + tsk = current; + + debug_show_held_locks(tsk); } void show_stack(struct task_struct *tsk, unsigned long *sp) diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 77b4026d568..f34bdcc33a7 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -51,7 +51,7 @@ SECTIONS } . = ALIGN(PAGE_SIZE); - .data.page_aligned : { *(.data.idt) } + .data.page_aligned : { *(.data.page_aligned) } . = ALIGN(32); __per_cpu_start = .; diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 075d6cc1a2d..deb46941f31 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -97,7 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, goto up_fail; } - vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { ret = -ENOMEM; goto up_fail; |
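
Editor's note: the prescaler arithmetic shared by the CMT and MTU2 setup code above (cmt_clk_init()/cmt_timer_init() and mtu2_clk_init()/mtu2_timer_init()) can be sanity-checked outside the kernel. What follows is a minimal host-side C sketch, not kernel code and not part of this diff: the 33.33 MHz module clock and HZ=100 are illustrative assumptions, and the CMT case assumes the low two CMCSR bits are zero, which is what the "/ 8 / HZ" in cmt_timer_init() corresponds to.

/*
 * Host-side check of the CMT/MTU2 reload-value arithmetic.
 * Build: cc -Wall -o timer-calc timer-calc.c && ./timer-calc
 * EXAMPLE_PCLK and EXAMPLE_HZ are assumed values, not board facts.
 */
#include <stdio.h>

#define EXAMPLE_PCLK	33333333UL	/* assumed "module_clk" rate */
#define EXAMPLE_HZ	100UL		/* assumed tick frequency */

/* MTU2: TCR[2:0] indexes this prescaler table (mtu2_clk_init/recalc) */
static const unsigned long mtu2_divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
#define MTU2_TCR_INIT	0x22		/* value programmed in timer-mtu2.c */

/* CMT: CMCSR[1:0] selects module_clk/8, /32, /128 or /512 (cmt_clk_init) */
static unsigned long cmt_divisor(unsigned int cmcsr)
{
	return 8UL << ((cmcsr & 0x3) << 1);
}

int main(void)
{
	unsigned long rate, reload;

	/* MTU2 channel 1: 0x22 & 0x7 = 2, so the counter runs at module_clk/16 */
	rate = EXAMPLE_PCLK / mtu2_divisors[MTU2_TCR_INIT & 0x7];
	reload = rate / EXAMPLE_HZ;
	printf("MTU2: rate=%lu Hz, TGRA_1 reload=%lu\n", rate, reload);

	/* CMT channel 0: divisor bits 00 give module_clk/8, matching the /8/HZ */
	rate = EXAMPLE_PCLK / cmt_divisor(0);
	reload = rate / EXAMPLE_HZ;
	printf("CMT:  rate=%lu Hz, CMCOR_0 reload=%lu\n", rate, reload);

	return 0;
}

With the example numbers this prints reload values of 20833 for MTU2 and 41666 for CMT, both of which fit in the 16-bit TGRA_1/CMCOR_0 compare-match registers that the init routines write with ctrl_outw().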