Diffstat (limited to 'arch/sh/kernel/cpu/init.c')
 -rw-r--r--  arch/sh/kernel/cpu/init.c  |  261
 1 file changed, 178 insertions, 83 deletions
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 9172e97dc26..0d7360d549c 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
  *
  * CPU init code
  *
- * Copyright (C) 2002 - 2007  Paul Mundt
+ * Copyright (C) 2002 - 2009  Paul Mundt
  * Copyright (C) 2003  Richard Curnow
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -13,28 +13,43 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/log2.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/cache.h>
+#include <asm/elf.h>
 #include <asm/io.h>
-#include <asm/ubc.h>
+#include <asm/smp.h>
+#include <asm/sh_bios.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_SH_FPU
+#define cpu_has_fpu	1
+#else
+#define cpu_has_fpu	0
+#endif
+
+#ifdef CONFIG_SH_DSP
+#define cpu_has_dsp	1
+#else
+#define cpu_has_dsp	0
+#endif
 
 /*
  * Generic wrapper for command line arguments to disable on-chip
  * peripherals (nofpu, nodsp, and so forth).
  */
-#define onchip_setup(x)				\
-static int x##_disabled __initdata = 0;		\
-						\
-static int __init x##_setup(char *opts)	\
-{						\
-	x##_disabled = 1;			\
-	return 1;				\
-}						\
+#define onchip_setup(x)				\
+static int x##_disabled = !cpu_has_##x;		\
+						\
+static int x##_setup(char *opts)		\
+{						\
+	x##_disabled = 1;			\
+	return 1;				\
+}						\
 __setup("no" __stringify(x), x##_setup);
 
 onchip_setup(fpu);
@@ -44,42 +59,60 @@ onchip_setup(dsp);
 #define CPUOPM		0xff2f0000
 #define CPUOPM_RABD	(1 << 5)
 
-static void __init speculative_execution_init(void)
+static void speculative_execution_init(void)
 {
 	/* Clear RABD */
-	ctrl_outl(ctrl_inl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
+	__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
 
 	/* Flush the update */
-	(void)ctrl_inl(CPUOPM);
+	(void)__raw_readl(CPUOPM);
 	ctrl_barrier();
 }
 #else
 #define speculative_execution_init()	do { } while (0)
 #endif
 
-/*
- * Generic first-level cache init
- */
-static void __init cache_init(void)
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK			0xff2f0004
+#define EXPMASK_RTEDS		(1 << 0)
+#define EXPMASK_BRDSSLP		(1 << 1)
+#define EXPMASK_MMCAW		(1 << 4)
+
+static void expmask_init(void)
 {
-	unsigned long ccr, flags;
+	unsigned long expmask = __raw_readl(EXPMASK);
 
-	/* First setup the rest of the I-cache info */
-	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
-				      current_cpu_data.icache.linesz;
+	/*
+	 * Future proofing.
+	 *
+	 * Disable support for slottable sleep instruction, non-nop
+	 * instructions in the rte delay slot, and associative writes to
+	 * the memory-mapped cache array.
+	 */
+	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
 
-	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
-				    current_cpu_data.icache.linesz;
+	__raw_writel(expmask, EXPMASK);
+	ctrl_barrier();
+}
+#else
+#define expmask_init()	do { } while (0)
+#endif
 
-	/* And the D-cache too */
-	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
-				      current_cpu_data.dcache.linesz;
+/* 2nd-level cache init */
+void __attribute__ ((weak)) l2_cache_init(void)
+{
+}
 
-	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
-				    current_cpu_data.dcache.linesz;
+/*
+ * Generic first-level cache init
+ */
+#ifdef CONFIG_SUPERH32
+static void cache_init(void)
+{
+	unsigned long ccr, flags;
 
-	jump_to_P2();
-	ccr = ctrl_inl(CCR);
+	jump_to_uncached();
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -123,7 +156,7 @@ static void __init cache_init(void)
 		for (addr = addrstart;
 		     addr < addrstart + waysize;
 		     addr += current_cpu_data.dcache.linesz)
-			ctrl_outl(0, addr);
+			__raw_writel(0, addr);
 
 		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
@@ -143,20 +176,61 @@ static void __init cache_init(void)
 	flags &= ~CCR_CACHE_EMODE;
 #endif
 
-#ifdef CONFIG_SH_WRITETHROUGH
-	/* Turn on Write-through caching */
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+	/* Write-through */
 	flags |= CCR_CACHE_WT;
-#else
-	/* .. or default to Write-back */
+#elif defined(CONFIG_CACHE_WRITEBACK)
+	/* Write-back */
 	flags |= CCR_CACHE_CB;
+#else
+	/* Off */
+	flags &= ~CCR_CACHE_ENABLE;
 #endif
 
-	ctrl_outl(flags, CCR);
-	back_to_P1();
+	l2_cache_init();
+
+	__raw_writel(flags, SH_CCR);
+	back_to_cached();
+}
+#else
+#define cache_init()	do { } while (0)
+#endif
+
+#define CSHAPE(totalsize, linesize, assoc) \
+	((totalsize & ~0xff) | (linesize << 4) | assoc)
+
+#define CACHE_DESC_SHAPE(desc)	\
+	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
+
+static void detect_cache_shape(void)
+{
+	l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
+
+	if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
+		l1i_cache_shape = l1d_cache_shape;
+	else
+		l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
+
+	if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
+		l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
+	else
+		l2_cache_shape = -1; /* No S-cache */
+}
+
+static void fpu_init(void)
+{
+	/* Disable the FPU */
+	if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
+		printk("FPU Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_FPU;
+	}
+
+	disable_fpu();
+	clear_used_math();
 }
 
 #ifdef CONFIG_SH_DSP
-static void __init release_dsp(void)
+static void release_dsp(void)
 {
 	unsigned long sr;
 
@@ -170,7 +244,7 @@ static void __init release_dsp(void)
 	);
 }
 
-static void __init dsp_init(void)
+static void dsp_init(void)
 {
 	unsigned long sr;
 
@@ -192,54 +266,73 @@ static void __init dsp_init(void)
 	if (sr & SR_DSP)
 		current_cpu_data.flags |= CPU_HAS_DSP;
 
+	/* Disable the DSP */
+	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
+		printk("DSP Disabled\n");
+		current_cpu_data.flags &= ~CPU_HAS_DSP;
+	}
+
 	/* Now that we've determined the DSP status, clear the DSP bit. */
 	release_dsp();
 }
+#else
+static inline void dsp_init(void) { }
 #endif /* CONFIG_SH_DSP */
 
 /**
- * sh_cpu_init
+ * cpu_init
  *
- * This is our initial entry point for each CPU, and is invoked on the boot
- * CPU prior to calling start_kernel(). For SMP, a combination of this and
- * start_secondary() will bring up each processor to a ready state prior
- * to hand forking the idle loop.
+ * This is our initial entry point for each CPU, and is invoked on the
+ * boot CPU prior to calling start_kernel(). For SMP, a combination of
+ * this and start_secondary() will bring up each processor to a ready
+ * state prior to hand forking the idle loop.
  *
- * We do all of the basic processor init here, including setting up the
- * caches, FPU, DSP, kicking the UBC, etc. By the time start_kernel() is
- * hit (and subsequently platform_setup()) things like determining the
- * CPU subtype and initial configuration will all be done.
+ * We do all of the basic processor init here, including setting up
+ * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
+ * subsequently platform_setup()) things like determining the CPU
+ * subtype and initial configuration will all be done.
  *
  * Each processor family is still responsible for doing its own probing
- * and cache configuration in detect_cpu_and_cache_system().
+ * and cache configuration in cpu_probe().
  */
-asmlinkage void __init sh_cpu_init(void)
+asmlinkage void cpu_init(void)
 {
+	current_thread_info()->cpu = hard_smp_processor_id();
+
 	/* First, probe the CPU */
-	detect_cpu_and_cache_system();
+	cpu_probe();
 
 	if (current_cpu_data.type == CPU_SH_NONE)
 		panic("Unknown CPU");
 
+	/* First setup the rest of the I-cache info */
+	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
+				      current_cpu_data.icache.linesz;
+
+	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
+				    current_cpu_data.icache.linesz;
+
+	/* And the D-cache too */
+	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
+				      current_cpu_data.dcache.linesz;
+
+	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
+				    current_cpu_data.dcache.linesz;
+
 	/* Init the cache */
 	cache_init();
 
-	shm_align_mask = max_t(unsigned long,
-			       current_cpu_data.dcache.way_size - 1,
-			       PAGE_SIZE - 1);
+	if (raw_smp_processor_id() == 0) {
+		shm_align_mask = max_t(unsigned long,
+				       current_cpu_data.dcache.way_size - 1,
+				       PAGE_SIZE - 1);
 
-	/* Disable the FPU */
-	if (fpu_disabled) {
-		printk("FPU Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_FPU;
-		disable_fpu();
+		/* Boot CPU sets the cache shape */
+		detect_cache_shape();
 	}
 
-	/* FPU initialization */
-	if ((current_cpu_data.flags & CPU_HAS_FPU)) {
-		clear_thread_flag(TIF_USEDFPU);
-		clear_used_math();
-	}
+	fpu_init();
+	dsp_init();
 
 	/*
 	 * Initialize the per-CPU ASID cache very early, since the
@@ -247,24 +340,26 @@ asmlinkage void __init sh_cpu_init(void)
 	 */
 	current_cpu_data.asid_cache = NO_CONTEXT;
 
-#ifdef CONFIG_SH_DSP
-	/* Probe for DSP */
-	dsp_init();
+	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
 
-	/* Disable the DSP */
-	if (dsp_disabled) {
-		printk("DSP Disabled\n");
-		current_cpu_data.flags &= ~CPU_HAS_DSP;
-		release_dsp();
-	}
-#endif
-
-	/*
-	 * Some brain-damaged loaders decided it would be a good idea to put
-	 * the UBC to sleep. This causes some issues when it comes to things
-	 * like PTRACE_SINGLESTEP or doing hardware watchpoints in GDB. So ..
-	 * we wake it up and hope that all is well.
-	 */
-	ubc_wakeup();
 	speculative_execution_init();
+	expmask_init();
+
+	/* Do the rest of the boot processor setup */
+	if (raw_smp_processor_id() == 0) {
+		/* Save off the BIOS VBR, if there is one */
+		sh_bios_vbr_init();
+
+		/*
+		 * Setup VBR for boot CPU. Secondary CPUs do this through
+		 * start_secondary().
+		 */
+		per_cpu_trap_init();
+
+		/*
+		 * Boot processor to setup the FP and extended state
+		 * context info.
+		 */
+		init_thread_xstate();
+	}
 }
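As an aside on the reworked onchip_setup() macro in the second hunk above: the token pasting is easier to see outside the kernel. Below is a minimal user-space sketch, not kernel code; the __setup() and __stringify() helpers are replaced by stand-ins and the __init/__initdata annotations are dropped. It shows how onchip_setup(fpu) expands into the fpu_disabled flag (defaulting from cpu_has_fpu) and the fpu_setup() handler that the kernel would wire to the "nofpu" boot option.

#include <stdio.h>

/* User-space stand-ins for the kernel helpers the macro relies on. */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)
#define __setup(str, fn)	\
	static const char *fn##_bootopt __attribute__((unused)) = str

#define cpu_has_fpu	1	/* pretend CONFIG_SH_FPU=y */

/* The patch's onchip_setup() macro, adapted for the stand-ins above. */
#define onchip_setup(x)				\
static int x##_disabled = !cpu_has_##x;		\
						\
static int x##_setup(char *opts)		\
{						\
	(void)opts;				\
	x##_disabled = 1;			\
	return 1;				\
}						\
__setup("no" __stringify(x), x##_setup)

onchip_setup(fpu);

int main(void)
{
	printf("fpu_disabled before \"nofpu\": %d\n", fpu_disabled);
	fpu_setup(NULL);	/* what parsing a "nofpu" boot argument would trigger */
	printf("fpu_disabled after  \"nofpu\": %d\n", fpu_disabled);
	return 0;
}

With the cpu_has_fpu default from the patch, fpu_disabled starts at 0 when FPU support is configured in, and the generated fpu_setup() handler flips it to 1 when the corresponding "no" option is seen.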
