Diffstat (limited to 'arch/metag/kernel')
 arch/metag/kernel/cachepart.c       |  29
 arch/metag/kernel/clock.c           |  59
 arch/metag/kernel/da.c              |   2
 arch/metag/kernel/devtree.c         |  83
 arch/metag/kernel/dma.c             |   9
 arch/metag/kernel/ftrace.c          |   5
 arch/metag/kernel/head.S            |   8
 arch/metag/kernel/irq.c             |  75
 arch/metag/kernel/kick.c            |   9
 arch/metag/kernel/metag_ksyms.c     |   5
 arch/metag/kernel/perf/perf_event.c |  82
 arch/metag/kernel/process.c         |  37
 arch/metag/kernel/ptrace.c          |  34
 arch/metag/kernel/setup.c           |  34
 arch/metag/kernel/signal.c          |  48
 arch/metag/kernel/smp.c             | 175
 arch/metag/kernel/time.c            |  14
 arch/metag/kernel/topology.c        |   1
 arch/metag/kernel/traps.c           |  18
 19 files changed, 429 insertions(+), 298 deletions(-)
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
index 3a589dfb966..0a2385fa2a1 100644
--- a/arch/metag/kernel/cachepart.c
+++ b/arch/metag/kernel/cachepart.c
@@ -24,15 +24,21 @@
 unsigned int get_dcache_size(void)
 {
     unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
-    return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
-            >> METAC_CORECFG2_DCSZ_S);
+    unsigned int sz = 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
+                                 >> METAC_CORECFG2_DCSZ_S);
+    if (config2 & METAC_CORECFG2_DCSMALL_BIT)
+        sz >>= 6;
+    return sz;
 }
 
 unsigned int get_icache_size(void)
 {
     unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
-    return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
-            >> METAC_CORE_C2ICSZ_S);
+    unsigned int sz = 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
+                                 >> METAC_CORE_C2ICSZ_S);
+    if (config2 & METAC_CORECFG2_ICSMALL_BIT)
+        sz >>= 6;
+    return sz;
 }
 
 unsigned int get_global_dcache_size(void)
@@ -61,7 +67,7 @@ static unsigned int get_thread_cache_size(unsigned int cache, int thread_id)
         return 0;
 #if PAGE_OFFSET >= LINGLOBAL_BASE
     /* Checking for global cache */
-    cache_size = (cache == DCACHE ? get_global_dache_size() :
+    cache_size = (cache == DCACHE ? get_global_dcache_size() :
         get_global_icache_size());
     offset = 8;
 #else
@@ -94,22 +100,23 @@ void check_for_cache_aliasing(int thread_id)
         thread_cache_size =
                 get_thread_cache_size(cache_type, thread_id);
         if (thread_cache_size < 0)
-            pr_emerg("Can't read %s cache size", \
+            pr_emerg("Can't read %s cache size\n",
                  cache_type ? "DCACHE" : "ICACHE");
         else if (thread_cache_size == 0)
             /* Cache is off. No need to check for aliasing */
             continue;
         if (thread_cache_size / CACHE_ASSOCIATIVITY > PAGE_SIZE) {
-            pr_emerg("Cache aliasing detected in %s on Thread %d",
+            pr_emerg("Potential cache aliasing detected in %s on Thread %d\n",
                  cache_type ? "DCACHE" : "ICACHE", thread_id);
-            pr_warn("Total %s size: %u bytes",
-                cache_type ? "DCACHE" : "ICACHE ",
+            pr_warn("Total %s size: %u bytes\n",
+                cache_type ? "DCACHE" : "ICACHE",
                 cache_type ? get_dcache_size()
                 : get_icache_size());
-            pr_warn("Thread %s size: %d bytes",
+            pr_warn("Thread %s size: %d bytes\n",
                 cache_type ? "CACHE" : "ICACHE",
                 thread_cache_size);
-            pr_warn("Page Size: %lu bytes", PAGE_SIZE);
+            pr_warn("Page Size: %lu bytes\n", PAGE_SIZE);
+            panic("Potential cache aliasing detected");
         }
     }
 }
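The new panic() fires when one cache way spans more than a page, which is the point at which a virtually-indexed cache can hold two copies of the same physical line under different virtual indexes. A minimal sketch of the condition the hunk tests (helper name is hypothetical; the quantities mirror check_for_cache_aliasing() above):

/* Sketch only: why the check divides the thread's cache size by the
 * associativity. Aliasing becomes possible as soon as a single way is
 * larger than a page, because the cache index then uses virtual address
 * bits that page translation is free to change. */
static bool cache_way_exceeds_page(unsigned int thread_cache_size,
                                   unsigned int associativity,
                                   unsigned long page_size)
{
    unsigned int way_size = thread_cache_size / associativity;

    return way_size > page_size;    /* mirrors the panic condition */
}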
diff --git a/arch/metag/kernel/clock.c b/arch/metag/kernel/clock.c
index defc84056f1..6339c9c6d0a 100644
--- a/arch/metag/kernel/clock.c
+++ b/arch/metag/kernel/clock.c
@@ -8,8 +8,10 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/of.h>
 
 #include <asm/param.h>
 #include <asm/clock.h>
@@ -34,8 +36,63 @@ static unsigned long get_core_freq_default(void)
 #endif
 }
 
+static struct clk *clk_core;
+
+/* Clk based get_core_freq callback. */
+static unsigned long get_core_freq_clk(void)
+{
+    return clk_get_rate(clk_core);
+}
+
+/**
+ * init_metag_core_clock() - Set up core clock from devicetree.
+ *
+ * Checks to see if a "core" clock is provided in the device tree, and overrides
+ * the get_core_freq callback to use it.
+ */
+static void __init init_metag_core_clock(void)
+{
+    /*
+     * See if a core clock is provided by the devicetree (and
+     * registered by the init callback above).
+     */
+    struct device_node *node;
+    node = of_find_compatible_node(NULL, NULL, "img,meta");
+    if (!node) {
+        pr_warn("%s: no compatible img,meta DT node found\n",
+            __func__);
+        return;
+    }
+
+    clk_core = of_clk_get_by_name(node, "core");
+    if (IS_ERR(clk_core)) {
+        pr_warn("%s: no core clock found in DT\n",
+            __func__);
+        return;
+    }
+
+    /*
+     * Override the core frequency callback to use
+     * this clk.
+     */
+    _meta_clock.get_core_freq = get_core_freq_clk;
+}
+
+/**
+ * init_metag_clocks() - Set up clocks from devicetree.
+ *
+ * Set up important clocks from device tree. In particular any needed for clock
+ * sources.
+ */
+void __init init_metag_clocks(void)
+{
+    init_metag_core_clock();
+
+    pr_info("Core clock frequency: %lu Hz\n", get_coreclock());
+}
+
 /**
- * setup_meta_clocks() - Set up the Meta clock.
+ * setup_meta_clocks() - Early set up of the Meta clock.
  * @desc:	Clock descriptor usually provided by machine description
  *
  * Ensures all callbacks are valid.
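The clock.c changes follow a callback-override pattern: boot starts with a static default, and init_metag_core_clock() swaps in a clk-backed callback only when the device tree provides one. A hedged sketch of that shape; the real descriptor is struct meta_clock_desc in asm/clock.h and may carry more fields:

/* Simplified stand-in for the real descriptor in asm/clock.h */
struct meta_clock_desc {
    unsigned long (*get_core_freq)(void);   /* always points somewhere valid */
};

static struct meta_clock_desc _meta_clock = {
    .get_core_freq = get_core_freq_default, /* boot-time fallback */
};

unsigned long get_coreclock(void)
{
    /* callers are insulated from whether the rate came from DT or a default */
    return _meta_clock.get_core_freq();
}

The benefit of the indirection is that timer and delay code can call get_coreclock() unconditionally, before and after the common clock framework comes up.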
diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c
index 52aabb658fd..a35dbed6fff 100644
--- a/arch/metag/kernel/da.c
+++ b/arch/metag/kernel/da.c
@@ -5,12 +5,14 @@
  */
 
+#include <linux/export.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 
 #include <asm/da.h>
 #include <asm/metag_mem.h>
 
 bool _metag_da_present;
+EXPORT_SYMBOL_GPL(_metag_da_present);
 
 int __init metag_da_probe(void)
 {
diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c
index 7cd02529636..18dd7aea9fd 100644
--- a/arch/metag/kernel/devtree.c
+++ b/arch/metag/kernel/devtree.c
@@ -34,6 +34,19 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
     return alloc_bootmem_align(size, align);
 }
 
+static const void * __init arch_get_next_mach(const char *const **match)
+{
+    static const struct machine_desc *mdesc = __arch_info_begin;
+    const struct machine_desc *m = mdesc;
+
+    if (m >= __arch_info_end)
+        return NULL;
+
+    mdesc++;
+    *match = m->dt_compat;
+    return m;
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt:		virtual address pointer to dt blob
  *
@@ -41,74 +54,18 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
  * If a dtb was passed to the kernel, then use it to choose the correct
  * machine_desc and to setup the system.
  */
-struct machine_desc * __init setup_machine_fdt(void *dt)
+const struct machine_desc * __init setup_machine_fdt(void *dt)
 {
-    struct boot_param_header *devtree = dt;
-    struct machine_desc *mdesc, *mdesc_best = NULL;
-    unsigned int score, mdesc_score = ~1;
-    unsigned long dt_root;
-    const char *model;
+    const struct machine_desc *mdesc;
 
     /* check device tree validity */
-    if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+    if (!early_init_dt_scan(dt))
         return NULL;
 
-    /* Search the mdescs for the 'best' compatible value match */
-    initial_boot_params = devtree;
-    dt_root = of_get_flat_dt_root();
-
-    for_each_machine_desc(mdesc) {
-        score = of_flat_dt_match(dt_root, mdesc->dt_compat);
-        if (score > 0 && score < mdesc_score) {
-            mdesc_best = mdesc;
-            mdesc_score = score;
-        }
-    }
-    if (!mdesc_best) {
-        const char *prop;
-        long size;
-
-        pr_err("\nError: unrecognized/unsupported device tree compatible list:\n[ ");
-
-        prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
-        if (prop) {
-            while (size > 0) {
-                printk("'%s' ", prop);
-                size -= strlen(prop) + 1;
-                prop += strlen(prop) + 1;
-            }
-        }
-        printk("]\n\n");
-
+    mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
+    if (!mdesc)
         dump_machine_table(); /* does not return */
-    }
-
-    model = of_get_flat_dt_prop(dt_root, "model", NULL);
-    if (!model)
-        model = of_get_flat_dt_prop(dt_root, "compatible", NULL);
-    if (!model)
-        model = "<unknown>";
-    pr_info("Machine: %s, model: %s\n", mdesc_best->name, model);
-
-    /* Retrieve various information from the /chosen node */
-    of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
-
-    return mdesc_best;
-}
+    pr_info("Machine name: %s\n", mdesc->name);
 
-/**
- * copy_fdt - Copy device tree into non-init memory.
- *
- * We must copy the flattened device tree blob into non-init memory because the
- * unflattened device tree will reference the strings in it directly.
- */
-void __init copy_fdt(void)
-{
-    void *alloc = early_init_dt_alloc_memory_arch(
-            be32_to_cpu(initial_boot_params->totalsize), 0x40);
-    if (alloc) {
-        memcpy(alloc, initial_boot_params,
-               be32_to_cpu(initial_boot_params->totalsize));
-        initial_boot_params = alloc;
-    }
+    return mdesc;
 }
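of_flat_dt_match_machine() drives arch_get_next_mach() as a stateful iterator: each call must yield the next machine_desc and expose its NULL-terminated dt_compat list through *match, returning NULL when the table is exhausted. A rough sketch of the consumer side, simplified to first-match (the real helper scores every compat list against the DT root and keeps the best; function name here is hypothetical):

static const void *match_machine_sketch(
    const void *(*get_next)(const char *const **))
{
    const char *const *compat;
    const void *m;

    while ((m = get_next(&compat)) != NULL) {
        /* real code: of_flat_dt_match() scores compat against the DT root */
        if (compat && compat[0])
            return m;   /* simplified: first candidate wins */
    }
    return NULL;
}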
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index 8c00dedadc5..c700d625067 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -305,9 +305,7 @@ void dma_free_coherent(struct device *dev, size_t size,
         if (pfn_valid(pfn)) {
             struct page *page = pfn_to_page(pfn);
 
-            ClearPageReserved(page);
-
-            __free_page(page);
+            __free_reserved_page(page);
             continue;
         }
     }
@@ -401,11 +399,6 @@ static int __init dma_alloc_init(void)
         pgd = pgd_offset(&init_mm, CONSISTENT_START);
         pud = pud_alloc(&init_mm, pgd, CONSISTENT_START);
         pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START);
-        if (!pmd) {
-            pr_err("%s: no pmd tables\n", __func__);
-            ret = -ENOMEM;
-            break;
-        }
         WARN_ON(!pmd_none(*pmd));
 
         pte = pte_alloc_kernel(pmd, CONSISTENT_START);
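__free_reserved_page() collapses the removed ClearPageReserved()/__free_page() pair into a single mm helper; it also resets the page's refcount, which the open-coded version relied on already being correct. Roughly equivalent, as a sketch (helper name hypothetical; the real helper lives in the mm core):

static inline void free_reserved_coherent_page(struct page *page)
{
    ClearPageReserved(page);    /* undo the reservation made at alloc time */
    init_page_count(page);      /* refcount back to 1 */
    __free_page(page);          /* return the page to the buddy allocator */
}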
diff --git a/arch/metag/kernel/ftrace.c b/arch/metag/kernel/ftrace.c
index a774f321643..ed1d685157c 100644
--- a/arch/metag/kernel/ftrace.c
+++ b/arch/metag/kernel/ftrace.c
@@ -117,10 +117,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 }
 
 /* run from kstop_machine */
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-    /* The return code is returned via data */
-    writel(0, data);
-
     return 0;
 }
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S
index 969dffabc03..713f71d1bdf 100644
--- a/arch/metag/kernel/head.S
+++ b/arch/metag/kernel/head.S
@@ -1,6 +1,7 @@
 	! Copyright 2005,2006,2007,2009 Imagination Technologies
 
 #include <linux/init.h>
+#include <asm/metag_mem.h>
 #include <generated/asm-offsets.h>
 #undef __exit
 
@@ -48,6 +49,13 @@ __exit:
 	.global _secondary_startup
 	.type _secondary_startup,function
 _secondary_startup:
+#if CONFIG_PAGE_OFFSET < LINGLOBAL_BASE
+	! In case GCOn has just been turned on we need to fence any writes that
+	! the boot thread might have performed prior to coherency taking effect.
+	MOVT	D0Re0,#HI(LINSYSEVENT_WR_ATOMIC_UNLOCK)
+	MOV	D1Re0,#0
+	SETD	[D0Re0], D1Re0
+#endif
 	MOVT	A0StP,#HI(_secondary_data_stack)
 	ADD	A0StP,A0StP,#LO(_secondary_data_stack)
 	GETD	A0StP,[A0StP]
diff --git a/arch/metag/kernel/irq.c b/arch/metag/kernel/irq.c
index 87707efeb0a..5385dd1216b 100644
--- a/arch/metag/kernel/irq.c
+++ b/arch/metag/kernel/irq.c
@@ -25,7 +25,7 @@ static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
-struct irq_domain *root_domain;
+static struct irq_domain *root_domain;
 
 static unsigned int startup_meta_irq(struct irq_data *data)
 {
@@ -159,44 +159,30 @@ void irq_ctx_exit(int cpu)
 
 extern asmlinkage void __do_softirq(void);
 
-asmlinkage void do_softirq(void)
+void do_softirq_own_stack(void)
 {
-    unsigned long flags;
     struct thread_info *curctx;
     union irq_ctx *irqctx;
     u32 *isp;
 
-    if (in_interrupt())
-        return;
-
-    local_irq_save(flags);
-
-    if (local_softirq_pending()) {
-        curctx = current_thread_info();
-        irqctx = softirq_ctx[smp_processor_id()];
-        irqctx->tinfo.task = curctx->task;
-
-        /* build the stack frame on the softirq stack */
-        isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
-
-        asm volatile (
-            "MOV   D0.5,%0\n"
-            "SWAP  A0StP,D0.5\n"
-            "CALLR D1RtP,___do_softirq\n"
-            "MOV   A0StP,D0.5\n"
-            :
-            : "r" (isp)
-            : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
-              "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
-              "D0.5"
-            );
-        /*
-         * Shouldn't happen, we returned above if in_interrupt():
-         */
-        WARN_ON_ONCE(softirq_count());
-    }
-
-    local_irq_restore(flags);
+    curctx = current_thread_info();
+    irqctx = softirq_ctx[smp_processor_id()];
+    irqctx->tinfo.task = curctx->task;
+
+    /* build the stack frame on the softirq stack */
+    isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
+
+    asm volatile (
+        "MOV   D0.5,%0\n"
+        "SWAP  A0StP,D0.5\n"
+        "CALLR D1RtP,___do_softirq\n"
+        "MOV   A0StP,D0.5\n"
+        :
+        : "r" (isp)
+        : "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
+          "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
+          "D0.5"
+        );
 }
 #endif
 
@@ -275,17 +261,6 @@ int __init arch_probe_nr_irqs(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
-{
-    struct irq_desc *desc = irq_to_desc(irq);
-    struct irq_chip *chip = irq_data_get_irq_chip(data);
-
-    raw_spin_lock_irq(&desc->lock);
-    if (chip->irq_set_affinity)
-        chip->irq_set_affinity(data, cpumask_of(cpu), false);
-    raw_spin_unlock_irq(&desc->lock);
-}
-
 /*
  * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
  * the affinity settings do not allow other CPUs, force them onto any
@@ -294,10 +269,9 @@ static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
 void migrate_irqs(void)
 {
     unsigned int i, cpu = smp_processor_id();
-    struct irq_desc *desc;
 
-    for_each_irq_desc(i, desc) {
-        struct irq_data *data = irq_desc_get_irq_data(desc);
+    for_each_active_irq(i) {
+        struct irq_data *data = irq_get_irq_data(i);
         unsigned int newcpu;
 
         if (irqd_is_per_cpu(data))
@@ -313,11 +287,8 @@ void migrate_irqs(void)
                     i, cpu);
 
             cpumask_setall(data->affinity);
-            newcpu = cpumask_any_and(data->affinity,
-                         cpu_online_mask);
         }
-
-        route_irq(data, i, newcpu);
+        irq_set_affinity(i, data->affinity);
     }
 }
 #endif /* CONFIG_HOTPLUG_CPU */
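The rewritten migrate_irqs() leans on genirq: irq_set_affinity() already takes the descriptor lock and invokes the chip's irq_set_affinity method, which is everything the removed route_irq() open-coded. A condensed sketch of the per-IRQ decision it now makes (function name hypothetical; fields as in kernels of this era):

static void evacuate_irq(unsigned int irq)
{
    struct irq_data *data = irq_get_irq_data(irq);

    if (irqd_is_per_cpu(data))
        return;                         /* per-CPU IRQs cannot be moved */

    /* no online CPU left in the affinity mask? relax it to "anywhere" */
    if (cpumask_any_and(data->affinity, cpu_online_mask) >= nr_cpu_ids)
        cpumask_setall(data->affinity);

    irq_set_affinity(irq, data->affinity);  /* genirq handles locking + chip */
}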
diff --git a/arch/metag/kernel/kick.c b/arch/metag/kernel/kick.c
index 50fcbec98cd..beb37762132 100644
--- a/arch/metag/kernel/kick.c
+++ b/arch/metag/kernel/kick.c
@@ -26,6 +26,8 @@
  * pass it as an argument.
  */
 #include <linux/export.h>
+#include <linux/hardirq.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/types.h>
@@ -66,6 +68,7 @@ EXPORT_SYMBOL(kick_unregister_func);
 TBIRES
 kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
 {
+    struct pt_regs *old_regs;
     struct kick_irq_handler *kh;
     struct list_head *lh;
     int handled = 0;
@@ -79,6 +82,9 @@ kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
 
     trace_hardirqs_off();
 
+    old_regs = set_irq_regs((struct pt_regs *)State.Sig.pCtx);
+    irq_enter();
+
     /*
      * There is no need to disable interrupts here because we
      * can't nest KICK interrupts in a KICK interrupt handler.
@@ -97,5 +103,8 @@ kick_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
 
     WARN_ON(!handled);
 
+    irq_exit();
+    set_irq_regs(old_regs);
+
     return tail_end(ret);
 }
diff --git a/arch/metag/kernel/metag_ksyms.c b/arch/metag/kernel/metag_ksyms.c
index ec872ef14eb..215c94ad63a 100644
--- a/arch/metag/kernel/metag_ksyms.c
+++ b/arch/metag/kernel/metag_ksyms.c
@@ -1,5 +1,7 @@
 #include <linux/export.h>
+#include <linux/types.h>
 
+#include <asm/checksum.h>
 #include <asm/div64.h>
 #include <asm/ftrace.h>
 #include <asm/page.h>
@@ -15,6 +17,9 @@ EXPORT_SYMBOL(max_pfn);
 EXPORT_SYMBOL(min_low_pfn);
 #endif
 
+/* Network checksum functions */
+EXPORT_SYMBOL(csum_partial);
+
 /* TBI symbols */
 EXPORT_SYMBOL(__TBI);
 EXPORT_SYMBOL(__TBIFindSeg);
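kick_handler() now brackets its dispatch with the standard interrupt-entry bookkeeping; without irq_enter()/irq_exit(), in_interrupt() and RCU would not know the CPU was servicing an interrupt, and set_irq_regs() is what lets profiling and backtrace code find the interrupted context. The pattern reduced to its skeleton, as a sketch (not the actual handler):

#include <linux/hardirq.h>
#include <asm/irq_regs.h>

static void irq_entry_skeleton(struct pt_regs *regs, void (*dispatch)(void))
{
    struct pt_regs *old_regs = set_irq_regs(regs);

    irq_enter();            /* tell the core we entered hardirq context */
    dispatch();             /* walk the registered kick handlers */
    irq_exit();             /* may run pending softirqs on the way out */
    set_irq_regs(old_regs);
}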
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index a876d5ff389..5cc4d4dcf3c 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -22,9 +22,9 @@
 #include <linux/slab.h>
 
 #include <asm/core_reg.h>
-#include <asm/hwthread.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/processor.h>
 
 #include "perf_event.h"
 
@@ -40,10 +40,10 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 /* PMU admin */
 const char *perf_pmu_name(void)
 {
-    if (metag_pmu)
-        return metag_pmu->pmu.name;
+    if (!metag_pmu)
+        return NULL;
 
-    return NULL;
+    return metag_pmu->name;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_name);
 
@@ -171,6 +171,7 @@ static int metag_pmu_event_init(struct perf_event *event)
     switch (event->attr.type) {
     case PERF_TYPE_HARDWARE:
     case PERF_TYPE_HW_CACHE:
+    case PERF_TYPE_RAW:
         err = _hw_perf_event_init(event);
         break;
 
@@ -211,9 +212,10 @@ again:
     /*
      * Calculate the delta and add it to the counter.
      */
-    delta = new_raw_count - prev_raw_count;
+    delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;
 
     local64_add(delta, &event->count);
+    local64_sub(delta, &hwc->period_left);
 }
 
 int metag_pmu_event_set_period(struct perf_event *event,
@@ -223,6 +225,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
     s64 period = hwc->sample_period;
     int ret = 0;
 
+    /* The period may have been changed */
+    if (unlikely(period != hwc->last_period))
+        left += period - hwc->last_period;
+
     if (unlikely(left <= -period)) {
         left = period;
         local64_set(&hwc->period_left, left);
@@ -240,8 +246,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
     if (left > (s64)metag_pmu->max_period)
         left = metag_pmu->max_period;
 
-    if (metag_pmu->write)
-        metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+    if (metag_pmu->write) {
+        local64_set(&hwc->prev_count, -(s32)left);
+        metag_pmu->write(idx, -left & MAX_PERIOD);
+    }
 
     perf_event_update_userpage(event);
 
@@ -549,6 +557,10 @@ static int _hw_perf_event_init(struct perf_event *event)
         if (err)
             return err;
         break;
+
+    case PERF_TYPE_RAW:
+        mapping = attr->config;
+        break;
     }
 
     /* Return early if the event is unsupported */
@@ -610,15 +622,13 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
         WARN_ONCE((config != 0x100),
             "invalid configuration (%d) for counter (%d)\n",
             config, idx);
-
-        /* Reset the cycle count */
-        __core_reg_set(TXTACTCYC, 0);
+        local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
         goto unlock;
     }
 
     /* Check for a core internal or performance channel event. */
     if (tmp) {
-        void *perf_addr = (void *)PERF_COUNT(idx);
+        void *perf_addr;
 
         /*
          * Anything other than a cycle count will write the low-
@@ -632,9 +642,14 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
         case 0xf0:
             perf_addr = (void *)PERF_CHAN(idx);
             break;
+
+        default:
+            perf_addr = NULL;
+            break;
         }
 
-        metag_out32((tmp & 0x0f), perf_addr);
+        if (perf_addr)
+            metag_out32((config & 0x0f), perf_addr);
 
         /*
          * Now we use the high nibble as the performance event to
@@ -643,13 +658,21 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
         config = tmp >> 4;
     }
 
-    /*
-     * Enabled counters start from 0. Early cores clear the count on
-     * write but newer cores don't, so we make sure that the count is
-     * set to 0.
-     */
     tmp = ((config & 0xf) << 28) |
-            ((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+            ((1 << 24) << hard_processor_id());
+    if (metag_pmu->max_period)
+        /*
+         * Cores supporting overflow interrupts may have had the counter
+         * set to a specific value that needs preserving.
+         */
+        tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+    else
+        /*
+         * Older cores reset the counter on write, so prev_count needs
+         * resetting too so we can calculate a correct delta.
+         */
+        local64_set(&event->prev_count, 0);
+
     metag_out32(tmp, PERF_COUNT(idx));
 unlock:
     raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
@@ -693,9 +716,8 @@ static u64 metag_pmu_read_counter(int idx)
 {
     u32 tmp = 0;
 
-    /* The act of reading the cycle counter also clears it */
     if (METAG_INST_COUNTER == idx) {
-        __core_reg_swap(TXTACTCYC, tmp);
+        tmp = __core_reg_get(TXTACTCYC);
         goto out;
     }
 
@@ -764,10 +786,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
 
     /*
      * Enable the counter again once core overflow processing has
-     * completed.
+     * completed. Note the counter value may have been modified while it was
+     * inactive to set it up ready for the next interrupt.
     */
-    if (!perf_event_overflow(event, &sampledata, regs))
+    if (!perf_event_overflow(event, &sampledata, regs)) {
+        __global_lock2(flags);
+        counter = (counter & 0xff000000) |
+              (metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
         metag_out32(counter, PERF_COUNT(idx));
+        __global_unlock2(flags);
+    }
 
     return IRQ_HANDLED;
 }
@@ -785,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
 };
 
 /* PMU CPU hotplug notifier */
-static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
-        unsigned long action, void *hcpu)
+static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
+                void *hcpu)
 {
     unsigned int cpu = (unsigned int)hcpu;
     struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -800,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
     return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata metag_pmu_notifier = {
+static struct notifier_block metag_pmu_notifier = {
     .notifier_call = metag_pmu_cpu_notify,
 };
 
@@ -830,7 +858,7 @@ static int __init init_hw_perf_events(void)
             metag_pmu->max_period = 0;
         }
 
-        metag_pmu->name = "Meta 2";
+        metag_pmu->name = "meta2";
         metag_pmu->version = version;
         metag_pmu->pmu = pmu;
     }
@@ -854,7 +882,7 @@ static int __init init_hw_perf_events(void)
     }
 
     register_cpu_notifier(&metag_pmu_notifier);
-    ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW);
+    ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
 out:
     return ret;
 }
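The delta fix matters when the hardware counter wraps between two reads. Masking the subtraction to the counter's width keeps the result correct across a wrap; a worked sketch (the 24-bit mask in the comment is purely illustrative — the driver masks with its own MAX_PERIOD):

static u32 counter_delta(u32 new, u32 prev, u32 mask)
{
    /*
     * e.g. mask = 0x00ffffff, prev = 0x00fffff0, new = 0x00000010:
     * (new - prev) wraps to 0xff000020 in 32 bits; masking yields
     * 0x20, the number of events that actually occurred.
     */
    return (new - prev) & mask;     /* mask = MAX_PERIOD in the driver */
}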
*/ - if (!perf_event_overflow(event, &sampledata, regs)) + if (!perf_event_overflow(event, &sampledata, regs)) { + __global_lock2(flags); + counter = (counter & 0xff000000) | + (metag_in32(PERF_COUNT(idx)) & 0x00ffffff); metag_out32(counter, PERF_COUNT(idx)); + __global_unlock2(flags); + } return IRQ_HANDLED; } @@ -785,8 +813,8 @@ static struct metag_pmu _metag_pmu = { }; /* PMU CPU hotplug notifier */ -static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b, - unsigned long action, void *hcpu) +static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action, + void *hcpu) { unsigned int cpu = (unsigned int)hcpu; struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); @@ -800,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata metag_pmu_notifier = { +static struct notifier_block metag_pmu_notifier = { .notifier_call = metag_pmu_cpu_notify, }; @@ -830,7 +858,7 @@ static int __init init_hw_perf_events(void) metag_pmu->max_period = 0; } - metag_pmu->name = "Meta 2"; + metag_pmu->name = "meta2"; metag_pmu->version = version; metag_pmu->pmu = pmu; } @@ -854,7 +882,7 @@ static int __init init_hw_perf_events(void) } register_cpu_notifier(&metag_pmu_notifier); - ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW); + ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW); out: return ret; } diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c index c6efe62e5b7..483dff986a2 100644 --- a/arch/metag/kernel/process.c +++ b/arch/metag/kernel/process.c @@ -22,6 +22,7 @@ #include <linux/pm.h> #include <linux/syscalls.h> #include <linux/uaccess.h> +#include <linux/smp.h> #include <asm/core_reg.h> #include <asm/user_gateway.h> #include <asm/tcm.h> @@ -31,7 +32,7 @@ /* * Wait for the next interrupt and enable local interrupts */ -static inline void arch_idle(void) +void arch_cpu_idle(void) { int tmp; @@ -59,36 +60,12 @@ static inline void arch_idle(void) : "r" (get_trigger_mask())); } -void cpu_idle(void) -{ - set_thread_flag(TIF_POLLING_NRFLAG); - - while (1) { - tick_nohz_idle_enter(); - rcu_idle_enter(); - - while (!need_resched()) { - /* - * We need to disable interrupts here to ensure we don't - * miss a wakeup call. 
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 47a8828615a..7563628822b 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -288,10 +288,36 @@ static int metag_rp_state_set(struct task_struct *target,
     return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf);
 }
 
+static int metag_tls_get(struct task_struct *target,
+             const struct user_regset *regset,
+             unsigned int pos, unsigned int count,
+             void *kbuf, void __user *ubuf)
+{
+    void __user *tls = target->thread.tls_ptr;
+    return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+
+static int metag_tls_set(struct task_struct *target,
+             const struct user_regset *regset,
+             unsigned int pos, unsigned int count,
+             const void *kbuf, const void __user *ubuf)
+{
+    int ret;
+    void __user *tls;
+
+    ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+    if (ret)
+        return ret;
+
+    target->thread.tls_ptr = tls;
+    return ret;
+}
+
 enum metag_regset {
     REGSET_GENERAL,
     REGSET_CBUF,
     REGSET_READPIPE,
+    REGSET_TLS,
 };
 
 static const struct user_regset metag_regsets[] = {
@@ -319,6 +345,14 @@ static const struct user_regset metag_regsets[] = {
         .get = metag_rp_state_get,
         .set = metag_rp_state_set,
     },
+    [REGSET_TLS] = {
+        .core_note_type = NT_METAG_TLS,
+        .n = 1,
+        .size = sizeof(void *),
+        .align = sizeof(void *),
+        .get = metag_tls_get,
+        .set = metag_tls_set,
+    },
 };
 
 static const struct user_regset_view user_metag_view = {
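The new regset is reachable from userspace through PTRACE_GETREGSET with the NT_METAG_TLS note type; since .n = 1 and .size = sizeof(void *), it transfers a single pointer. A hypothetical debugger-side read, error handling omitted:

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static void *read_child_tls(pid_t pid)
{
    void *tls = NULL;
    struct iovec iov = { .iov_base = &tls, .iov_len = sizeof(tls) };

    /* one pointer-sized datum, as declared by the regset above */
    ptrace(PTRACE_GETREGSET, pid, NT_METAG_TLS, &iov);
    return tls;
}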
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index 879246170ae..31cf53d0eba 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -20,6 +20,7 @@
 #include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/of_fdt.h>
+#include <linux/of_platform.h>
 #include <linux/pfn.h>
 #include <linux/root_dev.h>
 #include <linux/sched.h>
@@ -41,7 +42,6 @@
 #include <asm/mmu.h>
 #include <asm/mmzone.h>
 #include <asm/processor.h>
-#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/traps.h>
@@ -105,16 +105,12 @@ extern char _heap_start[];
 
-#ifdef CONFIG_METAG_BUILTIN_DTB
-extern u32 __dtb_start[];
-#endif
-
 #ifdef CONFIG_DA_CONSOLE
 /* Our early channel based console driver */
 extern struct console dash_console;
 #endif
 
-struct machine_desc *machine_desc __initdata;
+const struct machine_desc *machine_desc __initdata;
 
 /*
  * Map a Linux CPU number to a hardware thread ID
@@ -124,6 +120,7 @@ struct machine_desc *machine_desc __initdata;
 u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
     [0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
 };
+EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);
 
 /*
  * Map a hardware thread ID to a Linux CPU number
@@ -300,13 +297,9 @@ void __init setup_arch(char **cmdline_p)
      * rather than the version from the bootloader. This makes call
      * stacks easier to understand and may allow us to unmap the
      * bootloader at some point.
-     *
-     * We need to keep the LWK handler that TBI installed in order to
-     * be able to do inter-thread comms.
      */
     for (i = 0; i <= TBID_SIGNUM_MAX; i++)
-        if (i != TBID_SIGNUM_LWK)
-            _pTBI->fnSigs[i] = __TBIUnExpXXX;
+        _pTBI->fnSigs[i] = __TBIUnExpXXX;
 
     /* A Meta requirement is that the kernel is loaded (virtually)
      * at the PAGE_OFFSET.
@@ -406,9 +399,7 @@ void __init setup_arch(char **cmdline_p)
     cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id();
     hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id();
 
-    /* Copy device tree blob into non-init memory before unflattening */
-    copy_fdt();
-    unflatten_device_tree();
+    unflatten_and_copy_device_tree();
 
 #ifdef CONFIG_SMP
     smp_init_cpus();
@@ -423,6 +414,9 @@ static int __init customize_machine(void)
     /* customizes platform devices, or adds new ones */
     if (machine_desc->init_machine)
         machine_desc->init_machine();
+    else
+        of_platform_populate(NULL, of_default_bus_match_table, NULL,
+                     NULL);
     return 0;
 }
 arch_initcall(customize_machine);
@@ -586,20 +580,20 @@ PTBI pTBI_get(unsigned int cpu)
 EXPORT_SYMBOL(pTBI_get);
 
 #if defined(CONFIG_METAG_DSP) && defined(CONFIG_METAG_FPU)
-char capabilites[] = "dsp fpu";
+static char capabilities[] = "dsp fpu";
 #elif defined(CONFIG_METAG_DSP)
-char capabilites[] = "dsp";
+static char capabilities[] = "dsp";
 #elif defined(CONFIG_METAG_FPU)
-char capabilites[] = "fpu";
+static char capabilities[] = "fpu";
 #else
-char capabilites[] = "";
+static char capabilities[] = "";
 #endif
 
 static struct ctl_table caps_kern_table[] = {
     {
         .procname	= "capabilities",
-        .data		= capabilites,
-        .maxlen		= sizeof(capabilites),
+        .data		= capabilities,
+        .maxlen		= sizeof(capabilities),
         .mode		= 0444,
         .proc_handler	= proc_dostring,
     },
diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c
index 3be61cf0b14..b9e4a82d2bd 100644
--- a/arch/metag/kernel/signal.c
+++ b/arch/metag/kernel/signal.c
@@ -152,18 +152,18 @@ static void __user *get_sigframe(struct k_sigaction *ka, unsigned long sp,
     return (void __user *)sp;
 }
 
-static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
-              sigset_t *set, struct pt_regs *regs)
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+              struct pt_regs *regs)
 {
     struct rt_sigframe __user *frame;
-    int err = -EFAULT;
+    int err;
     unsigned long code;
 
-    frame = get_sigframe(ka, regs->REG_SP, sizeof(*frame));
+    frame = get_sigframe(&ksig->ka, regs->REG_SP, sizeof(*frame));
     if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-        goto out;
+        return -EFAULT;
 
-    err = copy_siginfo_to_user(&frame->info, info);
+    err = copy_siginfo_to_user(&frame->info, &ksig->info);
 
     /* Create the ucontext. */
     err |= __put_user(0, &frame->uc.uc_flags);
@@ -174,7 +174,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
     err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
     if (err)
-        goto out;
+        return -EFAULT;
 
     /* Set up to return from userspace.
      */
@@ -187,15 +187,15 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
     err |= __put_user(code, (unsigned long __user *)(&frame->retcode[1]));
 
     if (err)
-        goto out;
+        return -EFAULT;
 
     /* Set up registers for signal handler */
     regs->REG_RTP = (unsigned long) frame->retcode;
     regs->REG_SP = (unsigned long) frame + sizeof(*frame);
-    regs->REG_ARG1 = sig;
+    regs->REG_ARG1 = ksig->sig;
     regs->REG_ARG2 = (unsigned long) &frame->info;
     regs->REG_ARG3 = (unsigned long) &frame->uc;
-    regs->REG_PC = (unsigned long) ka->sa.sa_handler;
+    regs->REG_PC = (unsigned long) ksig->ka.sa.sa_handler;
 
     pr_debug("SIG deliver (%s:%d): sp=%p pc=%08x pr=%08x\n",
         current->comm, current->pid, frame, regs->REG_PC,
@@ -205,24 +205,19 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
      * effective cache flush - directed rather than 'full flush'.
      */
     flush_cache_sigtramp(regs->REG_RTP, sizeof(frame->retcode));
-out:
-    if (err) {
-        force_sigsegv(sig, current);
-        return -EFAULT;
-    }
+
     return 0;
 }
 
-static void handle_signal(unsigned long sig, siginfo_t *info,
-              struct k_sigaction *ka, struct pt_regs *regs)
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
     sigset_t *oldset = sigmask_to_save();
+    int ret;
 
     /* Set up the stack frame */
-    if (setup_rt_frame(sig, ka, info, oldset, regs))
-        return;
+    ret = setup_rt_frame(ksig, oldset, regs);
 
-    signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP));
+    signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
 }
 
 /*
@@ -235,10 +230,8 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
 static int do_signal(struct pt_regs *regs, int syscall)
 {
     unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
-    struct k_sigaction ka;
-    siginfo_t info;
-    int signr;
     int restart = 0;
+    struct ksignal ksig;
 
     /*
      * By the end of rt_sigreturn the context describes the point that the
@@ -275,7 +268,8 @@ static int do_signal(struct pt_regs *regs, int syscall)
      * Get the signal to deliver. When running under ptrace, at this point
      * the debugger may change all our registers ...
      */
-    signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+    get_signal(&ksig);
+
     /*
      * Depending on the signal settings we may need to revert the decision
     * to restart the system call. But skip this if a debugger has chosen to
@@ -283,19 +277,19 @@ static int do_signal(struct pt_regs *regs, int syscall)
      */
     if (regs->REG_PC != restart_addr)
         restart = 0;
-    if (signr > 0) {
+    if (ksig.sig > 0) {
         if (unlikely(restart)) {
             if (retval == -ERESTARTNOHAND
                 || retval == -ERESTART_RESTARTBLOCK
                 || (retval == -ERESTARTSYS
-                && !(ka.sa.sa_flags & SA_RESTART))) {
+                && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
                 regs->REG_RETVAL = -EINTR;
                 regs->REG_PC = continue_addr;
             }
         }
 
         /* Whee!  Actually deliver the signal.  */
-        handle_signal(signr, &info, &ka, regs);
+        handle_signal(&ksig, regs);
         return 0;
     }
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 4b6d1f14df3..f006d2276f4 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/atomic.h>
+#include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -28,6 +29,8 @@
 #include <asm/cachepart.h>
 #include <asm/core_reg.h>
 #include <asm/cpu.h>
+#include <asm/global_lock.h>
+#include <asm/metag_mem.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -37,6 +40,9 @@
 #include <asm/hwthread.h>
 #include <asm/traps.h>
 
+#define SYSC_DCPART(n)	(SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
+#define SYSC_ICPART(n)	(SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
+
 DECLARE_PER_CPU(PTBI, pTBI);
 
 void *secondary_data_stack;
@@ -57,10 +63,12 @@ static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
 
 static DEFINE_SPINLOCK(boot_lock);
 
+static DECLARE_COMPLETION(cpu_running);
+
 /*
  * "thread" is assumed to be a valid Meta hardware thread ID.
  */
-int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
+static int boot_secondary(unsigned int thread, struct task_struct *idle)
 {
     u32 val;
 
@@ -99,7 +107,113 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
     return 0;
 }
 
+/**
+ * describe_cachepart_change: describe a change to cache partitions.
+ * @thread:	Hardware thread number.
+ * @label:	Label of cache type, e.g. "dcache" or "icache".
+ * @sz:		Total size of the cache.
+ * @old:	Old cache partition configuration (*CPART* register).
+ * @new:	New cache partition configuration (*CPART* register).
+ *
+ * If the cache partition has changed, prints a message to the log describing
+ * those changes.
+ */
+static void describe_cachepart_change(unsigned int thread, const char *label,
+                      unsigned int sz, unsigned int old,
+                      unsigned int new)
+{
+    unsigned int lor1, land1, gor1, gand1;
+    unsigned int lor2, land2, gor2, gand2;
+    unsigned int diff = old ^ new;
+
+    if (!diff)
+        return;
+
+    pr_info("Thread %d: %s partition changed:", thread, label);
+    if (diff & (SYSC_xCPARTL_OR_BITS | SYSC_xCPARTL_AND_BITS)) {
+        lor1  = (old & SYSC_xCPARTL_OR_BITS)  >> SYSC_xCPARTL_OR_S;
+        lor2  = (new & SYSC_xCPARTL_OR_BITS)  >> SYSC_xCPARTL_OR_S;
+        land1 = (old & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
+        land2 = (new & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
+        pr_cont(" L:%#x+%#x->%#x+%#x",
+            (lor1 * sz) >> 4,
+            ((land1 + 1) * sz) >> 4,
+            (lor2 * sz) >> 4,
+            ((land2 + 1) * sz) >> 4);
+    }
+    if (diff & (SYSC_xCPARTG_OR_BITS | SYSC_xCPARTG_AND_BITS)) {
+        gor1  = (old & SYSC_xCPARTG_OR_BITS)  >> SYSC_xCPARTG_OR_S;
+        gor2  = (new & SYSC_xCPARTG_OR_BITS)  >> SYSC_xCPARTG_OR_S;
+        gand1 = (old & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
+        gand2 = (new & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
+        pr_cont(" G:%#x+%#x->%#x+%#x",
+            (gor1 * sz) >> 4,
+            ((gand1 + 1) * sz) >> 4,
+            (gor2 * sz) >> 4,
+            ((gand2 + 1) * sz) >> 4);
+    }
+    if (diff & SYSC_CWRMODE_BIT)
+        pr_cont(" %sWR",
+            (new & SYSC_CWRMODE_BIT) ? "+" : "-");
+    if (diff & SYSC_DCPART_GCON_BIT)
+        pr_cont(" %sGCOn",
+            (new & SYSC_DCPART_GCON_BIT) ? "+" : "-");
+    pr_cont("\n");
+}
+
+/**
+ * setup_smp_cache: ensure cache coherency for new SMP thread.
+ * @thread:	New hardware thread number.
+ *
+ * Ensures that coherency is enabled and that the threads share the same cache
+ * partitions.
+ */
+static void setup_smp_cache(unsigned int thread)
+{
+    unsigned int this_thread, lflags;
+    unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
+    unsigned int icsz, icpart_old, icpart_new;
+
+    /*
+     * Copy over the current thread's cache partition configuration to the
+     * new thread so that they share cache partitions.
+     */
+    __global_lock2(lflags);
+    this_thread = hard_processor_id();
+    /* Share dcache partition */
+    dcpart_this = metag_in32(SYSC_DCPART(this_thread));
+    dcpart_old  = metag_in32(SYSC_DCPART(thread));
+    dcpart_new  = dcpart_this;
+#if PAGE_OFFSET < LINGLOBAL_BASE
+    /*
+     * For the local data cache to be coherent the threads must also have
+     * GCOn enabled.
+     */
+    dcpart_new |= SYSC_DCPART_GCON_BIT;
+    metag_out32(dcpart_new, SYSC_DCPART(this_thread));
+#endif
+    metag_out32(dcpart_new, SYSC_DCPART(thread));
+    /* Share icache partition too */
+    icpart_new = metag_in32(SYSC_ICPART(this_thread));
+    icpart_old = metag_in32(SYSC_ICPART(thread));
+    metag_out32(icpart_new, SYSC_ICPART(thread));
+    __global_unlock2(lflags);
+
+    /*
+     * Log if the cache partitions were altered so the user is aware of any
+     * potential unintentional cache wastage.
+     */
+    dcsz = get_dcache_size();
+    icsz = get_dcache_size();
+    describe_cachepart_change(this_thread, "dcache", dcsz,
+                  dcpart_this, dcpart_new);
+    describe_cachepart_change(thread, "dcache", dcsz,
+                  dcpart_old, dcpart_new);
+    describe_cachepart_change(thread, "icache", icsz,
+                  icpart_old, icpart_new);
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
     unsigned int thread = cpu_2_hwthread_id[cpu];
     int ret;
@@ -108,6 +222,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
     flush_tlb_all();
 
+    setup_smp_cache(thread);
+
     /*
      * Tell the secondary CPU where to find its idle thread's stack.
      */
@@ -120,20 +236,12 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
      */
     ret = boot_secondary(thread, idle);
     if (ret == 0) {
-        unsigned long timeout;
-
         /*
          * CPU was successfully started, wait for it
          * to come online or time out.
          */
-        timeout = jiffies + HZ;
-        while (time_before(jiffies, timeout)) {
-            if (cpu_online(cpu))
-                break;
-
-            udelay(10);
-            barrier();
-        }
+        wait_for_completion_timeout(&cpu_running,
+                        msecs_to_jiffies(1000));
 
         if (!cpu_online(cpu))
             ret = -EIO;
@@ -158,10 +266,9 @@ static DECLARE_COMPLETION(cpu_killed);
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuexit __cpu_disable(void)
+int __cpu_disable(void)
 {
     unsigned int cpu = smp_processor_id();
-    struct task_struct *p;
 
     /*
      * Take this CPU offline.  Once we clear this, we can't return,
@@ -181,12 +288,7 @@ int __cpu_disable(void)
     flush_cache_all();
     local_flush_tlb_all();
 
-    read_lock(&tasklist_lock);
-    for_each_process(p) {
-        if (p->mm)
-            cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
-    }
-    read_unlock(&tasklist_lock);
+    clear_tasks_mm_cpumask(cpu);
 
     return 0;
 }
@@ -195,7 +297,7 @@ int __cpu_disable(void)
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuexit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
     if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
         pr_err("CPU%u: unable to kill\n", cpu);
@@ -207,7 +309,7 @@ void __cpu_die(unsigned int cpu)
  * Note that we do not return from this function. If this cpu is
  * brought online again it will need to run secondary_startup().
 */
-void __cpuexit cpu_die(void)
+void cpu_die(void)
 {
     local_irq_disable();
     idle_task_exit();
@@ -222,7 +324,7 @@ void cpu_die(void)
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
 */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+void smp_store_cpu_info(unsigned int cpuid)
 {
     struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
 
@@ -270,12 +372,7 @@ asmlinkage void secondary_start_kernel(void)
 
     setup_priv();
 
-    /*
-     * Enable local interrupts.
-     */
-    tbi_startup_interrupt(TBID_SIGNUM_TRT);
     notify_cpu_starting(cpu);
-    local_irq_enable();
 
     pr_info("CPU%u (thread %u): Booted secondary processor\n",
         cpu, cpu_2_hwthread_id[cpu]);
@@ -287,17 +384,18 @@ asmlinkage void secondary_start_kernel(void)
      * OK, now it's safe to let the boot CPU continue
      */
     set_cpu_online(cpu, true);
+    complete(&cpu_running);
 
     /*
-     * Check for cache aliasing.
-     * Preemption is disabled
+     * Enable local interrupts.
      */
-    check_for_cache_aliasing(cpu);
+    tbi_startup_interrupt(TBID_SIGNUM_TRT);
+    local_irq_enable();
 
     /*
      * OK, it's off to the idle thread for us
      */
-    cpu_idle();
+    cpu_startup_entry(CPUHP_ONLINE);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -393,7 +491,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-    send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+    send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 void show_ipi_list(struct seq_file *p)
@@ -419,11 +517,10 @@ static DEFINE_SPINLOCK(stop_lock);
  *
  *  Bit 0 - Inter-processor function call
  */
-static int do_IPI(struct pt_regs *regs)
+static int do_IPI(void)
 {
     unsigned int cpu = smp_processor_id();
     struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-    struct pt_regs *old_regs = set_irq_regs(regs);
     unsigned long msgs, nextmsg;
     int handled = 0;
 
@@ -448,10 +545,6 @@ static int do_IPI(void)
             generic_smp_call_function_interrupt();
             break;
 
-        case IPI_CALL_FUNC_SINGLE:
-            generic_smp_call_function_single_interrupt();
-            break;
-
         default:
             pr_crit("CPU%u: Unknown IPI message 0x%lx\n",
                 cpu, nextmsg);
@@ -459,8 +552,6 @@ static int do_IPI(void)
         }
     }
 
-    set_irq_regs(old_regs);
-
     return handled;
 }
 
@@ -526,7 +617,7 @@ static void kick_raise_softirq(cpumask_t callmap, unsigned int irq)
 static TBIRES ipi_handler(TBIRES State, int SigNum, int Triggers,
               int Inst, PTBI pTBI, int *handled)
 {
-    *handled = do_IPI((struct pt_regs *)State.Sig.pCtx);
+    *handled = do_IPI();
 
     return State;
 }
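The boot handshake, condensed into one place (names from the hunks above): the boot CPU now sleeps on a completion instead of polling cpu_online() in a udelay() loop, and the secondary signals it exactly once, right after marking itself online. A sketch:

static int wait_for_secondary(unsigned int cpu)
{
    /* secondary runs: set_cpu_online(cpu, true); complete(&cpu_running); */
    wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000));

    return cpu_online(cpu) ? 0 : -EIO;
}

Because complete()/wait_for_completion() imply the necessary memory ordering, the waiter is guaranteed to observe the online bit set by the time it wakes, so the cpu_online() recheck only distinguishes success from timeout.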
diff --git a/arch/metag/kernel/time.c b/arch/metag/kernel/time.c
index 17dc10733b2..f1c8c53dace 100644
--- a/arch/metag/kernel/time.c
+++ b/arch/metag/kernel/time.c
@@ -5,11 +5,21 @@
  *
  */
 
-#include <linux/init.h>
-
 #include <clocksource/metag_generic.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <asm/clock.h>
 
 void __init time_init(void)
 {
+#ifdef CONFIG_COMMON_CLK
+    /* Init clocks from device tree */
+    of_clk_init(NULL);
+#endif
+
+    /* Init meta clocks, particularly the core clock */
+    init_metag_clocks();
+
+    /* Set up the timer clock sources */
     metag_generic_timer_init();
 }
diff --git a/arch/metag/kernel/topology.c b/arch/metag/kernel/topology.c
index bec3dec4922..4ba595701f7 100644
--- a/arch/metag/kernel/topology.c
+++ b/arch/metag/kernel/topology.c
@@ -19,6 +19,7 @@ DEFINE_PER_CPU(struct cpuinfo_metag, cpu_data);
 
 cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 8961f247b50..17b2e2e38d5 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -33,6 +33,7 @@
 #include <asm/siginfo.h>
 #include <asm/traps.h>
 #include <asm/hwthread.h>
+#include <asm/setup.h>
 #include <asm/switch.h>
 #include <asm/user_gateway.h>
 #include <asm/syscall.h>
@@ -87,8 +88,8 @@ const char *trap_name(int trapno)
 
 static DEFINE_SPINLOCK(die_lock);
 
-void die(const char *str, struct pt_regs *regs, long err,
-     unsigned long addr)
+void __noreturn die(const char *str, struct pt_regs *regs,
+            long err, unsigned long addr)
 {
     static int die_counter;
 
@@ -811,15 +812,14 @@ static void set_trigger_mask(unsigned int mask)
 }
 #endif
 
-void __cpuinit per_cpu_trap_init(unsigned long cpu)
+void per_cpu_trap_init(unsigned long cpu)
 {
     TBIRES int_context;
     unsigned int thread = cpu_2_hwthread_id[cpu];
 
     set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
              TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
-             TBI_TRIG_BIT(TBID_SIGNUM_SW1) |
-             TBI_TRIG_BIT(TBID_SIGNUM_SWS));
+             TBI_TRIG_BIT(TBID_SIGNUM_SW1));
 
     /* non-priv - use current stack */
     int_context.Sig.pCtx = NULL;
@@ -841,7 +841,7 @@ void __init trap_init(void)
     _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
     _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
     _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
-    _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler;
+    _pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;
 
 #ifdef CONFIG_METAG_META21
     _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
@@ -987,9 +987,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 
     show_trace(tsk, sp, NULL);
 }
-
-void dump_stack(void)
-{
-    show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
