Diffstat (limited to 'arch/arc/include')
25 files changed, 362 insertions(+), 169 deletions(-)
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index d8dd660898b..e76fd79f32b 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -1,16 +1,18 @@  generic-y += auxvec.h -generic-y += bugs.h +generic-y += barrier.h  generic-y += bitsperlong.h +generic-y += bugs.h  generic-y += clkdev.h  generic-y += cputime.h  generic-y += device.h  generic-y += div64.h  generic-y += emergency-restart.h  generic-y += errno.h -generic-y += fcntl.h  generic-y += fb.h +generic-y += fcntl.h  generic-y += ftrace.h  generic-y += hardirq.h +generic-y += hash.h  generic-y += hw_irq.h  generic-y += ioctl.h  generic-y += ioctls.h @@ -20,6 +22,7 @@ generic-y += kmap_types.h  generic-y += kvm_para.h  generic-y += local.h  generic-y += local64.h +generic-y += mcs_spinlock.h  generic-y += mman.h  generic-y += msgbuf.h  generic-y += param.h @@ -28,6 +31,7 @@ generic-y += pci.h  generic-y += percpu.h  generic-y += poll.h  generic-y += posix_types.h +generic-y += preempt.h  generic-y += resource.h  generic-y += scatterlist.h  generic-y += sembuf.h diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h deleted file mode 100644 index f6cb7c4ffb3..00000000000 --- a/arch/arc/include/asm/barrier.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __ASM_BARRIER_H -#define __ASM_BARRIER_H - -#ifndef __ASSEMBLY__ - -/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */ -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define set_mb(var, value)  do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) -#define read_barrier_depends()  mb() - -/* TODO-vineetg verify the correctness of macros here */ -#ifdef CONFIG_SMP -#define smp_mb()        mb() -#define smp_rmb()       rmb() -#define smp_wmb()       wmb() -#else -#define smp_mb()        barrier() -#define smp_rmb()       barrier() -#define smp_wmb()       barrier() -#endif - -#define smp_mb__before_atomic_dec()	barrier() -#define smp_mb__after_atomic_dec()	barrier() -#define smp_mb__before_atomic_inc()	barrier() -#define smp_mb__after_atomic_inc()	barrier() - -#define smp_read_barrier_depends()      do { } while (0) - -#endif - -#endif diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 647a83a8e75..ebc0cf3164d 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -19,6 +19,7 @@  #include <linux/types.h>  #include <linux/compiler.h> +#include <asm/barrier.h>  /*   * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns. 
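A side note on the Kbuild hunk above, not part of the patch: listing a header under generic-y makes Kbuild emit a one-line generated wrapper, so #include <asm/barrier.h> in files such as bitops.h keeps resolving even though the arch-private barrier.h is deleted and the asm-generic implementation is used instead. The path and wording below are illustrative of that mechanism:

/* arch/arc/include/generated/asm/barrier.h -- generated at build time */
#include <asm-generic/barrier.h>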
@@ -496,10 +497,6 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)   */  #define ffz(x)	__ffs(~(x)) -/* TODO does this affect uni-processor code */ -#define smp_mb__before_clear_bit()  barrier() -#define smp_mb__after_clear_bit()   barrier() -  #include <asm-generic/bitops/hweight.h>  #include <asm-generic/bitops/fls64.h>  #include <asm-generic/bitops/sched.h> diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index e4abdaac6f9..b3c750979aa 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -17,13 +17,7 @@  #endif  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT) - -/* For a rare case where customers have differently config I/D */ -#define ARC_ICACHE_LINE_LEN	L1_CACHE_BYTES -#define ARC_DCACHE_LINE_LEN	L1_CACHE_BYTES - -#define ICACHE_LINE_MASK	(~(ARC_ICACHE_LINE_LEN - 1)) -#define DCACHE_LINE_MASK	(~(ARC_DCACHE_LINE_LEN - 1)) +#define CACHE_LINE_MASK		(~(L1_CACHE_BYTES - 1))  /*   * ARC700 doesn't cache any access in top 256M. @@ -61,4 +55,31 @@ extern void read_decode_cache_bcr(void);  #endif	/* !__ASSEMBLY__ */ +/* Instruction cache related Auxiliary registers */ +#define ARC_REG_IC_BCR		0x77	/* Build Config reg */ +#define ARC_REG_IC_IVIC		0x10 +#define ARC_REG_IC_CTRL		0x11 +#define ARC_REG_IC_IVIL		0x19 +#if defined(CONFIG_ARC_MMU_V3) +#define ARC_REG_IC_PTAG		0x1E +#endif + +/* Bit val in IC_CTRL */ +#define IC_CTRL_CACHE_DISABLE   0x1 + +/* Data cache related Auxiliary registers */ +#define ARC_REG_DC_BCR		0x72	/* Build Config reg */ +#define ARC_REG_DC_IVDC		0x47 +#define ARC_REG_DC_CTRL		0x48 +#define ARC_REG_DC_IVDL		0x4A +#define ARC_REG_DC_FLSH		0x4B +#define ARC_REG_DC_FLDL		0x4C +#if defined(CONFIG_ARC_MMU_V3) +#define ARC_REG_DC_PTAG		0x5C +#endif + +/* Bit val in DC_CTRL */ +#define DC_CTRL_INV_MODE_FLUSH  0x40 +#define DC_CTRL_FLUSH_STATUS    0x100 +  #endif /* _ASM_CACHE_H */ diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h index c0a72105ee0..fb4efb64897 100644 --- a/arch/arc/include/asm/irq.h +++ b/arch/arc/include/asm/irq.h @@ -18,9 +18,7 @@  #include <asm-generic/irq.h> -extern void __init arc_init_IRQ(void); -extern int __init get_hw_config_num_irq(void); - -void arc_local_timer_setup(unsigned int cpu); +extern void arc_init_IRQ(void); +void arc_local_timer_setup(void);  #endif diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index b68b53f458d..cb7efc29f16 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h @@ -151,16 +151,38 @@ static inline void arch_unmask_irq(unsigned int irq)  #else +#ifdef CONFIG_TRACE_IRQFLAGS + +.macro TRACE_ASM_IRQ_DISABLE +	bl	trace_hardirqs_off +.endm + +.macro TRACE_ASM_IRQ_ENABLE +	bl	trace_hardirqs_on +.endm + +#else + +.macro TRACE_ASM_IRQ_DISABLE +.endm + +.macro TRACE_ASM_IRQ_ENABLE +.endm + +#endif +  .macro IRQ_DISABLE  scratch  	lr	\scratch, [status32]  	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)  	flag	\scratch +	TRACE_ASM_IRQ_DISABLE  .endm  .macro IRQ_ENABLE  scratch  	lr	\scratch, [status32]  	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)  	flag	\scratch +	TRACE_ASM_IRQ_ENABLE  .endm  #endif	/* __ASSEMBLY__ */ diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h index 0283e9e44e0..5faad17118b 100644 --- a/arch/arc/include/asm/linkage.h +++ b/arch/arc/include/asm/linkage.h @@ -11,19 +11,7 @@  #ifdef __ASSEMBLY__ -/* Can't use the ENTRY macro in linux/linkage.h - * gas considers ';' as comment vs. 
newline - */ -.macro ARC_ENTRY name -	.global \name -	.align 4 -	\name: -.endm - -.macro ARC_EXIT name -#define ASM_PREV_SYM_ADDR(name)  .-##name -	.size \ name, ASM_PREV_SYM_ADDR(\name) -.endm +#define ASM_NL		 `	/* use '`' to mark new line in macro */  /* annotation for data we want in DCCM - if enabled in .config */  .macro ARCFP_DATA nm diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index 9998dc846eb..e8993a2be6c 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h @@ -51,22 +51,12 @@ struct machine_desc {  /*   * Current machine - only accessible during boot.   */ -extern struct machine_desc *machine_desc; +extern const struct machine_desc *machine_desc;  /*   * Machine type table - also only accessible during boot   */ -extern struct machine_desc __arch_info_begin[], __arch_info_end[]; -#define for_each_machine_desc(p)			\ -	for (p = __arch_info_begin; p < __arch_info_end; p++) - -static inline struct machine_desc *default_machine_desc(void) -{ -	/* the default machine is the last one linked in */ -	if (__arch_info_end - 1 < __arch_info_begin) -		return NULL; -	return __arch_info_end - 1; -} +extern const struct machine_desc __arch_info_begin[], __arch_info_end[];  /*   * Set of macros to define architecture features. @@ -81,7 +71,6 @@ __attribute__((__section__(".arch.info.init"))) = {	\  #define MACHINE_END				\  }; -extern struct machine_desc *setup_machine_fdt(void *dt); -extern void __init copy_devtree(void); +extern const struct machine_desc *setup_machine_fdt(void *dt);  #endif diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h index c2663b32866..8c84ae98c33 100644 --- a/arch/arc/include/asm/mmu.h +++ b/arch/arc/include/asm/mmu.h @@ -48,7 +48,7 @@  #ifndef __ASSEMBLY__  typedef struct { -	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */ +	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */  } mm_context_t;  #ifdef CONFIG_ARC_DBG_TLB_PARANOIA diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h index 43a1b51bb8c..1fd467ef658 100644 --- a/arch/arc/include/asm/mmu_context.h +++ b/arch/arc/include/asm/mmu_context.h @@ -30,13 +30,13 @@   * "Fast Context Switch" i.e. no TLB flush on ctxt-switch   *   * Linux assigns each task a unique ASID. A simple round-robin allocation - * of H/w ASID is done using software tracker @asid_cache. + * of H/w ASID is done using software tracker @asid_cpu.   * When it reaches max 255, the allocation cycle starts afresh by flushing   * the entire TLB and wrapping ASID back to zero.   *   * A new allocation cycle, post rollover, could potentially reassign an ASID   * to a different task. Thus the rule is to refresh the ASID in a new cycle. - * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits + * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits   * serve as cycle/generation indicator and natural 32 bit unsigned math   * automagically increments the generation when lower 8 bits rollover.   
*/ @@ -47,9 +47,11 @@  #define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)  #define MM_CTXT_NO_ASID		0UL -#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK) +#define asid_mm(mm, cpu)	mm->context.asid[cpu] +#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK) -extern unsigned int asid_cache; +DECLARE_PER_CPU(unsigned int, asid_cache); +#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)  /*   * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle) @@ -57,6 +59,7 @@ extern unsigned int asid_cache;   */  static inline void get_new_mmu_context(struct mm_struct *mm)  { +	const unsigned int cpu = smp_processor_id();  	unsigned long flags;  	local_irq_save(flags); @@ -71,28 +74,28 @@ static inline void get_new_mmu_context(struct mm_struct *mm)  	 * 	 first need to destroy the context, setting it to invalid  	 * 	 value.  	 */ -	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK)) +	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))  		goto set_hw;  	/* move to new ASID and handle rollover */ -	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) { +	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) { -		flush_tlb_all(); +		local_flush_tlb_all();  		/*  		 * Above checke for rollover of 8 bit ASID in 32 bit container.  		 * If the container itself wrapped around, set it to a non zero  		 * "generation" to distinguish from no context  		 */ -		if (!asid_cache) -			asid_cache = MM_CTXT_FIRST_CYCLE; +		if (!asid_cpu(cpu)) +			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;  	}  	/* Assign new ASID to tsk */ -	mm->context.asid = asid_cache; +	asid_mm(mm, cpu) = asid_cpu(cpu);  set_hw: -	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE); +	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);  	local_irq_restore(flags);  } @@ -104,16 +107,45 @@ set_hw:  static inline int  init_new_context(struct task_struct *tsk, struct mm_struct *mm)  { -	mm->context.asid = MM_CTXT_NO_ASID; +	int i; + +	for_each_possible_cpu(i) +		asid_mm(mm, i) = MM_CTXT_NO_ASID; +  	return 0;  } +static inline void destroy_context(struct mm_struct *mm) +{ +	unsigned long flags; + +	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */ +	local_irq_save(flags); +	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID; +	local_irq_restore(flags); +} +  /* Prepare the MMU for task: setup PID reg with allocated ASID      If task doesn't have an ASID (never alloc or stolen, get a new ASID)  */  static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,  			     struct task_struct *tsk)  { +	const int cpu = smp_processor_id(); + +	/* +	 * Note that the mm_cpumask is "aggregating" only, we don't clear it +	 * for the switched-out task, unlike some other arches. +	 * It is used to enlist cpus for sending TLB flush IPIs and not sending +	 * it to CPUs where a task once ran-on, could cause stale TLB entry +	 * re-use, specially for a multi-threaded task. +	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps. +	 *      For a non-aggregating mm_cpumask, IPI not sent C1, and if T1 +	 *      were to re-migrate to C1, it could access the unmapped region +	 *      via any existing stale TLB entries. 
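A standalone illustration, not part of the patch, of the generation arithmetic described in the comment above; the 8-bit ASID / 24-bit cycle split is assumed (MM_CTXT_ASID_MASK == 0xff) and the values are made up:

#include <stdio.h>

#define MM_CTXT_ASID_MASK	0xffUL			/* assumed 8-bit h/w PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* upper 24 bits = generation */

int main(void)
{
	unsigned long asid_cpu = 0x1fe;	/* per-cpu allocator, generation 1 */
	unsigned long asid_mm  = 0x1f0;	/* task's ASID, also generation 1  */

	/* same generation: the task keeps its ASID, only the h/w PID is programmed */
	printf("same cycle: %d\n", !((asid_mm ^ asid_cpu) & MM_CTXT_CYCLE_MASK)); /* 1 */

	/*
	 * Two more allocations: 0x1ff, then 0x200 - the low 8 bits wrapped, so the
	 * plain increment bumped the generation (get_new_mmu_context() above would
	 * flush the TLB at this point) and the old task ASID now reads as stale.
	 */
	asid_cpu += 2;
	printf("same cycle: %d\n", !((asid_mm ^ asid_cpu) & MM_CTXT_CYCLE_MASK)); /* 0 */
	return 0;
}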
+	 */ +	cpumask_set_cpu(cpu, mm_cpumask(next)); +  #ifndef CONFIG_SMP  	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */  	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd); @@ -131,11 +163,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,   */  #define activate_mm(prev, next)		switch_mm(prev, next, NULL) -static inline void destroy_context(struct mm_struct *mm) -{ -	mm->context.asid = MM_CTXT_NO_ASID; -} -  /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping   * for retiring-mm. However destroy_context( ) still needs to do that because   * between mm_release( ) = >deactive_mm( ) and diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 115ad96480e..cbf755e32a0 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -1,5 +1,7 @@  /* - * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com) + * Linux performance counter support for ARC + * + * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as @@ -10,4 +12,204 @@  #ifndef __ASM_PERF_EVENT_H  #define __ASM_PERF_EVENT_H +/* real maximum varies per CPU, this is the maximum supported by the driver */ +#define ARC_PMU_MAX_HWEVENTS	64 + +#define ARC_REG_CC_BUILD	0xF6 +#define ARC_REG_CC_INDEX	0x240 +#define ARC_REG_CC_NAME0	0x241 +#define ARC_REG_CC_NAME1	0x242 + +#define ARC_REG_PCT_BUILD	0xF5 +#define ARC_REG_PCT_COUNTL	0x250 +#define ARC_REG_PCT_COUNTH	0x251 +#define ARC_REG_PCT_SNAPL	0x252 +#define ARC_REG_PCT_SNAPH	0x253 +#define ARC_REG_PCT_CONFIG	0x254 +#define ARC_REG_PCT_CONTROL	0x255 +#define ARC_REG_PCT_INDEX	0x256 + +#define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */ +#define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */ + +struct arc_reg_pct_build { +#ifdef CONFIG_CPU_BIG_ENDIAN +	unsigned int m:8, c:8, r:6, s:2, v:8; +#else +	unsigned int v:8, s:2, r:6, c:8, m:8; +#endif +}; + +struct arc_reg_cc_build { +#ifdef CONFIG_CPU_BIG_ENDIAN +	unsigned int c:16, r:8, v:8; +#else +	unsigned int v:8, r:8, c:16; +#endif +}; + +#define PERF_COUNT_ARC_DCLM	(PERF_COUNT_HW_MAX + 0) +#define PERF_COUNT_ARC_DCSM	(PERF_COUNT_HW_MAX + 1) +#define PERF_COUNT_ARC_ICM	(PERF_COUNT_HW_MAX + 2) +#define PERF_COUNT_ARC_BPOK	(PERF_COUNT_HW_MAX + 3) +#define PERF_COUNT_ARC_EDTLB	(PERF_COUNT_HW_MAX + 4) +#define PERF_COUNT_ARC_EITLB	(PERF_COUNT_HW_MAX + 5) +#define PERF_COUNT_ARC_HW_MAX	(PERF_COUNT_HW_MAX + 6) + +/* + * The "generalized" performance events seem to really be a copy + * of the available events on x86 processors; the mapping to ARC + * events is not always possible 1-to-1. Fortunately, there doesn't + * seem to be an exact definition for these events, so we can cheat + * a bit where necessary. + * + * In particular, the following PERF events may behave a bit differently + * compared to other architectures: + * + * PERF_COUNT_HW_CPU_CYCLES + *	Cycles not in halted state + * + * PERF_COUNT_HW_REF_CPU_CYCLES + *	Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES + *	for now as we don't do Dynamic Voltage/Frequency Scaling (yet) + * + * PERF_COUNT_HW_BUS_CYCLES + *	Unclear what this means, Intel uses 0x013c, which according to + *	their datasheet means "unhalted reference cycles". It sounds similar + *	to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it. 
+ * + * PERF_COUNT_HW_STALLED_CYCLES_BACKEND + * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND + *	The ARC 700 can either measure stalls per pipeline stage, or all stalls + *	combined; for now we assign all stalls to STALLED_CYCLES_BACKEND + *	and all pipeline flushes (e.g. caused by mispredicts, etc.) to + *	STALLED_CYCLES_FRONTEND. + * + *	We could start multiple performance counters and combine everything + *	afterwards, but that makes it complicated. + * + *	Note that I$ cache misses aren't counted by either of the two! + */ + +static const char * const arc_pmu_ev_hw_map[] = { +	[PERF_COUNT_HW_CPU_CYCLES] = "crun", +	[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun", +	[PERF_COUNT_HW_BUS_CYCLES] = "crun", +	[PERF_COUNT_HW_INSTRUCTIONS] = "iall", +	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", +	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", +	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush", +	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall", +	[PERF_COUNT_ARC_DCLM] = "dclm", +	[PERF_COUNT_ARC_DCSM] = "dcsm", +	[PERF_COUNT_ARC_ICM] = "icm", +	[PERF_COUNT_ARC_BPOK] = "bpok", +	[PERF_COUNT_ARC_EDTLB] = "edtlb", +	[PERF_COUNT_ARC_EITLB] = "eitlb", +}; + +#define C(_x)			PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED	0xffff + +static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { +	[C(L1D)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCLM, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= PERF_COUNT_ARC_DCSM, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +	[C(L1I)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= PERF_COUNT_ARC_ICM, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +	[C(LL)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +	[C(DTLB)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EDTLB, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +	[C(ITLB)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= PERF_COUNT_ARC_EITLB, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +	[C(BPU)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, +			[C(RESULT_MISS)]	= PERF_COUNT_HW_BRANCH_MISSES, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			
[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +	[C(NODE)] = { +		[C(OP_READ)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_WRITE)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +		[C(OP_PREFETCH)] = { +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED, +		}, +	}, +}; +  #endif /* __ASM_PERF_EVENT_H */ diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h index 36a9f20c21a..81208bfd9dc 100644 --- a/arch/arc/include/asm/pgalloc.h +++ b/arch/arc/include/asm/pgalloc.h @@ -105,11 +105,16 @@ static inline pgtable_t  pte_alloc_one(struct mm_struct *mm, unsigned long address)  {  	pgtable_t pte_pg; +	struct page *page;  	pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte()); -	if (pte_pg) { -		memzero((void *)pte_pg, PTRS_PER_PTE * 4); -		pgtable_page_ctor(virt_to_page(pte_pg)); +	if (!pte_pg) +		return 0; +	memzero((void *)pte_pg, PTRS_PER_PTE * 4); +	page = virt_to_page(pte_pg); +	if (!pgtable_page_ctor(page)) { +		__free_page(page); +		return 0;  	}  	return pte_pg; diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 15334ab66b5..d99f9b37cd1 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -18,7 +18,6 @@  #ifndef __ASSEMBLY__ -#include <asm/arcregs.h>	/* for STATUS_E1_MASK et all */  #include <asm/ptrace.h>  /* Arch specific stuff which needs to be saved per task. @@ -41,15 +40,13 @@ struct thread_struct {  /* Forward declaration, a strange C thing */  struct task_struct; -/* - * Return saved PC of a blocked thread. - */ +/* Return saved PC of a blocked thread  */  unsigned long thread_saved_pc(struct task_struct *t);  #define task_pt_regs(p) \  	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) -/* Free all resources held by a thread. */ +/* Free all resources held by a thread */  #define release_thread(thread) do { } while (0)  /* Prepare to copy thread state - unlazy all lazy status */ @@ -82,26 +79,8 @@ unsigned long thread_saved_pc(struct task_struct *t);  #define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)  #define KSTK_FP(tsk)    KSTK_REG(tsk, 0) -/* - * Do necessary setup to start up a newly executed thread. - * - * E1,E2 so that Interrupts are enabled in user mode - * L set, so Loop inhibited to begin with - * lp_start and lp_end seeded with bogus non-zero values so to easily catch - * the ARC700 sr to lp_start hardware bug - */ -#define start_thread(_regs, _pc, _usp)				\ -do {								\ -	set_fs(USER_DS); /* reads from user space */		\ -	(_regs)->ret = (_pc);					\ -	/* Interrupts enabled in User Mode */			\ -	(_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK	\ -		| STATUS_E1_MASK | STATUS_E2_MASK;		\ -	(_regs)->sp = (_usp);					\ -	/* bogus seed values for debugging */			\ -	(_regs)->lp_start = 0x10;				\ -	(_regs)->lp_end = 0x80;					\ -} while (0) +extern void start_thread(struct pt_regs * regs, unsigned long pc, +			 unsigned long usp);  extern unsigned int get_wchan(struct task_struct *p); diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h deleted file mode 100644 index 692d0d0789a..00000000000 --- a/arch/arc/include/asm/prom.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (C) 2012 Synopsys, Inc. 
(www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_ARC_PROM_H_ -#define _ASM_ARC_PROM_H_ - -#define HAVE_ARCH_DEVTREE_FIXUPS - -#endif diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h index 764f1e3ba75..09db952e14b 100644 --- a/arch/arc/include/asm/sections.h +++ b/arch/arc/include/asm/sections.h @@ -12,6 +12,5 @@  #include <asm-generic/sections.h>  extern char __arc_dccm_base[]; -extern char __dtb_start[];  #endif diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h index 229e5068149..e10f8cef56a 100644 --- a/arch/arc/include/asm/setup.h +++ b/arch/arc/include/asm/setup.h @@ -31,7 +31,7 @@ struct cpuinfo_data {  extern int root_mountflags, end_mem;  extern int running_on_hw; -void __init setup_processor(void); +void setup_processor(void);  void __init setup_arch_memory(void);  #endif /* __ASMARC_SETUP_H */ diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index c4fb211dcd2..5d06eee43ea 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h @@ -30,7 +30,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);   * APIs provided by arch SMP code to rest of arch code   */  extern void __init smp_init_cpus(void); -extern void __init first_lines_of_secondary(void); +extern void first_lines_of_secondary(void);  extern const char *arc_platform_smp_cpuinfo(void);  /* @@ -46,14 +46,14 @@ extern int smp_ipi_irq_setup(int cpu, int irq);   *   * @info:		SoC SMP specific info for /proc/cpuinfo etc   * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC) - * @ipi_send:		To send IPI to a @cpumask - * @ips_clear:		To clear IPI received by @cpu at @irq + * @ipi_send:		To send IPI to a @cpu + * @ips_clear:		To clear IPI received at @irq   */  struct plat_smp_ops {  	const char 	*info;  	void		(*cpu_kick)(int cpu, unsigned long pc); -	void		(*ipi_send)(void *callmap); -	void		(*ipi_clear)(int cpu, int irq); +	void		(*ipi_send)(int cpu); +	void		(*ipi_clear)(int irq);  };  /* TBD: stop exporting it for direct population by platform */ diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index f158197ac5b..b6a8c2dfbe6 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)  static inline void arch_spin_unlock(arch_spinlock_t *lock)  { -	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; +	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + +	__asm__ __volatile__( +	"	ex  %0, [%1]		\n" +	: "+r" (tmp) +	: "r"(&(lock->slock)) +	: "memory"); +  	smp_mb();  } diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 2d50a4cdd7f..45be2167201 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -80,8 +80,6 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)  #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE      0x10000000 -  /*   * thread information flags   * - these are process state flags that various assembly files may need to diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h index b2f9bc7f68c..71c7b2e4b87 100644 --- a/arch/arc/include/asm/tlbflush.h +++ b/arch/arc/include/asm/tlbflush.h @@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, 
unsigned long end);  void local_flush_tlb_range(struct vm_area_struct *vma,  			   unsigned long start, unsigned long end); -/* XXX: Revisit for SMP */ +#ifndef CONFIG_SMP  #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)  #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)  #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)  #define flush_tlb_all()			local_flush_tlb_all()  #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm) - +#else +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, +							 unsigned long end); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +#endif /* CONFIG_SMP */  #endif diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 32420824375..30c9baffa96 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -43,7 +43,7 @@   * Because it essentially checks if buffer end is within limit and @len is   * non-ngeative, which implies that buffer start will be within limit too.   * - * The reason for rewriting being, for majorit yof cases, @len is generally + * The reason for rewriting being, for majority of cases, @len is generally   * compile time constant, causing first sub-expression to be compile time   * subsumed.   * @@ -53,7 +53,7 @@   *   */  #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \ -				 (((addr)+(sz)) <= get_fs())) +				 ((addr) <= (get_fs() - (sz))))  #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \  				 likely(__user_ok((addr), (sz)))) diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h index 60702f3751d..3e5f071bc00 100644 --- a/arch/arc/include/asm/unaligned.h +++ b/arch/arc/include/asm/unaligned.h @@ -22,7 +22,8 @@ static inline int  misaligned_fixup(unsigned long address, struct pt_regs *regs,  		 struct callee_regs *cregs)  { -	return 0; +	/* Not fixed */ +	return 1;  }  #endif diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild index 18fefaea73f..f50d02df78d 100644 --- a/arch/arc/include/uapi/asm/Kbuild +++ b/arch/arc/include/uapi/asm/Kbuild @@ -2,11 +2,4 @@  include include/uapi/asm-generic/Kbuild.asm  header-y += elf.h  header-y += page.h -header-y += setup.h -header-y += byteorder.h  header-y += cachectl.h -header-y += ptrace.h -header-y += sigcontext.h -header-y += signal.h -header-y += swab.h -header-y += unistd.h diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 2618cc13ba7..76a7739aab1 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -11,6 +11,7 @@  #ifndef _UAPI__ASM_ARC_PTRACE_H  #define _UAPI__ASM_ARC_PTRACE_H +#define PTRACE_GET_THREAD_AREA	25  #ifndef __ASSEMBLY__  /* diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 6f30484f34b..39e58d1cdf9 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -8,6 +8,13 @@  /******** no-legacy-syscalls-ABI *******/ +/* + * Non-typical guard macro to enable inclusion twice in ARCH sys.c + * That is how the Generic syscall wrapper generator works + */ +#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) +#define _UAPI_ASM_ARC_UNISTD_H +  #define __ARCH_WANT_SYS_EXECVE  #define __ARCH_WANT_SYS_CLONE  #define __ARCH_WANT_SYS_VFORK @@ -32,3 +39,7 @@ 
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)  /* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */  #define __NR_sysfs		(__NR_arch_specific_syscall + 3)  __SYSCALL(__NR_sysfs, sys_sysfs) + +#undef __SYSCALL + +#endif  | 
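The linkage.h hunk above drops the ARC-private ARC_ENTRY/ARC_EXIT macros and instead overrides ASM_NL with '`', because the ARC assembler treats ';' (the usual statement separator) as a comment start. With that override the generic macros in include/linux/linkage.h become usable on ARC; their shape is roughly the following (shown for illustration, see the real header for the exact definition):

#ifndef ASM_NL
#define ASM_NL		;	/* overridden to '`' by ARC */
#endif

#ifndef ENTRY
#define ENTRY(name) \
	.globl name ASM_NL \
	ALIGN ASM_NL \
	name:
#endif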
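The uaccess.h hunk rewrites the second clause of __user_ok() from ((addr)+(sz)) <= get_fs() to (addr) <= (get_fs() - (sz)), presumably so a bogus range whose end wraps past 2^32 can no longer slip through (the first clause already guarantees sz <= TASK_SIZE, so the subtraction cannot underflow). A standalone demonstration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative values; 32-bit arithmetic as on ARC */
	uint32_t limit = 0x80000000u;	/* stand-in for get_fs()     */
	uint32_t addr  = 0xfffff000u;	/* bogus user-space pointer  */
	uint32_t sz    = 0x2000u;	/* addr + sz wraps past 2^32 */

	/* old form: the sum wraps to 0x1000 and the bogus range is accepted */
	printf("old: %d\n", (uint32_t)(addr + sz) <= limit);	/* 1 */

	/* new form: no wrap-around, the range is rejected */
	printf("new: %d\n", addr <= limit - sz);		/* 0 */
	return 0;
}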
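The unusual guard added to the uapi unistd.h above (re-inclusion is allowed whenever __SYSCALL is defined) exists so the arch can include the header a second time and expand the __SYSCALL() entries into its syscall table. A sketch of that consumer, following the common asm-generic pattern; the real code lives in arch/arc/kernel/sys.c and may differ in detail:

/* syscall table built by re-including <asm/unistd.h> with __SYSCALL defined */
#define __SYSCALL(nr, call) [nr] = (call),

void *sys_call_table[NR_syscalls] = {
	[0 ... NR_syscalls - 1] = sys_ni_syscall,	/* default: not implemented */
#include <asm/unistd.h>		/* second include; each __SYSCALL() becomes an entry */
};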
