Diffstat (limited to 'arch/arm/include')
104 files changed, 1825 insertions, 757 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index d3db39860b9..f5a35760198 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -7,23 +7,28 @@ generic-y += current.h  generic-y += emergency-restart.h  generic-y += errno.h  generic-y += exec.h +generic-y += hash.h  generic-y += ioctl.h  generic-y += ipcbuf.h  generic-y += irq_regs.h  generic-y += kdebug.h  generic-y += local.h  generic-y += local64.h +generic-y += mcs_spinlock.h  generic-y += msgbuf.h  generic-y += param.h  generic-y += parport.h  generic-y += poll.h +generic-y += preempt.h  generic-y += resource.h +generic-y += rwsem.h  generic-y += sections.h  generic-y += segment.h  generic-y += sembuf.h  generic-y += serial.h  generic-y += shmbuf.h  generic-y += siginfo.h +generic-y += simd.h  generic-y += sizes.h  generic-y += socket.h  generic-y += sockios.h @@ -31,5 +36,4 @@ generic-y += termbits.h  generic-y += termios.h  generic-y += timex.h  generic-y += trace_clock.h -generic-y += types.h  generic-y += unaligned.h diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h index 5665134bfa3..0704e0cf557 100644 --- a/arch/arm/include/asm/arch_timer.h +++ b/arch/arm/include/asm/arch_timer.h @@ -87,17 +87,43 @@ static inline u64 arch_counter_get_cntvct(void)  	return cval;  } -static inline void arch_counter_set_user_access(void) +static inline u32 arch_timer_get_cntkctl(void)  {  	u32 cntkctl; -  	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl)); +	return cntkctl; +} -	/* disable user access to everything */ -	cntkctl &= ~((3 << 8) | (7 << 0)); - +static inline void arch_timer_set_cntkctl(u32 cntkctl) +{  	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));  } + +static inline void arch_counter_set_user_access(void) +{ +	u32 cntkctl = arch_timer_get_cntkctl(); + +	/* Disable user access to both physical/virtual counters/timers */ +	/* Also disable virtual event stream */ +	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN +			| ARCH_TIMER_USR_VT_ACCESS_EN +			| ARCH_TIMER_VIRT_EVT_EN +			| ARCH_TIMER_USR_VCT_ACCESS_EN +			| ARCH_TIMER_USR_PCT_ACCESS_EN); +	arch_timer_set_cntkctl(cntkctl); +} + +static inline void arch_timer_evtstrm_enable(int divider) +{ +	u32 cntkctl = arch_timer_get_cntkctl(); +	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK; +	/* Set the divider and enable virtual event stream */ +	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) +			| ARCH_TIMER_VIRT_EVT_EN; +	arch_timer_set_cntkctl(cntkctl); +	elf_hwcap |= HWCAP_EVTSTRM; +} +  #endif  #endif diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index fcc1b5bf697..57f0584e8d9 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -23,6 +23,7 @@  #include <asm/ptrace.h>  #include <asm/domain.h>  #include <asm/opcodes-virt.h> +#include <asm/asm-offsets.h>  #define IOMEM(x)	(x) @@ -30,8 +31,8 @@   * Endian independent macros for shifting bytes within registers.   
*/  #ifndef __ARMEB__ -#define pull            lsr -#define push            lsl +#define lspull          lsr +#define lspush          lsl  #define get_byte_0      lsl #0  #define get_byte_1	lsr #8  #define get_byte_2	lsr #16 @@ -41,8 +42,8 @@  #define put_byte_2	lsl #16  #define put_byte_3	lsl #24  #else -#define pull            lsl -#define push            lsr +#define lspull          lsl +#define lspush          lsr  #define get_byte_0	lsr #24  #define get_byte_1	lsr #16  #define get_byte_2	lsr #8 @@ -53,6 +54,13 @@  #define put_byte_3      lsl #0  #endif +/* Select code for any configuration running in BE8 mode */ +#ifdef CONFIG_CPU_ENDIAN_BE8 +#define ARM_BE8(code...) code +#else +#define ARM_BE8(code...) +#endif +  /*   * Data preload for architectures that support it   */ @@ -167,6 +175,47 @@  	restore_irqs_notrace \oldcpsr  	.endm +/* + * Get current thread_info. + */ +	.macro	get_thread_info, rd + ARM(	mov	\rd, sp, lsr #13	) + THUMB(	mov	\rd, sp			) + THUMB(	lsr	\rd, \rd, #13		) +	mov	\rd, \rd, lsl #13 +	.endm + +/* + * Increment/decrement the preempt count. + */ +#ifdef CONFIG_PREEMPT_COUNT +	.macro	inc_preempt_count, ti, tmp +	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count +	add	\tmp, \tmp, #1			@ increment it +	str	\tmp, [\ti, #TI_PREEMPT] +	.endm + +	.macro	dec_preempt_count, ti, tmp +	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count +	sub	\tmp, \tmp, #1			@ decrement it +	str	\tmp, [\ti, #TI_PREEMPT] +	.endm + +	.macro	dec_preempt_count_ti, ti, tmp +	get_thread_info \ti +	dec_preempt_count \ti, \tmp +	.endm +#else +	.macro	inc_preempt_count, ti, tmp +	.endm + +	.macro	dec_preempt_count, ti, tmp +	.endm + +	.macro	dec_preempt_count_ti, ti, tmp +	.endm +#endif +  #define USER(x...)				\  9999:	x;					\  	.pushsection __ex_table,"a";		\ @@ -263,7 +312,7 @@   * you cannot return to the original mode.   
*/  .macro safe_svcmode_maskall reg:req -#if __LINUX_ARM_ARCH__ >= 6 +#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)  	mrs	\reg , cpsr  	eor	\reg, \reg, #HYP_MODE  	tst	\reg, #MODE_MASK diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index da1c77d3932..3040359094d 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -12,6 +12,7 @@  #define __ASM_ARM_ATOMIC_H  #include <linux/compiler.h> +#include <linux/prefetch.h>  #include <linux/types.h>  #include <linux/irqflags.h>  #include <asm/barrier.h> @@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)  	unsigned long tmp;  	int result; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_add\n"  "1:	ldrex	%0, [%3]\n"  "	add	%0, %0, %4\n" @@ -58,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)  	int result;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_add_return\n"  "1:	ldrex	%0, [%3]\n" @@ -79,6 +82,7 @@ static inline void atomic_sub(int i, atomic_t *v)  	unsigned long tmp;  	int result; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_sub\n"  "1:	ldrex	%0, [%3]\n"  "	sub	%0, %0, %4\n" @@ -96,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)  	int result;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_sub_return\n"  "1:	ldrex	%0, [%3]\n" @@ -114,9 +119,11 @@ static inline int atomic_sub_return(int i, atomic_t *v)  static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)  { -	unsigned long oldval, res; +	int oldval; +	unsigned long res;  	smp_mb(); +	prefetchw(&ptr->counter);  	do {  		__asm__ __volatile__("@ atomic_cmpxchg\n" @@ -134,19 +141,31 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)  	return oldval;  } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +static inline int __atomic_add_unless(atomic_t *v, int a, int u)  { -	unsigned long tmp, tmp2; +	int oldval, newval; +	unsigned long tmp; -	__asm__ __volatile__("@ atomic_clear_mask\n" -"1:	ldrex	%0, [%3]\n" -"	bic	%0, %0, %4\n" -"	strex	%1, %0, [%3]\n" -"	teq	%1, #0\n" -"	bne	1b" -	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) -	: "r" (addr), "Ir" (mask) +	smp_mb(); +	prefetchw(&v->counter); + +	__asm__ __volatile__ ("@ atomic_add_unless\n" +"1:	ldrex	%0, [%4]\n" +"	teq	%0, %5\n" +"	beq	2f\n" +"	add	%1, %0, %6\n" +"	strex	%2, %1, [%4]\n" +"	teq	%2, #0\n" +"	bne	1b\n" +"2:" +	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) +	: "r" (&v->counter), "r" (u), "r" (a)  	: "cc"); + +	if (oldval != u) +		smp_mb(); + +	return oldval;  }  #else /* ARM_ARCH_6 */ @@ -197,19 +216,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)  	return ret;  } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ -	unsigned long flags; - -	raw_local_irq_save(flags); -	*addr &= ~mask; -	raw_local_irq_restore(flags); -} - -#endif /* __LINUX_ARM_ARCH__ */ - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -  static inline int __atomic_add_unless(atomic_t *v, int a, int u)  {  	int c, old; @@ -220,6 +226,10 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)  	return c;  } +#endif /* __LINUX_ARM_ARCH__ */ + +#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +  #define atomic_inc(v)		atomic_add(1, v)  #define atomic_dec(v)		atomic_sub(1, v) @@ -231,22 +241,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)  #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) 
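An aside on the get_thread_info macro added to assembler.h above: it is the assembly twin of the C-side current_thread_info(). ARM kernel stacks are THREAD_SIZE (8 KiB, i.e. 1 << 13) aligned, which is why the macro shifts sp right and then left by 13 bits. For context, the C counterpart as implemented in asm/thread_info.h:

/*
 * Clearing the low 13 bits of the stack pointer lands on the
 * struct thread_info placed at the base of the 8 KiB kernel stack.
 */
static inline struct thread_info *current_thread_info(void)
{
	register unsigned long sp asm ("sp");

	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
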
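Similarly, the LDREX/STREX loop in the __atomic_add_unless() added above is what backs atomic_add_unless() and atomic_inc_not_zero(). A minimal usage sketch; the refcounted object here is made up for illustration and is not part of this patch:

#include <linux/atomic.h>
#include <linux/types.h>

struct my_obj {
	atomic_t refcount;	/* hypothetical refcounted object */
};

/* Take a reference only if the count has not already dropped to zero. */
static bool my_obj_tryget(struct my_obj *obj)
{
	/*
	 * atomic_add_unless() adds 1 unless the counter equals 0 and
	 * returns non-zero iff the add happened (__atomic_add_unless()
	 * itself returns the old value) -- exactly the compare-and-retry
	 * loop coded with ldrex/strex above.
	 */
	return atomic_add_unless(&obj->refcount, 1, 0);
}
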
-#define smp_mb__before_atomic_dec()	smp_mb() -#define smp_mb__after_atomic_dec()	smp_mb() -#define smp_mb__before_atomic_inc()	smp_mb() -#define smp_mb__after_atomic_inc()	smp_mb() -  #ifndef CONFIG_GENERIC_ATOMIC64  typedef struct { -	u64 __aligned(8) counter; +	long long counter;  } atomic64_t;  #define ATOMIC64_INIT(i) { (i) }  #ifdef CONFIG_ARM_LPAE -static inline u64 atomic64_read(const atomic64_t *v) +static inline long long atomic64_read(const atomic64_t *v)  { -	u64 result; +	long long result;  	__asm__ __volatile__("@ atomic64_read\n"  "	ldrd	%0, %H0, [%1]" @@ -257,7 +262,7 @@ static inline u64 atomic64_read(const atomic64_t *v)  	return result;  } -static inline void atomic64_set(atomic64_t *v, u64 i) +static inline void atomic64_set(atomic64_t *v, long long i)  {  	__asm__ __volatile__("@ atomic64_set\n"  "	strd	%2, %H2, [%1]" @@ -266,9 +271,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)  	);  }  #else -static inline u64 atomic64_read(const atomic64_t *v) +static inline long long atomic64_read(const atomic64_t *v)  { -	u64 result; +	long long result;  	__asm__ __volatile__("@ atomic64_read\n"  "	ldrexd	%0, %H0, [%1]" @@ -279,10 +284,11 @@ static inline u64 atomic64_read(const atomic64_t *v)  	return result;  } -static inline void atomic64_set(atomic64_t *v, u64 i) +static inline void atomic64_set(atomic64_t *v, long long i)  { -	u64 tmp; +	long long tmp; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_set\n"  "1:	ldrexd	%0, %H0, [%2]\n"  "	strexd	%0, %3, %H3, [%2]\n" @@ -294,15 +300,16 @@ static inline void atomic64_set(atomic64_t *v, u64 i)  }  #endif -static inline void atomic64_add(u64 i, atomic64_t *v) +static inline void atomic64_add(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_add\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	adds	%0, %0, %4\n" -"	adc	%H0, %H0, %H4\n" +"	adds	%Q0, %Q0, %Q4\n" +"	adc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -311,17 +318,18 @@ static inline void atomic64_add(u64 i, atomic64_t *v)  	: "cc");  } -static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +static inline long long atomic64_add_return(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_add_return\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	adds	%0, %0, %4\n" -"	adc	%H0, %H0, %H4\n" +"	adds	%Q0, %Q0, %Q4\n" +"	adc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -334,15 +342,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)  	return result;  } -static inline void atomic64_sub(u64 i, atomic64_t *v) +static inline void atomic64_sub(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_sub\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	subs	%0, %0, %4\n" -"	sbc	%H0, %H0, %H4\n" +"	subs	%Q0, %Q0, %Q4\n" +"	sbc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -351,17 +360,18 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)  	: "cc");  } -static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) +static inline long long atomic64_sub_return(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_sub_return\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	subs	%0, %0, %4\n" -"	sbc	%H0, 
%H0, %H4\n" +"	subs	%Q0, %Q0, %Q4\n" +"	sbc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -374,12 +384,14 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)  	return result;  } -static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) +static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, +					long long new)  { -	u64 oldval; +	long long oldval;  	unsigned long res;  	smp_mb(); +	prefetchw(&ptr->counter);  	do {  		__asm__ __volatile__("@ atomic64_cmpxchg\n" @@ -398,12 +410,13 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)  	return oldval;  } -static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) +static inline long long atomic64_xchg(atomic64_t *ptr, long long new)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&ptr->counter);  	__asm__ __volatile__("@ atomic64_xchg\n"  "1:	ldrexd	%0, %H0, [%3]\n" @@ -419,18 +432,19 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)  	return result;  } -static inline u64 atomic64_dec_if_positive(atomic64_t *v) +static inline long long atomic64_dec_if_positive(atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_dec_if_positive\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	subs	%0, %0, #1\n" -"	sbc	%H0, %H0, #0\n" -"	teq	%H0, #0\n" +"	subs	%Q0, %Q0, #1\n" +"	sbc	%R0, %R0, #0\n" +"	teq	%R0, #0\n"  "	bmi	2f\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n" @@ -445,13 +459,14 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)  	return result;  } -static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)  { -	u64 val; +	long long val;  	unsigned long tmp;  	int ret = 1;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_add_unless\n"  "1:	ldrexd	%0, %H0, [%4]\n" @@ -459,8 +474,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)  "	teqeq	%H0, %H5\n"  "	moveq	%1, #0\n"  "	beq	2f\n" -"	adds	%0, %0, %6\n" -"	adc	%H0, %H0, %H6\n" +"	adds	%Q0, %Q0, %Q6\n" +"	adc	%R0, %R0, %R6\n"  "	strexd	%2, %0, %H0, [%4]\n"  "	teq	%2, #0\n"  "	bne	1b\n" diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h new file mode 100644 index 00000000000..1714800fa11 --- /dev/null +++ b/arch/arm/include/asm/bL_switcher.h @@ -0,0 +1,77 @@ +/* + * arch/arm/include/asm/bL_switcher.h + * + * Created by:  Nicolas Pitre, April 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef ASM_BL_SWITCHER_H +#define ASM_BL_SWITCHER_H + +#include <linux/compiler.h> +#include <linux/types.h> + +typedef void (*bL_switch_completion_handler)(void *cookie); + +int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id, +			 bL_switch_completion_handler completer, +			 void *completer_cookie); +static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id) +{ +	return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL); +} + +/* + * Register here to be notified about runtime enabling/disabling of + * the switcher. + * + * The notifier chain is called with the switcher activation lock held: + * the switcher will not be enabled or disabled during callbacks. 
+ * Callbacks must not call bL_switcher_{get,put}_enabled(). + */ +#define BL_NOTIFY_PRE_ENABLE	0 +#define BL_NOTIFY_POST_ENABLE	1 +#define BL_NOTIFY_PRE_DISABLE	2 +#define BL_NOTIFY_POST_DISABLE	3 + +#ifdef CONFIG_BL_SWITCHER + +int bL_switcher_register_notifier(struct notifier_block *nb); +int bL_switcher_unregister_notifier(struct notifier_block *nb); + +/* + * Use these functions to temporarily prevent enabling/disabling of + * the switcher. + * bL_switcher_get_enabled() returns true if the switcher is currently + * enabled.  Each call to bL_switcher_get_enabled() must be followed + * by a call to bL_switcher_put_enabled().  These functions are not + * recursive. + */ +bool bL_switcher_get_enabled(void); +void bL_switcher_put_enabled(void); + +int bL_switcher_trace_trigger(void); +int bL_switcher_get_logical_index(u32 mpidr); + +#else +static inline int bL_switcher_register_notifier(struct notifier_block *nb) +{ +	return 0; +} + +static inline int bL_switcher_unregister_notifier(struct notifier_block *nb) +{ +	return 0; +} + +static inline bool bL_switcher_get_enabled(void) { return false; } +static inline void bL_switcher_put_enabled(void) { } +static inline int bL_switcher_trace_trigger(void) { return 0; } +static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; } +#endif /* CONFIG_BL_SWITCHER */ + +#endif diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 60f15e274e6..c6a3e73a6e2 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -59,10 +59,28 @@  #define smp_wmb()	dmb(ishst)  #endif +#define smp_store_release(p, v)						\ +do {									\ +	compiletime_assert_atomic_type(*p);				\ +	smp_mb();							\ +	ACCESS_ONCE(*p) = (v);						\ +} while (0) + +#define smp_load_acquire(p)						\ +({									\ +	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\ +	compiletime_assert_atomic_type(*p);				\ +	smp_mb();							\ +	___p1;								\ +}) +  #define read_barrier_depends()		do { } while(0)  #define smp_read_barrier_depends()	do { } while(0)  #define set_mb(var, value)	do { var = value; smp_mb(); } while (0) +#define smp_mb__before_atomic()	smp_mb() +#define smp_mb__after_atomic()	smp_mb() +  #endif /* !__ASSEMBLY__ */  #endif /* __ASM_BARRIER_H */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index e691ec91e4d..56380995f4c 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -25,9 +25,7 @@  #include <linux/compiler.h>  #include <linux/irqflags.h> - -#define smp_mb__before_clear_bit()	smp_mb() -#define smp_mb__after_clear_bit()	smp_mb() +#include <asm/barrier.h>  /*   * These functions are the basis of our bit ops. @@ -254,25 +252,59 @@ static inline int constant_fls(int x)  }  /* - * On ARMv5 and above those functions can be implemented around - * the clz instruction for much better code efficiency. + * On ARMv5 and above those functions can be implemented around the + * clz instruction for much better code efficiency.  __clz returns + * the number of leading zeros, zero input will return 32, and + * 0x80000000 will return 0.   */ +static inline unsigned int __clz(unsigned int x) +{ +	unsigned int ret; + +	asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); + +	return ret; +} +/* + * fls() returns zero if the input is zero, otherwise returns the bit + * position of the last set bit, where the LSB is 1 and MSB is 32. 
+ */  static inline int fls(int x)  { -	int ret; -  	if (__builtin_constant_p(x))  	       return constant_fls(x); -	asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); -       	ret = 32 - ret; -	return ret; +	return 32 - __clz(x); +} + +/* + * __fls() returns the bit position of the last bit set, where the + * LSB is 0 and MSB is 31.  Zero input is undefined. + */ +static inline unsigned long __fls(unsigned long x) +{ +	return fls(x) - 1; +} + +/* + * ffs() returns zero if the input was zero, otherwise returns the bit + * position of the first set bit, where the LSB is 1 and MSB is 32. + */ +static inline int ffs(int x) +{ +	return fls(x & -x); +} + +/* + * __ffs() returns the bit position of the first bit set, where the + * LSB is 0 and MSB is 31.  Zero input is undefined. + */ +static inline unsigned long __ffs(unsigned long x) +{ +	return ffs(x) - 1;  } -#define __fls(x) (fls(x) - 1) -#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) -#define __ffs(x) (ffs(x) - 1)  #define ffz(x) __ffs( ~(x) )  #endif diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 7af5c6c3653..b274bde2490 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -2,6 +2,8 @@  #define _ASMARM_BUG_H  #include <linux/linkage.h> +#include <linux/types.h> +#include <asm/opcodes.h>  #ifdef CONFIG_BUG @@ -12,10 +14,10 @@   */  #ifdef CONFIG_THUMB2_KERNEL  #define BUG_INSTR_VALUE 0xde02 -#define BUG_INSTR_TYPE ".hword " +#define BUG_INSTR(__value) __inst_thumb16(__value)  #else  #define BUG_INSTR_VALUE 0xe7f001f2 -#define BUG_INSTR_TYPE ".word " +#define BUG_INSTR(__value) __inst_arm(__value)  #endif @@ -33,7 +35,7 @@  #define __BUG(__file, __line, __value)				\  do {								\ -	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\ +	asm volatile("1:\t" BUG_INSTR(__value) "\n"  \  		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \  		"2:\t.asciz " #__file "\n" 			\  		".popsection\n" 				\ @@ -48,7 +50,7 @@ do {								\  #define __BUG(__file, __line, __value)				\  do {								\ -	asm volatile(BUG_INSTR_TYPE #__value);			\ +	asm volatile(BUG_INSTR(__value) "\n");			\  	unreachable();						\  } while (0)  #endif  /* CONFIG_DEBUG_BUGVERBOSE */ diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 15f2d5bf887..fd43f7f55b7 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,  static inline void __flush_icache_all(void)  {  	__flush_icache_preferred(); +	dsb(ishst);  }  /* @@ -435,4 +436,57 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)  #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))  #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) +/* + * Disabling cache access for one CPU in an ARMv7 SMP system is tricky. + * To do so we must: + * + * - Clear the SCTLR.C bit to prevent further cache allocations + * - Flush the desired level of cache + * - Clear the ACTLR "SMP" bit to disable local coherency + * + * ... and so without any intervening memory access in between those steps, + * not even to the stack. + * + * WARNING -- After this has been called: + * + * - No ldrex/strex (and similar) instructions must be used. + * - The CPU is obviously no longer coherent with the other CPUs. + * - This is unlikely to work as expected if Linux is running non-secure. 
+ * + * Note: + * + * - This is known to apply to several ARMv7 processor implementations, + *   however some exceptions may exist.  Caveat emptor. + * + * - The clobber list is dictated by the call to v7_flush_dcache_*. + *   fp is preserved to the stack explicitly prior disabling the cache + *   since adding it to the clobber list is incompatible with having + *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering + *   trampoline are inserted by the linker and to keep sp 64-bit aligned. + */ +#define v7_exit_coherency_flush(level) \ +	asm volatile( \ +	"stmfd	sp!, {fp, ip} \n\t" \ +	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \ +	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \ +	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \ +	"isb	\n\t" \ +	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \ +	"clrex	\n\t" \ +	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \ +	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \ +	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \ +	"isb	\n\t" \ +	"dsb	\n\t" \ +	"ldmfd	sp!, {fp, ip}" \ +	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \ +	      "r9","r10","lr","memory" ) + +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); + +void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, +			     void *kaddr, unsigned long len);  #endif diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h index 6dcc1643086..52331511547 100644 --- a/arch/arm/include/asm/checksum.h +++ b/arch/arm/include/asm/checksum.h @@ -87,19 +87,33 @@ static inline __wsum  csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,  		   unsigned short proto, __wsum sum)  { -	__asm__( -	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\ -	adcs	%0, %0, %3					\n" +	u32 lenprot = len | proto << 16; +	if (__builtin_constant_p(sum) && sum == 0) { +		__asm__( +		"adds	%0, %1, %2	@ csum_tcpudp_nofold0	\n\t"  #ifdef __ARMEB__ -	"adcs	%0, %0, %4					\n" +		"adcs	%0, %0, %3				\n\t"  #else -	"adcs	%0, %0, %4, lsl #8				\n" +		"adcs	%0, %0, %3, ror #8			\n\t"  #endif -	"adcs	%0, %0, %5					\n\ -	adc	%0, %0, #0" -	: "=&r"(sum) -	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto)) -	: "cc"); +		"adc	%0, %0, #0" +		: "=&r" (sum) +		: "r" (daddr), "r" (saddr), "r" (lenprot) +		: "cc"); +	} else { +		__asm__( +		"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\t" +		"adcs	%0, %0, %3				\n\t" +#ifdef __ARMEB__ +		"adcs	%0, %0, %4				\n\t" +#else +		"adcs	%0, %0, %4, ror #8			\n\t" +#endif +		"adc	%0, %0, #0" +		: "=&r"(sum) +		: "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot) +		: "cc"); +	}  	return sum;  }	  /* diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h index 80751c15c30..4e8a4b27d7c 100644 --- a/arch/arm/include/asm/clkdev.h +++ b/arch/arm/include/asm/clkdev.h @@ -14,12 +14,14 @@  #include <linux/slab.h> +#ifndef CONFIG_COMMON_CLK  #ifdef CONFIG_HAVE_MACH_CLKDEV  #include <mach/clkdev.h>  #else  #define __clk_get(clk)	({ 1; })  #define __clk_put(clk)	do { } while (0)  #endif +#endif  static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)  { diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 4f009c10540..abb2c3769b0 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h @@ -2,6 +2,7 @@  #define __ASM_ARM_CMPXCHG_H  #include <linux/irqflags.h> +#include <linux/prefetch.h>  #include 
<asm/barrier.h>  #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) @@ -35,6 +36,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size  #endif  	smp_mb(); +	prefetchw((const void *)ptr);  	switch (size) {  #if __LINUX_ARM_ARCH__ >= 6 @@ -138,6 +140,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,  {  	unsigned long oldval, res; +	prefetchw((const void *)ptr); +  	switch (size) {  #ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */  	case 1: @@ -223,6 +227,44 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,  	return ret;  } +static inline unsigned long long __cmpxchg64(unsigned long long *ptr, +					     unsigned long long old, +					     unsigned long long new) +{ +	unsigned long long oldval; +	unsigned long res; + +	prefetchw(ptr); + +	__asm__ __volatile__( +"1:	ldrexd		%1, %H1, [%3]\n" +"	teq		%1, %4\n" +"	teqeq		%H1, %H4\n" +"	bne		2f\n" +"	strexd		%0, %5, %H5, [%3]\n" +"	teq		%0, #0\n" +"	bne		1b\n" +"2:" +	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) +	: "r" (ptr), "r" (old), "r" (new) +	: "cc"); + +	return oldval; +} + +static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr, +						unsigned long long old, +						unsigned long long new) +{ +	unsigned long long ret; + +	smp_mb(); +	ret = __cmpxchg64(ptr, old, new); +	smp_mb(); + +	return ret; +} +  #define cmpxchg_local(ptr,o,n)						\  	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\  				       (unsigned long)(o),		\ @@ -230,18 +272,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,  				       sizeof(*(ptr))))  #define cmpxchg64(ptr, o, n)						\ -	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\ -						atomic64_t,		\ -						counter),		\ -					      (unsigned long long)(o),	\ -					      (unsigned long long)(n))) - -#define cmpxchg64_local(ptr, o, n)					\ -	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\ -						local64_t,		\ -						a),			\ -					     (unsigned long long)(o),	\ -					     (unsigned long long)(n))) +	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\ +					(unsigned long long)(o),	\ +					(unsigned long long)(n))) + +#define cmpxchg64_relaxed(ptr, o, n)					\ +	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\ +					(unsigned long long)(o),	\ +					(unsigned long long)(n))) + +#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n))  #endif	/* __LINUX_ARM_ARCH__ >= 6 */ diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 6493802f880..c3f11524f10 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -42,24 +42,23 @@  #ifndef __ASSEMBLY__  #if __LINUX_ARM_ARCH__ >= 4 -#define vectors_high()	(cr_alignment & CR_V) +#define vectors_high()	(get_cr() & CR_V)  #else  #define vectors_high()	(0)  #endif  #ifdef CONFIG_CPU_CP15 -extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */  extern unsigned long cr_alignment;	/* defined in entry-armv.S */ -static inline unsigned int get_cr(void) +static inline unsigned long get_cr(void)  { -	unsigned int val; +	unsigned long val;  	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");  	return val;  } -static inline void set_cr(unsigned int val) +static inline void set_cr(unsigned long val)  {  	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"  	  : : "r" (val) : "cc"); @@ -80,10 +79,6 @@ static inline void set_auxcr(unsigned int val)  	isb();  } -#ifndef CONFIG_SMP -extern void adjust_cr(unsigned long mask, unsigned long set); -#endif -  #define CPACC_FULL(n)		(3 << 
(n * 2))  #define CPACC_SVC(n)		(1 << (n * 2))  #define CPACC_DISABLE(n)	(0 << (n * 2)) @@ -106,13 +101,17 @@ static inline void set_copro_access(unsigned int val)  #else /* ifdef CONFIG_CPU_CP15 */  /* - * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the - * minds of the developers). Yielding 0 for machines without a cp15 (and making - * it read-only) is fine for most cases and saves quite some #ifdeffery. + * cr_alignment is tightly coupled to cp15 (at least in the minds of the + * developers). Yielding 0 for machines without a cp15 (and making it + * read-only) is fine for most cases and saves quite some #ifdeffery.   */ -#define cr_no_alignment	UL(0)  #define cr_alignment	UL(0) +static inline unsigned long get_cr(void) +{ +	return 0; +} +  #endif /* ifdef CONFIG_CPU_CP15 / else */  #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 9672e978d50..8c2b7321a47 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -10,6 +10,7 @@  #define CPUID_TLBTYPE	3  #define CPUID_MPUIR	4  #define CPUID_MPIDR	5 +#define CPUID_REVIDR	6  #ifdef CONFIG_CPU_V7M  #define CPUID_EXT_PFR0	0x40 @@ -70,6 +71,8 @@  #define ARM_CPU_PART_CORTEX_A5		0xC050  #define ARM_CPU_PART_CORTEX_A15		0xC0F0  #define ARM_CPU_PART_CORTEX_A7		0xC070 +#define ARM_CPU_PART_CORTEX_A12		0xC0D0 +#define ARM_CPU_PART_CORTEX_A17		0xC0E0  #define ARM_CPU_XSCALE_ARCH_MASK	0xe000  #define ARM_CPU_XSCALE_ARCH_V1		0x2000 @@ -219,4 +222,23 @@ static inline int cpu_is_xsc3(void)  #define	cpu_is_xscale()	1  #endif +/* + * Marvell's PJ4 and PJ4B cores are based on V7 version, + * but require a specical sequence for enabling coprocessors. + * For this reason, we need a way to distinguish them. + */ +#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) +static inline int cpu_is_pj4(void) +{ +	unsigned int id; + +	id = read_cpuid_id(); +	if ((id & 0xff0fff00) == 0x560f5800) +		return 1; + +	return 0; +} +#else +#define cpu_is_pj4()	0 +#endif  #endif diff --git a/arch/arm/include/asm/dcc.h b/arch/arm/include/asm/dcc.h new file mode 100644 index 00000000000..b74899de077 --- /dev/null +++ b/arch/arm/include/asm/dcc.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ */ + +#include <asm/barrier.h> + +static inline u32 __dcc_getstatus(void) +{ +	u32 __ret; +	asm volatile("mrc p14, 0, %0, c0, c1, 0	@ read comms ctrl reg" +		: "=r" (__ret) : : "cc"); + +	return __ret; +} + +static inline char __dcc_getchar(void) +{ +	char __c; + +	asm volatile("mrc p14, 0, %0, c0, c5, 0	@ read comms data reg" +		: "=r" (__c)); +	isb(); + +	return __c; +} + +static inline void __dcc_putchar(char c) +{ +	asm volatile("mcr p14, 0, %0, c0, c5, 0	@ write a char" +		: /* no output register */ +		: "r" (c)); +	isb(); +} diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index 191ada6e4d2..662c7bd0610 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -156,7 +156,7 @@  		/* Select the best insn combination to perform the   */	\  		/* actual __m * __n / (__p << 64) operation.         */	\  		if (!__c) {						\ -			asm (	"umull	%Q0, %R0, %1, %Q2\n\t"		\ +			asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"		\  				"mov	%Q0, #0"			\  				: "=&r" (__res)				\  				: "r" (__m), "r" (__n)			\ diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h index a8c56acc8c9..8e3fcb924db 100644 --- a/arch/arm/include/asm/dma-iommu.h +++ b/arch/arm/include/asm/dma-iommu.h @@ -13,9 +13,11 @@ struct dma_iommu_mapping {  	/* iommu specific data */  	struct iommu_domain	*domain; -	void			*bitmap; -	size_t			bits; -	unsigned int		order; +	unsigned long		**bitmaps;	/* array of bitmaps */ +	unsigned int		nr_bitmaps;	/* nr of elements in array */ +	unsigned int		extensions; +	size_t			bitmap_size;	/* size of a single bitmap */ +	size_t			bits;		/* per bitmap */  	dma_addr_t		base;  	spinlock_t		lock; @@ -23,8 +25,7 @@ struct dma_iommu_mapping {  };  struct dma_iommu_mapping * -arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, -			 int order); +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);  void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 5b579b95150..c45b61a4b4a 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -11,17 +11,28 @@  #include <asm-generic/dma-coherent.h>  #include <asm/memory.h> +#include <xen/xen.h> +#include <asm/xen/hypervisor.h> +  #define DMA_ERROR_CODE	(~0)  extern struct dma_map_ops arm_dma_ops;  extern struct dma_map_ops arm_coherent_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)  {  	if (dev && dev->archdata.dma_ops)  		return dev->archdata.dma_ops;  	return &arm_dma_ops;  } +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ +	if (xen_initial_domain()) +		return xen_dma_ops; +	else +		return __generic_dma_ops(dev); +} +  static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)  {  	BUG_ON(!dev); @@ -47,23 +58,40 @@ static inline int dma_set_mask(struct device *dev, u64 mask)  #ifndef __arch_pfn_to_dma  static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)  { +	if (dev) +		pfn -= dev->dma_pfn_offset;  	return (dma_addr_t)__pfn_to_bus(pfn);  }  static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)  { -	return __bus_to_pfn(addr); +	unsigned long pfn = __bus_to_pfn(addr); + +	if (dev) +		pfn += dev->dma_pfn_offset; + +	return pfn;  }  static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)  { +	if (dev) { +		unsigned long pfn 
= dma_to_pfn(dev, addr); + +		return phys_to_virt(__pfn_to_phys(pfn)); +	} +  	return (void *)__bus_to_virt((unsigned long)addr);  }  static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)  { +	if (dev) +		return pfn_to_dma(dev, virt_to_pfn(addr)); +  	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));  } +  #else  static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)  { @@ -86,6 +114,53 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)  }  #endif +/* The ARM override for dma_max_pfn() */ +static inline unsigned long dma_max_pfn(struct device *dev) +{ +	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask); +} +#define dma_max_pfn(dev) dma_max_pfn(dev) + +static inline int set_arch_dma_coherent_ops(struct device *dev) +{ +	set_dma_ops(dev, &arm_coherent_dma_ops); +	return 0; +} +#define set_arch_dma_coherent_ops(dev)	set_arch_dma_coherent_ops(dev) + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ +	unsigned int offset = paddr & ~PAGE_MASK; +	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ +	unsigned int offset = dev_addr & ~PAGE_MASK; +	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ +	u64 limit, mask; + +	if (!dev->dma_mask) +		return 0; + +	mask = *dev->dma_mask; + +	limit = (mask + 1) & ~mask; +	if (limit && size > limit) +		return 0; + +	if ((addr | (addr + size - 1)) & ~mask) +		return 0; + +	return 1; +} + +static inline void dma_mark_clean(void *addr, size_t size) { } +  /*   * DMA errors are defined by all-bits-set in the DMA address.   */ diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 58b8c6a0ab1..99084431d6a 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -8,8 +8,8 @@  #define MAX_DMA_ADDRESS	0xffffffffUL  #else  #define MAX_DMA_ADDRESS	({ \ -	extern unsigned long arm_dma_zone_size; \ -	arm_dma_zone_size ? \ +	extern phys_addr_t arm_dma_zone_size; \ +	arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \  		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })  #endif diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h index 15631300c23..2c9f10df756 100644 --- a/arch/arm/include/asm/firmware.h +++ b/arch/arm/include/asm/firmware.h @@ -22,6 +22,10 @@   */  struct firmware_ops {  	/* +	 * Inform the firmware we intend to enter CPU idle mode +	 */ +	int (*prepare_idle)(void); +	/*  	 * Enters CPU idle mode  	 */  	int (*do_idle)(void); diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h index bbae919bceb..74124b0d0d7 100644 --- a/arch/arm/include/asm/fixmap.h +++ b/arch/arm/include/asm/fixmap.h @@ -1,24 +1,11 @@  #ifndef _ASM_FIXMAP_H  #define _ASM_FIXMAP_H -/* - * Nothing too fancy for now. - * - * On ARM we already have well known fixed virtual addresses imposed by - * the architecture such as the vector page which is located at 0xffff0000, - * therefore a second level page table is already allocated covering - * 0xfff00000 upwards. - * - * The cache flushing code in proc-xscale.S uses the virtual area between - * 0xfffe0000 and 0xfffeffff. 
- */ - -#define FIXADDR_START		0xfff00000UL -#define FIXADDR_TOP		0xfffe0000UL +#define FIXADDR_START		0xffc00000UL +#define FIXADDR_TOP		0xffe00000UL  #define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START) -#define FIX_KMAP_BEGIN		0 -#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT) +#define FIX_KMAP_NR_PTES	(FIXADDR_SIZE >> PAGE_SHIFT)  #define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))  #define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT) @@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void);  static inline unsigned long fix_to_virt(const unsigned int idx)  { -	if (idx >= FIX_KMAP_END) +	if (idx >= FIX_KMAP_NR_PTES)  		__this_fixmap_does_not_exist();  	return __fix_to_virt(idx);  } diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h index c9f03eccc9d..f4882553fbb 100644 --- a/arch/arm/include/asm/floppy.h +++ b/arch/arm/include/asm/floppy.h @@ -25,7 +25,7 @@  #define fd_inb(port)		inb((port))  #define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\ -					    IRQF_DISABLED,"floppy",NULL) +					    0,"floppy",NULL)  #define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)  #define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)  #define fd_enable_irq()		enable_irq(IRQ_FLOPPYDISK) diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index f89515adac6..39eb16b0066 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -52,15 +52,7 @@ extern inline void *return_address(unsigned int level)  #endif -#define HAVE_ARCH_CALLER_ADDR - -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 ((unsigned long)return_address(1)) -#define CALLER_ADDR2 ((unsigned long)return_address(2)) -#define CALLER_ADDR3 ((unsigned long)return_address(3)) -#define CALLER_ADDR4 ((unsigned long)return_address(4)) -#define CALLER_ADDR5 ((unsigned long)return_address(5)) -#define CALLER_ADDR6 ((unsigned long)return_address(6)) +#define ftrace_return_address(n) return_address(n)  #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index e42cf597f6e..53e69dae796 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -3,11 +3,6 @@  #ifdef __KERNEL__ -#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP) -/* ARM doesn't provide unprivileged exclusive memory accessors */ -#include <asm-generic/futex.h> -#else -  #include <linux/futex.h>  #include <linux/uaccess.h>  #include <asm/errno.h> @@ -28,6 +23,7 @@  #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\  	smp_mb();						\ +	prefetchw(uaddr);					\  	__asm__ __volatile__(					\  	"1:	ldrex	%1, [%3]\n"				\  	"	" insn "\n"					\ @@ -51,6 +47,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,  		return -EFAULT;  	smp_mb(); +	/* Prefetching cannot fault */ +	prefetchw(uaddr);  	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"  	"1:	ldrex	%1, [%4]\n"  	"	teq	%1, %2\n" @@ -164,6 +162,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)  	return ret;  } -#endif /* !(CPU_USE_DOMAINS && SMP) */  #endif /* __KERNEL__ */  #endif /* _ASM_ARM_FUTEX_H */ diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h index c81adc08b3f..a3c24cd5b7c 100644 --- a/arch/arm/include/asm/glue-cache.h +++ b/arch/arm/include/asm/glue-cache.h @@ -130,22 +130,22 @@  #endif  #ifndef __ASSEMBLER__ -extern inline void nop_flush_icache_all(void) { } -extern inline void nop_flush_kern_cache_all(void) { } -extern 
inline void nop_flush_kern_cache_louis(void) { } -extern inline void nop_flush_user_cache_all(void) { } -extern inline void nop_flush_user_cache_range(unsigned long a, +static inline void nop_flush_icache_all(void) { } +static inline void nop_flush_kern_cache_all(void) { } +static inline void nop_flush_kern_cache_louis(void) { } +static inline void nop_flush_user_cache_all(void) { } +static inline void nop_flush_user_cache_range(unsigned long a,  		unsigned long b, unsigned int c) { } -extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { } -extern inline int nop_coherent_user_range(unsigned long a, +static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { } +static inline int nop_coherent_user_range(unsigned long a,  		unsigned long b) { return 0; } -extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { } +static inline void nop_flush_kern_dcache_area(void *a, size_t s) { } -extern inline void nop_dma_flush_range(const void *a, const void *b) { } +static inline void nop_dma_flush_range(const void *a, const void *b) { } -extern inline void nop_dma_map_area(const void *s, size_t l, int f) { } -extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } +static inline void nop_dma_map_area(const void *s, size_t l, int f) { } +static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }  #endif  #ifndef MULTI_CACHE diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h index 6b70f1b46a6..04e18b65665 100644 --- a/arch/arm/include/asm/glue-df.h +++ b/arch/arm/include/asm/glue-df.h @@ -31,14 +31,6 @@  #undef CPU_DABORT_HANDLER  #undef MULTI_DABORT -#if defined(CONFIG_CPU_ARM710) -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER cpu_arm7_data_abort -# endif -#endif -  #ifdef CONFIG_CPU_ABRT_EV4  # ifdef CPU_DABORT_HANDLER  #  define MULTI_DABORT 1 diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 2740c2a2df6..fe3ea776dc3 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -5,7 +5,7 @@  #include <linux/threads.h>  #include <asm/irq.h> -#define NR_IPI	6 +#define NR_IPI	8  typedef struct {  	unsigned int __softirq_pending; diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h new file mode 100644 index 00000000000..12e1588dc4f --- /dev/null +++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h @@ -0,0 +1,13 @@ +/* + * arch/arm/include/asm/hardware/cache-feroceon-l2.h + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +extern void __init feroceon_l2_init(int l2_wt_override); +extern int __init feroceon_of_init(void); + diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 3b2c40b5bfa..3a5ec1c2565 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -26,8 +26,8 @@  #define L2X0_CACHE_TYPE			0x004  #define L2X0_CTRL			0x100  #define L2X0_AUX_CTRL			0x104 -#define L2X0_TAG_LATENCY_CTRL		0x108 -#define L2X0_DATA_LATENCY_CTRL		0x10C +#define L310_TAG_LATENCY_CTRL		0x108 +#define L310_DATA_LATENCY_CTRL		0x10C  #define L2X0_EVENT_CNT_CTRL		0x200  #define L2X0_EVENT_CNT1_CFG		0x204  #define L2X0_EVENT_CNT0_CFG		0x208 @@ -54,53 +54,93 @@  #define L2X0_LOCKDOWN_WAY_D_BASE	0x900  #define L2X0_LOCKDOWN_WAY_I_BASE	0x904  #define L2X0_LOCKDOWN_STRIDE		0x08 -#define L2X0_ADDR_FILTER_START		0xC00 -#define L2X0_ADDR_FILTER_END		0xC04 +#define L310_ADDR_FILTER_START		0xC00 +#define L310_ADDR_FILTER_END		0xC04  #define L2X0_TEST_OPERATION		0xF00  #define L2X0_LINE_DATA			0xF10  #define L2X0_LINE_TAG			0xF30  #define L2X0_DEBUG_CTRL			0xF40 -#define L2X0_PREFETCH_CTRL		0xF60 -#define L2X0_POWER_CTRL			0xF80 -#define   L2X0_DYNAMIC_CLK_GATING_EN	(1 << 1) -#define   L2X0_STNDBY_MODE_EN		(1 << 0) +#define L310_PREFETCH_CTRL		0xF60 +#define L310_POWER_CTRL			0xF80 +#define   L310_DYNAMIC_CLK_GATING_EN	(1 << 1) +#define   L310_STNDBY_MODE_EN		(1 << 0)  /* Registers shifts and masks */  #define L2X0_CACHE_ID_PART_MASK		(0xf << 6)  #define L2X0_CACHE_ID_PART_L210		(1 << 6) +#define L2X0_CACHE_ID_PART_L220		(2 << 6)  #define L2X0_CACHE_ID_PART_L310		(3 << 6)  #define L2X0_CACHE_ID_RTL_MASK          0x3f -#define L2X0_CACHE_ID_RTL_R0P0          0x0 -#define L2X0_CACHE_ID_RTL_R1P0          0x2 -#define L2X0_CACHE_ID_RTL_R2P0          0x4 -#define L2X0_CACHE_ID_RTL_R3P0          0x5 -#define L2X0_CACHE_ID_RTL_R3P1          0x6 -#define L2X0_CACHE_ID_RTL_R3P2          0x8 +#define L210_CACHE_ID_RTL_R0P2_02	0x00 +#define L210_CACHE_ID_RTL_R0P1		0x01 +#define L210_CACHE_ID_RTL_R0P2_01	0x02 +#define L210_CACHE_ID_RTL_R0P3		0x03 +#define L210_CACHE_ID_RTL_R0P4		0x0b +#define L210_CACHE_ID_RTL_R0P5		0x0f +#define L220_CACHE_ID_RTL_R1P7_01REL0	0x06 +#define L310_CACHE_ID_RTL_R0P0		0x00 +#define L310_CACHE_ID_RTL_R1P0		0x02 +#define L310_CACHE_ID_RTL_R2P0		0x04 +#define L310_CACHE_ID_RTL_R3P0		0x05 +#define L310_CACHE_ID_RTL_R3P1		0x06 +#define L310_CACHE_ID_RTL_R3P1_50REL0	0x07 +#define L310_CACHE_ID_RTL_R3P2		0x08 +#define L310_CACHE_ID_RTL_R3P3		0x09 -#define L2X0_AUX_CTRL_MASK			0xc0000fff +/* L2C auxiliary control register - bits common to L2C-210/220/310 */ +#define L2C_AUX_CTRL_WAY_SIZE_SHIFT		17 +#define L2C_AUX_CTRL_WAY_SIZE_MASK		(7 << 17) +#define L2C_AUX_CTRL_WAY_SIZE(n)		((n) << 17) +#define L2C_AUX_CTRL_EVTMON_ENABLE		BIT(20) +#define L2C_AUX_CTRL_PARITY_ENABLE		BIT(21) +#define L2C_AUX_CTRL_SHARED_OVERRIDE		BIT(22) +/* L2C-210/220 common bits */  #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT	0 -#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK	0x7 +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK	(7 << 0)  #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT	3 -#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK	(0x7 << 3) +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK	(7 << 3)  #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT		6 -#define L2X0_AUX_CTRL_TAG_LATENCY_MASK		(0x7 << 6) +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK		(7 << 6)  #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT	9 -#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK	(0x7 << 9) -#define 
L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT	16 -#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT		17 -#define L2X0_AUX_CTRL_WAY_SIZE_MASK		(0x7 << 17) -#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT	22 -#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT		26 -#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT		27 -#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT	28 -#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT	29 -#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT		30 +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK	(7 << 9) +#define L2X0_AUX_CTRL_ASSOC_SHIFT		13 +#define L2X0_AUX_CTRL_ASSOC_MASK		(15 << 13) +/* L2C-210 specific bits */ +#define L210_AUX_CTRL_WRAP_DISABLE		BIT(12) +#define L210_AUX_CTRL_WA_OVERRIDE		BIT(23) +#define L210_AUX_CTRL_EXCLUSIVE_ABORT		BIT(24) +/* L2C-220 specific bits */ +#define L220_AUX_CTRL_EXCLUSIVE_CACHE		BIT(12) +#define L220_AUX_CTRL_FWA_SHIFT			23 +#define L220_AUX_CTRL_FWA_MASK			(3 << 23) +#define L220_AUX_CTRL_NS_LOCKDOWN		BIT(26) +#define L220_AUX_CTRL_NS_INT_CTRL		BIT(27) +/* L2C-310 specific bits */ +#define L310_AUX_CTRL_FULL_LINE_ZERO		BIT(0)	/* R2P0+ */ +#define L310_AUX_CTRL_HIGHPRIO_SO_DEV		BIT(10)	/* R2P0+ */ +#define L310_AUX_CTRL_STORE_LIMITATION		BIT(11)	/* R2P0+ */ +#define L310_AUX_CTRL_EXCLUSIVE_CACHE		BIT(12) +#define L310_AUX_CTRL_ASSOCIATIVITY_16		BIT(16) +#define L310_AUX_CTRL_CACHE_REPLACE_RR		BIT(25)	/* R2P0+ */ +#define L310_AUX_CTRL_NS_LOCKDOWN		BIT(26) +#define L310_AUX_CTRL_NS_INT_CTRL		BIT(27) +#define L310_AUX_CTRL_DATA_PREFETCH		BIT(28) +#define L310_AUX_CTRL_INSTR_PREFETCH		BIT(29) +#define L310_AUX_CTRL_EARLY_BRESP		BIT(30)	/* R2P0+ */ -#define L2X0_LATENCY_CTRL_SETUP_SHIFT	0 -#define L2X0_LATENCY_CTRL_RD_SHIFT	4 -#define L2X0_LATENCY_CTRL_WR_SHIFT	8 +#define L310_LATENCY_CTRL_SETUP(n)		((n) << 0) +#define L310_LATENCY_CTRL_RD(n)			((n) << 4) +#define L310_LATENCY_CTRL_WR(n)			((n) << 8) -#define L2X0_ADDR_FILTER_EN		1 +#define L310_ADDR_FILTER_EN		1 + +#define L310_PREFETCH_CTRL_OFFSET_MASK		0x1f +#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR	BIT(23) +#define L310_PREFETCH_CTRL_PREFETCH_DROP	BIT(24) +#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP	BIT(27) +#define L310_PREFETCH_CTRL_DATA_PREFETCH	BIT(28) +#define L310_PREFETCH_CTRL_INSTR_PREFETCH	BIT(29) +#define L310_PREFETCH_CTRL_DBL_LINEFILL		BIT(30)  #define L2X0_CTRL_EN			1 @@ -131,6 +171,7 @@ struct l2x0_regs {  	unsigned long prefetch_ctrl;  	unsigned long pwr_ctrl;  	unsigned long ctrl; +	unsigned long aux2_ctrl;  };  extern struct l2x0_regs l2x0_saved_regs; diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h index 0cf7a6b842f..ad774f37c47 100644 --- a/arch/arm/include/asm/hardware/coresight.h +++ b/arch/arm/include/asm/hardware/coresight.h @@ -24,8 +24,8 @@  #define TRACER_TIMEOUT 10000  #define etm_writel(t, v, x) \ -	(__raw_writel((v), (t)->etm_regs + (x))) -#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x))) +	(writel_relaxed((v), (t)->etm_regs + (x))) +#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))  /* CoreSight Management Registers */  #define CSMR_LOCKACCESS 0xfb0 @@ -142,8 +142,8 @@  #define ETBFF_TRIGFL		BIT(10)  #define etb_writel(t, v, x) \ -	(__raw_writel((v), (t)->etb_regs + (x))) -#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x))) +	(writel_relaxed((v), (t)->etb_regs + (x))) +#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))  #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)  #define etm_unlock(t) \ diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 
9b28f1243bd..240b29ef17d 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,  	return slot_cnt;  } -static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) -{ -	return 0; -} - -static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, -					struct iop_adma_chan *chan) -{ -	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - -	switch (chan->device->id) { -	case DMA0_ID: -	case DMA1_ID: -		return hw_desc.dma->dest_addr; -	case AAU_ID: -		return hw_desc.aau->dest_addr; -	default: -		BUG(); -	} -	return 0; -} - - -static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, -					  struct iop_adma_chan *chan) -{ -	BUG(); -	return 0; -} -  static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,  					struct iop_adma_chan *chan)  { diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h deleted file mode 100644 index 9eda7dc92ad..00000000000 --- a/arch/arm/include/asm/hardware/iop3xx-gpio.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * arch/arm/include/asm/hardware/iop3xx-gpio.h - * - * IOP3xx GPIO wrappers - * - * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org> - * Based on IXP4XX gpio.h file - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H -#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H - -#include <mach/hardware.h> -#include <asm-generic/gpio.h> - -#define __ARM_GPIOLIB_COMPLEX - -#define IOP3XX_N_GPIOS	8 - -static inline int gpio_get_value(unsigned gpio) -{ -	if (gpio > IOP3XX_N_GPIOS) -		return __gpio_get_value(gpio); - -	return gpio_line_get(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ -	if (gpio > IOP3XX_N_GPIOS) { -		__gpio_set_value(gpio, value); -		return; -	} -	gpio_line_set(gpio, value); -} - -static inline int gpio_cansleep(unsigned gpio) -{ -	if (gpio < IOP3XX_N_GPIOS) -		return 0; -	else -		return __gpio_cansleep(gpio); -} - -/* - * The GPIOs are not generating any interrupt - * Note : manuals are not clear about this - */ -static inline int gpio_to_irq(int gpio) -{ -	return -EINVAL; -} - -static inline int irq_to_gpio(int gpio) -{ -	return -EINVAL; -} - -#endif - diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 423744bf18e..2594a95ff19 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -18,16 +18,9 @@  /*   * IOP3XX GPIO handling   */ -#define GPIO_IN			0 -#define GPIO_OUT		1 -#define GPIO_LOW		0 -#define GPIO_HIGH		1  #define IOP3XX_GPIO_LINE(x)	(x)  #ifndef __ASSEMBLY__ -extern void gpio_line_config(int line, int direction); -extern int  gpio_line_get(int line); -extern void gpio_line_set(int line, int value);  extern int init_atu;  extern int iop3xx_get_init_atu(void);  #endif @@ -168,11 +161,6 @@ extern int iop3xx_get_init_atu(void);  /* PERCR0 DOESN'T EXIST - index from 1! 
*/  #define IOP3XX_PERCR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0710) -/* General Purpose I/O  */ -#define IOP3XX_GPOE		(volatile u32 *)IOP3XX_GPIO_REG(0x0000) -#define IOP3XX_GPID		(volatile u32 *)IOP3XX_GPIO_REG(0x0004) -#define IOP3XX_GPOD		(volatile u32 *)IOP3XX_GPIO_REG(0x0008) -  /* Timers  */  #define IOP3XX_TU_TMR0		(volatile u32 *)IOP3XX_TIMER_REG(0x0000)  #define IOP3XX_TU_TMR1		(volatile u32 *)IOP3XX_TIMER_REG(0x0004) diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 122f86d8c99..250760e0810 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -82,8 +82,6 @@ struct iop_adma_chan {   * @slot_cnt: total slots used in an transaction (group of operations)   * @slots_per_op: number of slots per operation   * @idx: pool index - * @unmap_src_cnt: number of xor sources - * @unmap_len: transaction bytecount   * @tx_list: list of descriptors that are associated with one operation   * @async_tx: support for the async_tx api   * @group_list: list of slots that make up a multi-descriptor transaction @@ -99,8 +97,6 @@ struct iop_adma_desc_slot {  	u16 slot_cnt;  	u16 slots_per_op;  	u16 idx; -	u16 unmap_src_cnt; -	size_t unmap_len;  	struct list_head tx_list;  	struct dma_async_tx_descriptor async_tx;  	union { diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 91b99abe7a9..535579511ed 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -18,6 +18,7 @@  	} while (0)  extern pte_t *pkmap_page_table; +extern pte_t *fixmap_page_table;  extern void *kmap_high(struct page *page);  extern void kunmap_high(struct page *page); diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index eef55ea9ef0..8e427c7b442 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -51,6 +51,7 @@ static inline void decode_ctrl_reg(u32 reg,  #define ARM_DEBUG_ARCH_V7_ECP14	3  #define ARM_DEBUG_ARCH_V7_MM	4  #define ARM_DEBUG_ARCH_V7_1	5 +#define ARM_DEBUG_ARCH_V8	6  /* Breakpoint */  #define ARM_BREAKPOINT_EXECUTE	0 diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index 6ff56eca3f1..6e183fd269f 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -9,6 +9,7 @@   * instruction set this cpu supports.   */  #define ELF_HWCAP	(elf_hwcap) -extern unsigned int elf_hwcap; +#define ELF_HWCAP2	(elf_hwcap2) +extern unsigned int elf_hwcap, elf_hwcap2;  #endif  #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d070741b2b3..3d23418cbdd 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -24,9 +24,11 @@  #ifdef __KERNEL__  #include <linux/types.h> +#include <linux/blk_types.h>  #include <asm/byteorder.h>  #include <asm/memory.h>  #include <asm-generic/pci_iomap.h> +#include <xen/xen.h>  /*   * ISA I/O bus memory addresses are 1:1 with the physical address. @@ -36,6 +38,12 @@  #define isa_bus_to_virt phys_to_virt  /* + * Atomic MMIO-wide IO modify + */ +extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set); +extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set); + +/*   * Generic IO read/write.  These perform native-endian accesses.  Note   * that some architectures will want to re-define __raw_{read,write}w.   
*/ @@ -171,6 +179,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr)  /* PCI fixed i/o mapping */  #define PCI_IO_VIRT_BASE	0xfee00000 +#if defined(CONFIG_PCI) +void pci_ioremap_set_mem_type(int mem_type); +#else +static inline void pci_ioremap_set_mem_type(int mem_type) {} +#endif +  extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);  /* @@ -327,7 +341,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);   */  #define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)  #define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE) -#define ioremap_cached(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED) +#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)  #define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)  #define iounmap				__arm_iounmap @@ -372,6 +386,13 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);  #define BIOVEC_MERGEABLE(vec1, vec2)	\  	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) +struct bio_vec; +extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, +				      const struct bio_vec *vec2); +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\ +	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\ +	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) +  #ifdef CONFIG_MMU  #define ARCH_HAS_VALID_PHYS_ADDR_RANGE  extern int valid_phys_addr_range(phys_addr_t addr, size_t size); diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h index bfc198c7591..70f9b9bfb1f 100644 --- a/arch/arm/include/asm/jump_label.h +++ b/arch/arm/include/asm/jump_label.h @@ -4,7 +4,6 @@  #ifdef __KERNEL__  #include <linux/types.h> -#include <asm/system.h>  #define JUMP_LABEL_NOP_SIZE 4 @@ -16,7 +15,7 @@  static __always_inline bool arch_static_branch(struct static_key *key)  { -	asm goto("1:\n\t" +	asm_volatile_goto("1:\n\t"  		 JUMP_LABEL_NOP "\n\t"  		 ".pushsection __jump_table,  \"aw\"\n\t"  		 ".word 1b, %l[l_yes], %c0\n\t" diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 48066ce9ea3..0a9d5dd9329 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -11,6 +11,7 @@  #define __ARM_KGDB_H__  #include <linux/ptrace.h> +#include <asm/opcodes.h>  /*   * GDB assumes that we're a user process being debugged, so @@ -41,7 +42,7 @@  static inline void arch_kgdb_breakpoint(void)  { -	asm(".word 0xe7ffdeff"); +	asm(__inst_arm(0xe7ffdeff));  }  extern void kgdb_handle_bus_error(void); diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index f82ec22eeb1..49fa0dfaad3 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -18,7 +18,7 @@  #include <linux/types.h>  #include <linux/ptrace.h> -#include <linux/percpu.h> +#include <linux/notifier.h>  #define __ARCH_WANT_KPROBES_INSN_SLOT  #define MAX_INSN_SIZE			2 @@ -28,21 +28,10 @@  #define kretprobe_blacklist_size	0  typedef u32 kprobe_opcode_t; -  struct kprobe; -typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); -typedef unsigned long (kprobe_check_cc)(unsigned long); -typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *); -typedef void (kprobe_insn_fn_t)(void); +#include <asm/probes.h> -/* Architecture specific copy of original instruction. 
*/ -struct arch_specific_insn { -	kprobe_opcode_t			*insn; -	kprobe_insn_handler_t		*insn_handler; -	kprobe_check_cc			*insn_check_cc; -	kprobe_insn_singlestep_t	*insn_singlestep; -	kprobe_insn_fn_t		*insn_fn; -}; +#define	arch_specific_insn	arch_probes_insn  struct prev_kprobe {  	struct kprobe *kp; diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 64e96960de2..816db0bf2dd 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -55,8 +55,10 @@   * The bits we set in HCR:   * TAC:		Trap ACTLR   * TSC:		Trap SMC + * TVM:		Trap VM ops (until MMU and caches are on)   * TSW:		Trap cache operations by set/way   * TWI:		Trap WFI + * TWE:		Trap WFE   * TIDCP:	Trap L2CTLR/L2ECTLR   * BSU_IS:	Upgrade barriers to the inner shareable domain   * FB:		Force broadcast of all maintainance operations @@ -67,8 +69,7 @@   */  #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \  			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ -			HCR_SWIO | HCR_TIDCP) -#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) +			HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)  /* System Control Register (SCTLR) bits */  #define SCTLR_TE	(1 << 30) @@ -95,12 +96,12 @@  #define TTBCR_IRGN1	(3 << 24)  #define TTBCR_EPD1	(1 << 23)  #define TTBCR_A1	(1 << 22) -#define TTBCR_T1SZ	(3 << 16) +#define TTBCR_T1SZ	(7 << 16)  #define TTBCR_SH0	(3 << 12)  #define TTBCR_ORGN0	(3 << 10)  #define TTBCR_IRGN0	(3 << 8)  #define TTBCR_EPD0	(1 << 7) -#define TTBCR_T0SZ	3 +#define TTBCR_T0SZ	(7 << 0)  #define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)  /* Hyp System Trap Register */ @@ -208,6 +209,8 @@  #define HSR_EC_DABT	(0x24)  #define HSR_EC_DABT_HYP	(0x25) +#define HSR_WFI_IS_WFE		(1U << 0) +  #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)  #define HSR_DABT_S1PTW		(1U << 7) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index a2f43ddcc30..53b3c4a50d5 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -39,7 +39,7 @@  #define c6_IFAR		17	/* Instruction Fault Address Register */  #define c7_PAR		18	/* Physical Address Register */  #define c7_PAR_high	19	/* PAR top 32 bits */ -#define c9_L2CTLR	20	/* Cortex A15 L2 Control Register */ +#define c9_L2CTLR	20	/* Cortex A15/A7 L2 Control Register */  #define c10_PRRR	21	/* Primary Region Remap Register */  #define c10_NMRR	22	/* Normal Memory Remap Register */  #define c12_VBAR	23	/* Vector Base Address Register */ @@ -48,7 +48,9 @@  #define c13_TID_URO	26	/* Thread ID, User R/O */  #define c13_TID_PRIV	27	/* Thread ID, Privileged */  #define c14_CNTKCTL	28	/* Timer Control Register (PL1) */ -#define NR_CP15_REGS	29	/* Number of regs (incl. invalid) */ +#define c10_AMAIR0	29	/* Auxilary Memory Attribute Indirection Reg0 */ +#define c10_AMAIR1	30	/* Auxilary Memory Attribute Indirection Reg1 */ +#define NR_CP15_REGS	31	/* Number of regs (incl. 
invalid) */  #define ARM_EXCEPTION_RESET	  0  #define ARM_EXCEPTION_UNDEFINED   1 diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index a464e8d7b6c..0fa90c962ac 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -157,4 +157,55 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)  	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;  } +static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) +{ +	return vcpu->arch.cp15[c0_MPIDR]; +} + +static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) +{ +	*vcpu_cpsr(vcpu) |= PSR_E_BIT; +} + +static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) +{ +	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT); +} + +static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, +						    unsigned long data, +						    unsigned int len) +{ +	if (kvm_vcpu_is_be(vcpu)) { +		switch (len) { +		case 1: +			return data & 0xff; +		case 2: +			return be16_to_cpu(data & 0xffff); +		default: +			return be32_to_cpu(data); +		} +	} + +	return data;		/* Leave LE untouched */ +} + +static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, +						    unsigned long data, +						    unsigned int len) +{ +	if (kvm_vcpu_is_be(vcpu)) { +		switch (len) { +		case 1: +			return data & 0xff; +		case 2: +			return cpu_to_be16(data & 0xffff); +		default: +			return cpu_to_be32(data); +		} +	} + +	return data;		/* Leave LE untouched */ +} +  #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 7d22517d807..193ceaf01bf 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -36,12 +36,7 @@  #define KVM_COALESCED_MMIO_PAGE_OFFSET 1  #define KVM_HAVE_ONE_REG -#define KVM_VCPU_MAX_FEATURES 1 - -/* We don't currently support large pages. 
*/ -#define KVM_HPAGE_GFN_SHIFT(x)	0 -#define KVM_NR_PAGE_SIZES	1 -#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31) +#define KVM_VCPU_MAX_FEATURES 2  #include <kvm/arm_vgic.h> @@ -106,6 +101,12 @@ struct kvm_vcpu_arch {  	/* The CPU type we expose to the VM */  	u32 midr; +	/* HYP trapping configuration */ +	u32 hcr; + +	/* Interrupt related fields */ +	u32 irq_lines;		/* IRQ and FIQ levels */ +  	/* Exception Information */  	struct kvm_vcpu_fault_info fault; @@ -133,9 +134,6 @@ struct kvm_vcpu_arch {  	/* IO related fields */  	struct kvm_decode mmio_decode; -	/* Interrupt related fields */ -	u32 irq_lines;		/* IRQ and FIQ levels */ -  	/* Cache some mmu pages needed inside spinlock regions */  	struct kvm_mmu_memory_cache mmu_page_cache; @@ -154,6 +152,7 @@ struct kvm_vcpu_stat {  struct kvm_vcpu_init;  int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,  			const struct kvm_vcpu_init *init); +int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);  unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);  int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);  struct kvm_one_reg; @@ -229,4 +228,7 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)  int kvm_perf_init(void);  int kvm_perf_teardown(void); +u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); +int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); +  #endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 9b28c41f4ba..5c7aa3c1519 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -62,6 +62,12 @@ phys_addr_t kvm_get_idmap_vector(void);  int kvm_mmu_init(void);  void kvm_clear_hyp_idmap(void); +static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd) +{ +	*pmd = new_pmd; +	flush_pmd_entry(pmd); +} +  static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)  {  	*pte = new_pte; @@ -103,10 +109,39 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)  	pte_val(*pte) |= L_PTE_S2_RDWR;  } +static inline void kvm_set_s2pmd_writable(pmd_t *pmd) +{ +	pmd_val(*pmd) |= L_PMD_S2_RDWR; +} + +/* Open coded p*d_addr_end that can deal with 64bit addresses */ +#define kvm_pgd_addr_end(addr, end)					\ +({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\ +	(__boundary - 1 < (end) - 1)? __boundary: (end);		\ +}) + +#define kvm_pud_addr_end(addr,end)		(end) + +#define kvm_pmd_addr_end(addr, end)					\ +({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\ +	(__boundary - 1 < (end) - 1)? __boundary: (end);		\ +}) +  struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l)) + +static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) +{ +	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; +} + +static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, +					     unsigned long size)  { +	if (!vcpu_has_cache_enabled(vcpu)) +		kvm_flush_dcache_to_poc((void *)hva, size); +	  	/*  	 * If we are going to insert an instruction page and the icache is  	 * either VIPT or PIPT, there is a potential problem where the host @@ -120,15 +155,16 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)  	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).  	 
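	 *
	 * As a sketch of the expected call site (assuming the usual
	 * stage-2 fault path; kvm_set_pte() is from this patch series,
	 * not this hunk), the page is made coherent before the mapping
	 * is installed, roughly:
	 *
	 *	coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
	 *	kvm_set_pte(ptep, new_pte);
	 *
	 * so the guest cannot fetch stale data or stale instructions
	 * through the new stage-2 translation.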
*/  	if (icache_is_pipt()) { -		unsigned long hva = gfn_to_hva(kvm, gfn); -		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE); +		__cpuc_coherent_user_range(hva, hva + size);  	} else if (!icache_is_vivt_asid_tagged()) {  		/* any kind of VIPT cache */  		__flush_icache_all();  	}  } -#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l)) +#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x)) + +void stage2_flush_vm(struct kvm *kvm);  #endif	/* !__ASSEMBLY__ */ diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h index 9a83d98bf17..6bda945d31f 100644 --- a/arch/arm/include/asm/kvm_psci.h +++ b/arch/arm/include/asm/kvm_psci.h @@ -18,6 +18,10 @@  #ifndef __ARM_KVM_PSCI_H__  #define __ARM_KVM_PSCI_H__ -bool kvm_psci_call(struct kvm_vcpu *vcpu); +#define KVM_ARM_PSCI_0_1	1 +#define KVM_ARM_PSCI_0_2	2 + +int kvm_psci_version(struct kvm_vcpu *vcpu); +int kvm_psci_call(struct kvm_vcpu *vcpu);  #endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 402a2bc6aa6..0406cb3f1af 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -14,7 +14,6 @@  #include <linux/reboot.h>  struct tag; -struct meminfo;  struct pt_regs;  struct smp_operations;  #ifdef CONFIG_SMP @@ -45,10 +44,14 @@ struct machine_desc {  	unsigned char		reserve_lp1 :1;	/* never has lp1	*/  	unsigned char		reserve_lp2 :1;	/* never has lp2	*/  	enum reboot_mode	reboot_mode;	/* default restart mode	*/ +	unsigned		l2c_aux_val;	/* L2 cache aux value	*/ +	unsigned		l2c_aux_mask;	/* L2 cache aux mask	*/ +	void			(*l2c_write_sec)(unsigned long, unsigned);  	struct smp_operations	*smp;		/* SMP operations	*/  	bool			(*smp_init)(void); -	void			(*fixup)(struct tag *, char **, -					 struct meminfo *); +	void			(*fixup)(struct tag *, char **); +	void			(*dt_fixup)(void); +	void			(*init_meminfo)(void);  	void			(*reserve)(void);/* reserve mem blocks	*/  	void			(*map_io)(void);/* IO mapping function	*/  	void			(*init_early)(void); diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 2fe141fcc8d..f98c7f32c9c 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -22,18 +22,21 @@ struct map_desc {  };  /* types 0-3 are defined in asm/io.h */ -#define MT_UNCACHED		4 -#define MT_CACHECLEAN		5 -#define MT_MINICLEAN		6 -#define MT_LOW_VECTORS		7 -#define MT_HIGH_VECTORS		8 -#define MT_MEMORY		9 -#define MT_ROM			10 -#define MT_MEMORY_NONCACHED	11 -#define MT_MEMORY_DTCM		12 -#define MT_MEMORY_ITCM		13 -#define MT_MEMORY_SO		14 -#define MT_MEMORY_DMA_READY	15 +enum { +	MT_UNCACHED = 4, +	MT_CACHECLEAN, +	MT_MINICLEAN, +	MT_LOW_VECTORS, +	MT_HIGH_VECTORS, +	MT_MEMORY_RWX, +	MT_MEMORY_RW, +	MT_ROM, +	MT_MEMORY_RWX_NONCACHED, +	MT_MEMORY_RW_DTCM, +	MT_MEMORY_RWX_ITCM, +	MT_MEMORY_RW_SO, +	MT_MEMORY_DMA_READY, +};  #ifdef CONFIG_MMU  extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 454d642a407..7fc42784bec 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -106,8 +106,4 @@ extern int dc21285_setup(int nr, struct pci_sys_data *);  extern void dc21285_preinit(void);  extern void dc21285_postinit(void); -extern struct pci_ops via82c505_ops; -extern int via82c505_setup(int nr, struct pci_sys_data *); -extern void via82c505_init(void *sysdata); -  #endif /* __ASM_MACH_PCI_H */ diff --git a/arch/arm/include/asm/mcpm.h 
b/arch/arm/include/asm/mcpm.h index 0f7b7620e9a..94060adba17 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -42,10 +42,25 @@ extern void mcpm_entry_point(void);  void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);  /* + * This sets an early poke i.e a value to be poked into some address + * from very early assembly code before the CPU is ungated.  The + * address must be physical, and if 0 then nothing will happen. + */ +void mcpm_set_early_poke(unsigned cpu, unsigned cluster, +			 unsigned long poke_phys_addr, unsigned long poke_val); + +/*   * CPU/cluster power operations API for higher subsystems to use.   */  /** + * mcpm_is_available - returns whether MCPM is initialized and available + * + * This returns true or false accordingly. + */ +bool mcpm_is_available(void); + +/**   * mcpm_cpu_power_up - make given CPU in given cluster runable   *   * @cpu: CPU number within given cluster @@ -76,12 +91,45 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);   *   * This must be called with interrupts disabled.   * - * This does not return.  Re-entry in the kernel is expected via - * mcpm_entry_point. + * On success this does not return.  Re-entry in the kernel is expected + * via mcpm_entry_point. + * + * This will return if mcpm_platform_register() has not been called + * previously in which case the caller should take appropriate action. + * + * On success, the CPU is not guaranteed to be truly halted until + * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the + * specified cpu.  Until then, other CPUs should make sure they do not + * trash memory the target CPU might be executing/accessing.   */  void mcpm_cpu_power_down(void);  /** + * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and + *	make sure it is powered off + * + * @cpu: CPU number within given cluster + * @cluster: cluster number for the CPU + * + * Call this function to ensure that a pending powerdown has taken + * effect and the CPU is safely parked before performing non-mcpm + * operations that may affect the CPU (such as kexec trashing the + * kernel text). + * + * It is *not* necessary to call this function if you only need to + * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup + * event. + * + * Do not call this function unless the specified CPU has already + * called mcpm_cpu_power_down() or has committed to doing so. + * + * @return: + *	- zero if the CPU is in a safely parked state + *	- nonzero otherwise (e.g., timeout) + */ +int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster); + +/**   * mcpm_cpu_suspend - bring the calling CPU in a suspended state   *   * @expected_residency: duration in microseconds the CPU is expected @@ -98,8 +146,11 @@ void mcpm_cpu_power_down(void);   *   * This must be called with interrupts disabled.   * - * This does not return.  Re-entry in the kernel is expected via - * mcpm_entry_point. + * On success this does not return.  Re-entry in the kernel is expected + * via mcpm_entry_point. + * + * This will return if mcpm_platform_register() has not been called + * previously in which case the caller should take appropriate action.   
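 *
 * Illustrative usage (a sketch; cpu_resume and residency_us are
 * assumptions, not part of this header): a cpuidle driver entering a
 * deep state would typically do
 *
 *	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
 *	mcpm_cpu_suspend(residency_us);
 *
 * and, after wakeup, re-enter the kernel through mcpm_entry_point.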
*/  void mcpm_cpu_suspend(u64 expected_residency); @@ -120,6 +171,7 @@ int mcpm_cpu_powered_up(void);  struct mcpm_platform_ops {  	int (*power_up)(unsigned int cpu, unsigned int cluster);  	void (*power_down)(void); +	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);  	void (*suspend)(u64);  	void (*powered_up)(void);  }; @@ -156,8 +208,6 @@ struct sync_struct {  	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];  }; -extern unsigned long sync_phys;	/* physical address of *mcpm_sync */ -  void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);  void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);  void __mcpm_outbound_leave_critical(unsigned int cluster, int state); diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h index c2f5102ae65..bf47a6c110a 100644 --- a/arch/arm/include/asm/memblock.h +++ b/arch/arm/include/asm/memblock.h @@ -1,10 +1,9 @@  #ifndef _ASM_ARM_MEMBLOCK_H  #define _ASM_ARM_MEMBLOCK_H -struct meminfo;  struct machine_desc; -void arm_memblock_init(struct meminfo *, const struct machine_desc *); +void arm_memblock_init(const struct machine_desc *);  phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);  #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index e750a938fd3..2b751464d6f 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -30,14 +30,15 @@   */  #define UL(x) _AC(x, UL) +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET) +  #ifdef CONFIG_MMU  /* - * PAGE_OFFSET - the virtual address of the start of the kernel image   * TASK_SIZE - the maximum size of a user space task.   * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area   */ -#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)  #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))  #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M) @@ -82,8 +83,6 @@   */  #define IOREMAP_MAX_ORDER	24 -#define CONSISTENT_END		(0xffe00000UL) -  #else /* CONFIG_MMU */  /* @@ -100,23 +99,15 @@  #define TASK_UNMAPPED_BASE	UL(0x00000000)  #endif -#ifndef PHYS_OFFSET -#define PHYS_OFFSET 		UL(CONFIG_DRAM_BASE) -#endif -  #ifndef END_MEM  #define END_MEM     		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)  #endif -#ifndef PAGE_OFFSET -#define PAGE_OFFSET		(PHYS_OFFSET) -#endif -  /*   * The module can be at any place in ram in nommu mode.   */  #define MODULES_END		(END_MEM) -#define MODULES_VADDR		(PHYS_OFFSET) +#define MODULES_VADDR		PAGE_OFFSET  #define XIP_VIRT_ADDR(physaddr)  (physaddr) @@ -157,24 +148,52 @@  #endif  #define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1) +/* + * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical + * memory.  This is used for XIP and NoMMU kernels, or by kernels which + * have their own mach/memory.h.  Assembly code must always use + * PLAT_PHYS_OFFSET and not PHYS_OFFSET. + */ +#ifndef PLAT_PHYS_OFFSET +#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET) +#endif +  #ifndef __ASSEMBLY__  /*   * Physical vs virtual RAM address space conversion.  These are   * private definitions which should NOT be used outside memory.h   * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead. + * + * PFNs are used to describe any physical page; this means + * PFN 0 == physical address 0.   
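 *
 * Worked example (illustrative, using values from a typical config,
 * not mandated by this file): with CONFIG_PHYS_OFFSET = 0x80000000 and
 * 4K pages, PHYS_PFN_OFFSET is 0x80000, and for a lowmem address
 *
 *	virt_to_pfn(kaddr) == ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT)
 *					+ PHYS_PFN_OFFSET
 *
 * so PFNs count from physical address 0, not from the start of RAM.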
*/ -#ifndef __virt_to_phys -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +#if defined(__virt_to_phys) +#define PHYS_OFFSET	PLAT_PHYS_OFFSET +#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) + +#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) + +#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)  /*   * Constants used to force the right instruction encodings and shifts   * so that all we need to do is modify the 8-bit constant field.   */  #define __PV_BITS_31_24	0x81000000 +#define __PV_BITS_7_0	0x81 -extern unsigned long __pv_phys_offset; -#define PHYS_OFFSET __pv_phys_offset +extern unsigned long __pv_phys_pfn_offset; +extern u64 __pv_offset; +extern void fixup_pv_table(const void *, unsigned long); +extern const void *__pv_table_begin, *__pv_table_end; + +#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT) +#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset) + +#define virt_to_pfn(kaddr) \ +	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ +	 PHYS_PFN_OFFSET)  #define __pv_stub(from,to,instr,type)			\  	__asm__("@ __pv_stub\n"				\ @@ -185,45 +204,73 @@ extern unsigned long __pv_phys_offset;  	: "=r" (to)					\  	: "r" (from), "I" (type)) -static inline unsigned long __virt_to_phys(unsigned long x) +#define __pv_stub_mov_hi(t)				\ +	__asm__ volatile("@ __pv_stub_mov\n"		\ +	"1:	mov	%R0, %1\n"			\ +	"	.pushsection .pv_table,\"a\"\n"		\ +	"	.long	1b\n"				\ +	"	.popsection\n"				\ +	: "=r" (t)					\ +	: "I" (__PV_BITS_7_0)) + +#define __pv_add_carry_stub(x, y)			\ +	__asm__ volatile("@ __pv_add_carry_stub\n"	\ +	"1:	adds	%Q0, %1, %2\n"			\ +	"	adc	%R0, %R0, #0\n"			\ +	"	.pushsection .pv_table,\"a\"\n"		\ +	"	.long	1b\n"				\ +	"	.popsection\n"				\ +	: "+r" (y)					\ +	: "r" (x), "I" (__PV_BITS_31_24)		\ +	: "cc") + +static inline phys_addr_t __virt_to_phys(unsigned long x)  { -	unsigned long t; -	__pv_stub(x, t, "add", __PV_BITS_31_24); +	phys_addr_t t; + +	if (sizeof(phys_addr_t) == 4) { +		__pv_stub(x, t, "add", __PV_BITS_31_24); +	} else { +		__pv_stub_mov_hi(t); +		__pv_add_carry_stub(x, t); +	}  	return t;  } -static inline unsigned long __phys_to_virt(unsigned long x) +static inline unsigned long __phys_to_virt(phys_addr_t x)  {  	unsigned long t; -	__pv_stub(x, t, "sub", __PV_BITS_31_24); + +	/* +	 * 'unsigned long' cast discard upper word when +	 * phys_addr_t is 64 bit, and makes sure that inline +	 * assembler expression receives 32 bit argument +	 * in place where 'r' 32 bit operand is expected. +	 */ +	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);  	return t;  } +  #else -#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET) -#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET) -#endif -#endif -#endif /* __ASSEMBLY__ */ -#ifndef PHYS_OFFSET -#ifdef PLAT_PHYS_OFFSET  #define PHYS_OFFSET	PLAT_PHYS_OFFSET -#else -#define PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET) -#endif -#endif +#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) -#ifndef __ASSEMBLY__ +static inline phys_addr_t __virt_to_phys(unsigned long x) +{ +	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; +} -/* - * PFNs are used to describe any physical page; this means - * PFN 0 == physical address 0. - * - * This is the PFN of the first RAM page in the kernel - * direct-mapped view.  We assume this is the first page - * of RAM in the mem_map as well. 
- */ -#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) +static inline unsigned long __phys_to_virt(phys_addr_t x) +{ +	return x - PHYS_OFFSET + PAGE_OFFSET; +} + +#define virt_to_pfn(kaddr) \ +	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ +	 PHYS_PFN_OFFSET) + +#endif  /*   * These are *only* valid on the kernel direct mapped RAM memory. @@ -238,16 +285,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)  static inline void *phys_to_virt(phys_addr_t x)  { -	return (void *)(__phys_to_virt((unsigned long)(x))); +	return (void *)__phys_to_virt(x);  }  /*   * Drivers should NOT use these either.   */  #define __pa(x)			__virt_to_phys((unsigned long)(x)) -#define __va(x)			((void *)__phys_to_virt((unsigned long)(x))) +#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))  #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT) +extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); + +/* + * These are for systems that have a hardware interconnect supported alias of + * physical memory for idmap purposes.  Most cases should leave these + * untouched. + */ +static inline phys_addr_t __virt_to_idmap(unsigned long x) +{ +	if (arch_virt_to_idmap) +		return arch_virt_to_idmap(x); +	else +		return __virt_to_phys(x); +} + +#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x)) +  /*   * Virtual <-> DMA view memory address translations   * Again, these are *only* valid on the kernel direct mapped RAM @@ -284,8 +348,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)   */  #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET -#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) +#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr)) +#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ +					&& pfn_valid(virt_to_pfn(kaddr)))  #endif diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 6f18da09668..64fd15159b7 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -16,7 +16,7 @@ typedef struct {  #ifdef CONFIG_CPU_HAS_ASID  #define ASID_BITS	8  #define ASID_MASK	((~0ULL) << ASID_BITS) -#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK) +#define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))  #else  #define ASID(mm)	(0)  #endif diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index f94784f0e3a..891a56b35bc 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -28,53 +28,84 @@ struct outer_cache_fns {  	void (*clean_range)(unsigned long, unsigned long);  	void (*flush_range)(unsigned long, unsigned long);  	void (*flush_all)(void); -	void (*inv_all)(void);  	void (*disable)(void);  #ifdef CONFIG_OUTER_CACHE_SYNC  	void (*sync)(void);  #endif -	void (*set_debug)(unsigned long);  	void (*resume)(void); + +	/* This is an ARM L2C thing */ +	void (*write_sec)(unsigned long, unsigned);  };  extern struct outer_cache_fns outer_cache;  #ifdef CONFIG_OUTER_CACHE - +/** + * outer_inv_range - invalidate range of outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */  static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)  {  	if (outer_cache.inv_range)  		outer_cache.inv_range(start, end);  } + +/** + * outer_clean_range - 
clean dirty outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */  static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)  {  	if (outer_cache.clean_range)  		outer_cache.clean_range(start, end);  } + +/** + * outer_flush_range - clean and invalidate outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */  static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)  {  	if (outer_cache.flush_range)  		outer_cache.flush_range(start, end);  } +/** + * outer_flush_all - clean and invalidate all cache lines in the outer cache + * + * Note: depending on implementation, this may not be atomic - it must + * only be called with interrupts disabled and no other active outer + * cache masters. + * + * It is intended that this function is only used by implementations + * needing to override the outer_cache.disable() method due to security. + * (Some implementations perform this as a clean followed by an invalidate.) + */  static inline void outer_flush_all(void)  {  	if (outer_cache.flush_all)  		outer_cache.flush_all();  } -static inline void outer_inv_all(void) -{ -	if (outer_cache.inv_all) -		outer_cache.inv_all(); -} - -static inline void outer_disable(void) -{ -	if (outer_cache.disable) -		outer_cache.disable(); -} +/** + * outer_disable - clean, invalidate and disable the outer cache + * + * Disable the outer cache, ensuring that any data contained in the outer + * cache is pushed out to lower levels of system memory.  The note and + * conditions above concerning outer_flush_all() applies here. + */ +extern void outer_disable(void); +/** + * outer_resume - restore the cache configuration and re-enable outer cache + * + * Restore any configuration that the cache had when previously enabled, + * and re-enable the outer cache. + */  static inline void outer_resume(void)  {  	if (outer_cache.resume) @@ -90,13 +121,18 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)  static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)  { }  static inline void outer_flush_all(void) { } -static inline void outer_inv_all(void) { }  static inline void outer_disable(void) { }  static inline void outer_resume(void) { }  #endif  #ifdef CONFIG_OUTER_CACHE_SYNC +/** + * outer_sync - perform a sync point for outer cache + * + * Ensure that all outer cache operations are complete and any store + * buffers are drained. + */  static inline void outer_sync(void)  {  	if (outer_cache.sync) diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index a98a2e112fa..7e95d8535e2 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -31,11 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)  }  #endif /* CONFIG_PCI_DOMAINS */ -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ -	/* We don't do dynamic PCI IRQ allocation */ -} -  /*   * The PCI address space does equal the physical memory address space.   * The networking and block device layers use this boolean for bounce @@ -57,12 +52,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,  extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,                                 enum pci_mmap_state mmap_state, int write_combine); -/* - * Dummy implementation; always return 0. - */  static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)  { -	return 0; +	return channel ? 
15 : 14;  }  #endif /* __KERNEL__ */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 943504f53f5..78a77936168 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -102,12 +102,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)  #else  	pte = alloc_pages(PGALLOC_GFP, 0);  #endif -	if (pte) { -		if (!PageHighMem(pte)) -			clean_pte_table(page_address(pte)); -		pgtable_page_ctor(pte); +	if (!pte) +		return NULL; +	if (!PageHighMem(pte)) +		clean_pte_table(page_address(pte)); +	if (!pgtable_page_ctor(pte)) { +		__free_page(pte); +		return NULL;  	} -  	return pte;  } diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index f97ee02386e..219ac88a954 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -140,6 +140,7 @@  #define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */  #define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */  #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */ +#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */  #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)  #ifndef __ASSEMBLY__ @@ -160,6 +161,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)  	return (pmd_t *)pud;  } +#define pmd_large(pmd)		(pmd_val(pmd) & 2)  #define pmd_bad(pmd)		(pmd_val(pmd) & 2)  #define copy_pmd(pmdpd,pmdps)		\ @@ -181,6 +183,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)  #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +/* + * We don't have huge page support for short descriptors, for the moment + * define empty stubs for use by pin_page_for_write. + */ +#define pmd_hugewillfault(pmd)	(0) +#define pmd_thp_or_huge(pmd)	(0) +  #endif /* __ASSEMBLY__ */  #endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 5689c18c85f..85c60adc8b6 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -120,11 +120,16 @@  /*   * 2nd stage PTE definitions for LPAE.   */ -#define L_PTE_S2_MT_UNCACHED	 (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */ -#define L_PTE_S2_RDWR		 (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */ +#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */ +#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ +#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ +#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */ +#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2) + +#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1]   */ +#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */ + +#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */  /*   * Hyp-mode PL2 PTE definitions for LPAE. 
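/*
 * Illustration (not part of this hunk): the stage-2 HAP bits defined
 * above are applied by the KVM MMU helpers, e.g.
 *
 *	pte_val(*pte) |= L_PTE_S2_RDWR;		kvm_set_s2pte_writable()
 *	pmd_val(*pmd) |= L_PMD_S2_RDWR;		kvm_set_s2pmd_writable()
 *
 * setting HAP[2:1] = 0b11 for a read/write guest mapping.
 */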
@@ -140,6 +145,7 @@  						 PMD_TYPE_TABLE)  #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \  						 PMD_TYPE_SECT) +#define pmd_large(pmd)		pmd_sect(pmd)  #define pud_clear(pudp)			\  	do {				\ @@ -206,6 +212,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)  #define __HAVE_ARCH_PMD_WRITE  #define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY)) +#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd)) +#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd)) +  #ifdef CONFIG_TRANSPARENT_HUGEPAGE  #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))  #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index be956dbf6ba..5478e5d6ad8 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -61,7 +61,7 @@ extern void __pgd_error(const char *file, int line, pgd_t);   * mapping to be mapped at.  This is particularly important for   * non-high vector CPUs.   */ -#define FIRST_USER_ADDRESS	PAGE_SIZE +#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)  /*   * Use TASK_SIZE as the ceiling argument for free_pgtables() and @@ -216,13 +216,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)  #define pte_none(pte)		(!pte_val(pte))  #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT) +#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID) +#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))  #define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))  #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)  #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)  #define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))  #define pte_special(pte)	(0) -#define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER)) +#define pte_valid_user(pte)	\ +	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))  #if __LINUX_ARM_ARCH__ < 6  static inline void __sync_icache_dcache(pte_t pteval) @@ -237,7 +240,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,  {  	unsigned long ext = 0; -	if (addr < TASK_SIZE && pte_present_user(pteval)) { +	if (addr < TASK_SIZE && pte_valid_user(pteval)) {  		__sync_icache_dcache(pteval);  		ext |= PTE_EXT_NG;  	} @@ -254,6 +257,8 @@ PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);  PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);  PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);  PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG); +PTE_BIT_FUNC(mkexec,   &= ~L_PTE_XN); +PTE_BIT_FUNC(mknexec,   |= L_PTE_XN);  static inline pte_t pte_mkspecial(pte_t pte) { return pte; } diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index f24edad26c7..ae1919be8f9 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -71,6 +71,8 @@ struct arm_pmu {  	void		(*disable)(struct perf_event *event);  	int		(*get_event_idx)(struct pmu_hw_events *hw_events,  					 struct perf_event *event); +	void		(*clear_event_idx)(struct pmu_hw_events *hw_events, +					 struct perf_event *event);  	int		(*set_event_filter)(struct hw_perf_event *evt,  					    struct perf_event_attr *attr);  	u32		(*read_counter)(struct perf_event *event); diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h new file mode 100644 index 00000000000..806cfe622a9 --- /dev/null +++ b/arch/arm/include/asm/probes.h @@ -0,0 +1,43 @@ +/* + * arch/arm/include/asm/probes.h + * + * Original contents copied from arch/arm/include/asm/kprobes.h + 
* which contains the following notice... + * + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + */ + +#ifndef _ASM_PROBES_H +#define _ASM_PROBES_H + +typedef u32 probes_opcode_t; + +struct arch_probes_insn; +typedef void (probes_insn_handler_t)(probes_opcode_t, +				     struct arch_probes_insn *, +				     struct pt_regs *); +typedef unsigned long (probes_check_cc)(unsigned long); +typedef void (probes_insn_singlestep_t)(probes_opcode_t, +					struct arch_probes_insn *, +					struct pt_regs *); +typedef void (probes_insn_fn_t)(void); + +/* Architecture specific copy of original instruction. */ +struct arch_probes_insn { +	probes_opcode_t			*insn; +	probes_insn_handler_t		*insn_handler; +	probes_check_cc			*insn_check_cc; +	probes_insn_singlestep_t	*insn_singlestep; +	probes_insn_fn_t		*insn_fn; +}; + +#endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 413f3876341..c3d5fc124a0 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -22,6 +22,7 @@  #include <asm/hw_breakpoint.h>  #include <asm/ptrace.h>  #include <asm/types.h> +#include <asm/unified.h>  #ifdef __KERNEL__  #define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \ @@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);  #define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc  #define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp +#ifdef CONFIG_SMP +#define __ALT_SMP_ASM(smp, up)						\ +	"9998:	" smp "\n"						\ +	"	.pushsection \".alt.smp.init\", \"a\"\n"		\ +	"	.long	9998b\n"					\ +	"	" up "\n"						\ +	"	.popsection\n" +#else +#define __ALT_SMP_ASM(smp, up)	up +#endif +  /*   * Prefetching support - only ARMv5.   
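 *
 * Illustrative (hypothetical) use: a list walk can hide memory latency
 * by prefetching the next node while processing the current one,
 *
 *	for (p = head; p; p = p->next) {
 *		prefetch(p->next);
 *		do_something(p);
 *	}
 *
 * prefetch() is only a hint; passing a pointer that may be stale or
 * NULL is safe, since pld never faults.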
*/ @@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)  {  	__asm__ __volatile__(  		"pld\t%a0" -		: -		: "p" (ptr) -		: "cc"); +		:: "p" (ptr));  } +#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)  #define ARCH_HAS_PREFETCHW -#define prefetchw(ptr)	prefetch(ptr) - -#define ARCH_HAS_SPINLOCK_PREFETCH -#define spin_lock_prefetch(x) do { } while (0) - +static inline void prefetchw(const void *ptr) +{ +	__asm__ __volatile__( +		".arch_extension	mp\n" +		__ALT_SMP_ASM( +			WASM(pldw)		"\t%a0", +			WASM(pld)		"\t%a0" +		) +		:: "p" (ptr)); +} +#endif  #endif  #define HAVE_ARCH_PICK_MMAP_LAYOUT diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index 4a2985e2196..cd94ef2ef28 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -11,12 +11,9 @@  #ifndef __ASMARM_PROM_H  #define __ASMARM_PROM_H -#define HAVE_ARCH_DEVTREE_FIXUPS -  #ifdef CONFIG_OF  extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys); -extern void arm_dt_memblock_reserve(void);  extern void __init arm_dt_init_cpu_maps(void);  #else /* CONFIG_OF */ @@ -26,7 +23,6 @@ static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)  	return NULL;  } -static inline void arm_dt_memblock_reserve(void) { }  static inline void arm_dt_init_cpu_maps(void) { }  #endif /* CONFIG_OF */ diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h index c4ae171850f..c25ef3ec6d1 100644 --- a/arch/arm/include/asm/psci.h +++ b/arch/arm/include/asm/psci.h @@ -29,16 +29,19 @@ struct psci_operations {  	int (*cpu_off)(struct psci_power_state state);  	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);  	int (*migrate)(unsigned long cpuid); +	int (*affinity_info)(unsigned long target_affinity, +			unsigned long lowest_affinity_level); +	int (*migrate_info_type)(void);  };  extern struct psci_operations psci_ops;  extern struct smp_operations psci_smp_ops;  #ifdef CONFIG_ARM_PSCI -void psci_init(void); +int psci_init(void);  bool psci_smp_available(void);  #else -static inline void psci_init(void) { } +static inline int psci_init(void) { return 0; }  static inline bool psci_smp_available(void) { return false; }  #endif diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 04c99f36ff7..c877654fe3b 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -27,9 +27,13 @@ struct pt_regs {  #define thumb_mode(regs) (0)  #endif +#ifndef CONFIG_CPU_V7M  #define isa_mode(regs) \ -	((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \ -	 (((regs)->ARM_cpsr & PSR_T_BIT) >> 5)) +	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \ +	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT)))) +#else +#define isa_mode(regs) 1 /* Thumb */ +#endif  #define processor_mode(regs) \  	((regs)->ARM_cpsr & MODE_MASK) @@ -80,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)  #define instruction_pointer(regs)	(regs)->ARM_pc +static inline void instruction_pointer_set(struct pt_regs *regs, +					   unsigned long val) +{ +	instruction_pointer(regs) = val; +} +  #ifdef CONFIG_SMP  extern unsigned long profile_pc(struct pt_regs *regs);  #else diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h deleted file mode 100644 index 2389b71a8e7..00000000000 --- a/arch/arm/include/asm/sched_clock.h +++ /dev/null @@ -1,4 +0,0 @@ -/* You shouldn't include this file. Use linux/sched_clock.h instead. 
- * Temporary file until all asm/sched_clock.h users are gone - */ -#include <linux/sched_clock.h> diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index c50f0560950..e0adb9f1bf9 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -21,35 +21,7 @@  #define __tagtable(tag, fn) \  static const struct tagtable __tagtable_##fn __tag = { tag, fn } -/* - * Memory map description - */ -#define NR_BANKS	CONFIG_ARM_NR_BANKS - -struct membank { -	phys_addr_t start; -	phys_addr_t size; -	unsigned int highmem; -}; - -struct meminfo { -	int nr_banks; -	struct membank bank[NR_BANKS]; -}; - -extern struct meminfo meminfo; - -#define for_each_bank(iter,mi)				\ -	for (iter = 0; iter < (mi)->nr_banks; iter++) - -#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start) -#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size) -#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT) -#define bank_phys_start(bank)	(bank)->start -#define bank_phys_end(bank)	((bank)->start + (bank)->size) -#define bank_phys_size(bank)	(bank)->size - -extern int arm_add_memory(phys_addr_t start, phys_addr_t size); +extern int arm_add_memory(u64 start, u64 size);  extern void early_print(const char *str, ...);  extern void dump_machine_table(void); diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index a8cae71cace..2ec765c39ab 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -74,6 +74,7 @@ struct secondary_data {  };  extern struct secondary_data secondary_data;  extern volatile int pen_release; +extern void secondary_startup(void);  extern int __cpu_disable(void); @@ -84,6 +85,8 @@ extern void arch_send_call_function_single_ipi(int cpu);  extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);  extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); +extern int register_ipi_completion(struct completion *completion, int cpu); +  struct smp_operations {  #ifdef CONFIG_SMP  	/* @@ -112,6 +115,15 @@ struct smp_operations {  #endif  }; +struct of_cpu_method { +	const char *method; +	struct smp_operations *ops; +}; + +#define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\ +	static const struct of_cpu_method __cpu_method_of_table_##name	\ +		__used __section(__cpu_method_of_table)			\ +		= { .method = _method, .ops = _ops }  /*   * set platform specific SMP operations   */ diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 4f2c28060c9..ac4bfae2670 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,21 +5,13 @@  #error SMP not supported on pre-ARMv6 CPUs  #endif -#include <asm/processor.h> +#include <linux/prefetch.h>  /*   * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K   * extensions, so when running on UP, we have to patch these instructions away.   */ -#define ALT_SMP(smp, up)					\ -	"9998:	" smp "\n"					\ -	"	.pushsection \".alt.smp.init\", \"a\"\n"	\ -	"	.long	9998b\n"				\ -	"	" up "\n"					\ -	"	.popsection\n" -  #ifdef CONFIG_THUMB2_KERNEL -#define SEV		ALT_SMP("sev.w", "nop.w")  /*   * For Thumb-2, special care is needed to ensure that the conditional WFE   * instruction really does assemble to exactly 4 bytes (as required by @@ -31,31 +23,23 @@   * the assembler won't change IT instructions which are explicitly present   * in the input.   
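 *
 * For illustration, the effect on the ticket lock below: a contended
 * arch_spin_lock() waits with roughly
 *
 *	while (lockval.tickets.next != lockval.tickets.owner)
 *		wfe();
 *
 * and the unlock side wakes all waiters via dsb_sev(), so contended
 * CPUs sleep in a low-power state instead of busy-spinning.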
*/ -#define WFE(cond)	ALT_SMP(		\ +#define WFE(cond)	__ALT_SMP_ASM(		\  	"it " cond "\n\t"			\  	"wfe" cond ".n",			\  						\  	"nop.w"					\  )  #else -#define SEV		ALT_SMP("sev", "nop") -#define WFE(cond)	ALT_SMP("wfe" cond, "nop") +#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")  #endif +#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop)) +  static inline void dsb_sev(void)  { -#if __LINUX_ARM_ARCH__ >= 7 -	__asm__ __volatile__ ( -		"dsb ishst\n" -		SEV -	); -#else -	__asm__ __volatile__ ( -		"mcr p15, 0, %0, c7, c10, 4\n" -		SEV -		: : "r" (0) -	); -#endif + +	dsb(ishst); +	__asm__(SEV);  }  /* @@ -77,6 +61,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)  	u32 newval;  	arch_spinlock_t lockval; +	prefetchw(&lock->slock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%3]\n"  "	add	%1, %0, %4\n" @@ -100,6 +85,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)  	unsigned long contended, res;  	u32 slock; +	prefetchw(&lock->slock);  	do {  		__asm__ __volatile__(  		"	ldrex	%0, [%3]\n" @@ -127,10 +113,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)  	dsb_sev();  } +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ +	return lock.tickets.owner == lock.tickets.next; +} +  static inline int arch_spin_is_locked(arch_spinlock_t *lock)  { -	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); -	return tickets.owner != tickets.next; +	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));  }  static inline int arch_spin_is_contended(arch_spinlock_t *lock) @@ -152,6 +142,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)  {  	unsigned long tmp; +	prefetchw(&rw->lock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%1]\n"  "	teq	%0, #0\n" @@ -170,6 +161,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)  {  	unsigned long contended, res; +	prefetchw(&rw->lock);  	do {  		__asm__ __volatile__(  		"	ldrex	%0, [%2]\n" @@ -203,7 +195,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)  }  /* write_can_lock - would write_trylock() succeed? */ -#define arch_write_can_lock(x)		((x)->lock == 0) +#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)  /*   * Read locks are a bit more hairy: @@ -221,6 +213,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)  {  	unsigned long tmp, tmp2; +	prefetchw(&rw->lock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%2]\n"  "	adds	%0, %0, #1\n" @@ -241,6 +234,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)  	smp_mb(); +	prefetchw(&rw->lock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%2]\n"  "	sub	%0, %0, #1\n" @@ -259,6 +253,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)  {  	unsigned long contended, res; +	prefetchw(&rw->lock);  	do {  		__asm__ __volatile__(  		"	ldrex	%0, [%2]\n" @@ -280,7 +275,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)  }  /* read_can_lock - would read_trylock() succeed? 
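 *
 * (Why the test below works: arch_write_lock() sets the lock word to
 * 0x80000000, so any value with the top bit set means a writer is
 * present, while smaller values are just the count of current readers;
 * a new reader can still take the lock in that case.)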
*/ -#define arch_read_can_lock(x)		((x)->lock < 0x80000000) +#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)  #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)  #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index b262d2f8b47..47663fcb10a 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -25,7 +25,7 @@ typedef struct {  #define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }  typedef struct { -	volatile unsigned int lock; +	u32 lock;  } arch_rwlock_t;  #define __ARCH_RW_LOCK_UNLOCKED		{ 0 } diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h index 63479eecbf7..9732b8e11e6 100644 --- a/arch/arm/include/asm/sync_bitops.h +++ b/arch/arm/include/asm/sync_bitops.h @@ -2,7 +2,6 @@  #define __ASM_SYNC_BITOPS_H__  #include <asm/bitops.h> -#include <asm/system.h>  /* sync_bitops functions are equivalent to the SMP implementation of the   * original functions, independently from CONFIG_SMP being defined. diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h index f1d96d4e809..4651f6999b7 100644 --- a/arch/arm/include/asm/syscall.h +++ b/arch/arm/include/asm/syscall.h @@ -7,7 +7,7 @@  #ifndef _ASM_ARM_SYSCALL_H  #define _ASM_ARM_SYSCALL_H -#include <linux/audit.h> /* for AUDIT_ARCH_* */ +#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */  #include <linux/elf.h> /* for ELF_EM */  #include <linux/err.h>  #include <linux/sched.h> @@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,  					 unsigned int i, unsigned int n,  					 unsigned long *args)  { +	if (n == 0) +		return; +  	if (i + n > SYSCALL_MAX_ARGS) {  		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;  		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; @@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,  					 unsigned int i, unsigned int n,  					 const unsigned long *args)  { +	if (n == 0) +		return; +  	if (i + n > SYSCALL_MAX_ARGS) {  		pr_warning("%s called with max args %d, handling only %d\n",  			   __func__, i + n, SYSCALL_MAX_ARGS); @@ -97,8 +103,7 @@ static inline void syscall_set_arguments(struct task_struct *task,  	memcpy(®s->ARM_r0 + i, args, n * sizeof(args[0]));  } -static inline int syscall_get_arch(struct task_struct *task, -				   struct pt_regs *regs) +static inline int syscall_get_arch(void)  {  	/* ARM tasks don't change audit architectures on the fly. */  	return AUDIT_ARCH_ARM; diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h deleted file mode 100644 index 368165e33c1..00000000000 --- a/arch/arm/include/asm/system.h +++ /dev/null @@ -1,7 +0,0 @@ -/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! 
*/ -#include <asm/barrier.h> -#include <asm/compiler.h> -#include <asm/cmpxchg.h> -#include <asm/switch_to.h> -#include <asm/system_info.h> -#include <asm/system_misc.h> diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index df5e13d64f2..e4e4208a913 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -114,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)  	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))  #define thread_saved_sp(tsk)	\  	((unsigned long)(task_thread_info(tsk)->cpu_context.sp)) + +#ifndef CONFIG_THUMB2_KERNEL  #define thread_saved_fp(tsk)	\  	((unsigned long)(task_thread_info(tsk)->cpu_context.fp)) +#else +#define thread_saved_fp(tsk)	\ +	((unsigned long)(task_thread_info(tsk)->cpu_context.r7)) +#endif  extern void crunch_task_disable(struct thread_info *);  extern void crunch_task_copy(struct thread_info *, void *); @@ -141,12 +147,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,  #endif  /* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring.  See <asm/hardirq.h>. - */ -#define PREEMPT_ACTIVE	0x40000000 - -/*   * thread information flags:   *  TIF_SYSCALL_TRACE	- syscall trace active   *  TIF_SYSCAL_AUDIT	- syscall auditing active @@ -159,6 +159,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,  #define TIF_SIGPENDING		0  #define TIF_NEED_RESCHED	1  #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */ +#define TIF_UPROBE		7  #define TIF_SYSCALL_TRACE	8  #define TIF_SYSCALL_AUDIT	9  #define TIF_SYSCALL_TRACEPOINT	10 @@ -171,6 +172,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,  #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME) +#define _TIF_UPROBE		(1 << TIF_UPROBE)  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)  #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)  #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT) @@ -184,7 +186,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,  /*   * Change these and you break ASM code in entry-common.S   */ -#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME) +#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)  #endif /* __KERNEL__ */  #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index 83f2aa83899..f6fcc67ef06 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -12,12 +12,6 @@  #ifndef _ASMARM_TIMEX_H  #define _ASMARM_TIMEX_H -#ifdef CONFIG_ARCH_MULTIPLATFORM -#define CLOCK_TICK_RATE 1000000 -#else -#include <mach/timex.h> -#endif -  typedef unsigned long cycles_t;  #define get_cycles()	({ cycles_t c; read_current_timer(&c) ? 
0 : c; }) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 0baf7f0d939..f1a0dace3ef 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -98,15 +98,25 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)  	}  } -static inline void tlb_flush_mmu(struct mmu_gather *tlb) +static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)  {  	tlb_flush(tlb); +} + +static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) +{  	free_pages_and_swap_cache(tlb->pages, tlb->nr);  	tlb->nr = 0;  	if (tlb->pages == tlb->local)  		__tlb_alloc_page(tlb);  } +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ +	tlb_flush_mmu_tlbonly(tlb); +	tlb_flush_mmu_free(tlb); +} +  static inline void  tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)  { diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 38960264040..def9e570199 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)  		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));  } -#include <asm/cputype.h> -#ifdef CONFIG_ARM_ERRATA_798181 -static inline int erratum_a15_798181(void) -{ -	unsigned int midr = read_cpuid_id(); - -	/* Cortex-A15 r0p0..r3p2 affected */ -	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) -		return 0; -	return 1; -} - -static inline void dummy_flush_tlb_a15_erratum(void) -{ -	/* -	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. -	 */ -	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); -	dsb(ish); -} -#else -static inline int erratum_a15_798181(void) -{ -	return 0; -} - -static inline void dummy_flush_tlb_a15_erratum(void) -{ -} -#endif -  /*   *	flush_pmd_entry   * @@ -697,4 +666,21 @@ extern void flush_bp_all(void);  #endif +#ifndef __ASSEMBLY__ +#ifdef CONFIG_ARM_ERRATA_798181 +extern void erratum_a15_798181_init(void); +#else +static inline void erratum_a15_798181_init(void) {} +#endif +extern bool (*erratum_a15_798181_handler)(void); + +static inline bool erratum_a15_798181(void) +{ +	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) && +		erratum_a15_798181_handler)) +		return erratum_a15_798181_handler(); +	return false; +} +#endif +  #endif diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 58b8b84adcd..2fe85fff5cc 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -20,9 +20,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];  #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)  #define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling) -#define mc_capable()	(cpu_topology[0].socket_id != -1) -#define smt_capable()	(cpu_topology[0].thread_id != -1) -  void init_cpu_topology(void);  void store_cpu_topology(unsigned int cpuid);  const struct cpumask *cpu_coregroup_mask(int cpu); diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h new file mode 100644 index 00000000000..624e1d436c6 --- /dev/null +++ b/arch/arm/include/asm/trusted_foundations.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2013, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Support for the Trusted Foundations secure monitor.
+ *
+ * Trusted Foundations is the secure monitor on some ARM consumer devices
+ * (most Tegra-based devices on the market are affected). On such devices,
+ * some basic operations, like setting the CPU reset vector, can only be
+ * performed through SMC calls to the secure monitor. The calls are completely
+ * specific to Trusted Foundations, and do *not* follow the SMC calling
+ * convention or the PSCI standard.
+ */
+
+#ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
+#define __ASM_ARM_TRUSTED_FOUNDATIONS_H
+
+#include <linux/kconfig.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+
+struct trusted_foundations_platform_data {
+	unsigned int version_major;
+	unsigned int version_minor;
+};
+
+#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS)
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd);
+void of_register_trusted_foundations(void);
+
+#else /* CONFIG_TRUSTED_FOUNDATIONS */
+
+static inline void register_trusted_foundations(
+				   struct trusted_foundations_platform_data *pd)
+{
+	/*
+	 * If the system requires TF and we cannot provide it, continue booting
+	 * but disable features that cannot be provided.
+	 */
+	pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
+	pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
+	setup_max_cpus = 0;
+#endif
+	cpu_idle_poll_ctrl(true);
+}
+
+static inline void of_register_trusted_foundations(void)
+{
+	/*
+	 * If we find that the target should enable TF but the kernel does not
+	 * support it, boot in degraded mode, as the system won't be able to
+	 * do much anyway.
+	 */
+	if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"))
+		register_trusted_foundations(NULL);
+}
+#endif /* CONFIG_TRUSTED_FOUNDATIONS */
+
+#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 72abdc541f3..75d95799b6e 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -19,7 +19,7 @@
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#if __LINUX_ARM_ARCH__ < 6
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 #include <asm-generic/uaccess-unaligned.h>
 #else
 #define __get_user_unaligned __get_user
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p)							\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		const typeof(*(p)) __user *__tmp_p = (p);		\
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f46b4d..b88beaba6b4 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,6 +38,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)	instr.w
 #define BSYM(sym)	sym + 1
+#else
+#define WASM(instr)	#instr ".w"
 #endif
 
 #else	/* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)	instr
 #define BSYM(sym)	sym
+#else
+#define WASM(instr)	#instr
 #endif
 
 #endif	/* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 141baa3f9a7..43876245fc5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define __NR_syscalls  (380)
+#define __NR_syscalls  (384)
 #define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0)
 
 #define __ARCH_WANT_STAT64
@@ -48,6 +48,5 @@
  */
 #define __IGNORE_fadvise64_64
 #define __IGNORE_migrate_pages
-#define __IGNORE_kcmp
 
 #endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h
new file mode 100644
index 00000000000..9472c20b7d4
--- /dev/null
+++ b/arch/arm/include/asm/uprobes.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Rabin Vincent <rabin at rab.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/opcodes.h>
+
+typedef u32 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES		4
+#define UPROBE_XOL_SLOT_BYTES	64
+
+#define UPROBE_SWBP_ARM_INSN	0xe7f001f9
+#define UPROBE_SS_ARM_INSN	0xe7f001fa
+#define UPROBE_SWBP_INSN	__opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN)
+#define UPROBE_SWBP_INSN_SIZE	4
+
+struct arch_uprobe_task {
+	u32 backup;
+	unsigned long	saved_trap_no;
+};
+
+struct arch_uprobe {
+	u8 insn[MAX_UINSN_BYTES];
+	unsigned long ixol[2];
+	uprobe_opcode_t bpinsn;
+	bool simulate;
+	u32 pcreg;
+	void (*prehandler)(struct arch_uprobe *auprobe,
+			   struct arch_uprobe_task *autask,
+			   struct pt_regs *regs);
+	void (*posthandler)(struct arch_uprobe *auprobe,
+			    struct arch_uprobe_task *autask,
+			    struct pt_regs *regs);
+	struct arch_probes_insn asi;
+};
+
+#endif
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
index 4d52f92967a..a6d0a29861e 100644
--- a/arch/arm/include/asm/word-at-a-time.h
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -48,10 +48,14 @@ static inline unsigned long find_zero(unsigned long mask)
 	return ret;
 }
 
-#ifdef CONFIG_DCACHE_WORD_ACCESS
-
 #define zero_bytemask(mask) (mask)
+#else	/* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
 /*
  * Load an unaligned word from kernel space.
  *
@@ -73,7 +77,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 	"	bic	%2, %2, #0x3\n"
 	"	ldr	%0, [%2]\n"
 	"	lsl	%1, %1, #0x3\n"
+#ifndef __ARMEB__
 	"	lsr	%0, %0, %1\n"
+#else
+	"	lsl	%0, %0, %1\n"
+#endif
 	"	b	2b\n"
 	"	.popsection\n"
 	"	.pushsection __ex_table,\"a\"\n"
@@ -86,11 +94,5 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 	return ret;
 }
-
 #endif	/* DCACHE_WORD_ACCESS */
-
-#else	/* __ARMEB__ */
-#include <asm-generic/word-at-a-time.h>
-#endif
-
 #endif /* __ASM_ARM_WORD_AT_A_TIME_H */
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 7704e28c348..712b50e0a6d 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -34,6 +34,7 @@
 #define _ASM_ARM_XEN_HYPERCALL_H
 
 #include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
 
 long privcmd_call(unsigned call, unsigned long a1,
 		unsigned long a2, unsigned long a3,
@@ -48,6 +49,16 @@ int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
 int HYPERVISOR_physdev_op(int cmd, void *arg);
 int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
 int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
+
+static inline int
+HYPERVISOR_suspend(unsigned long start_info_mfn)
+{
+	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+	/* start_info_mfn is unused on ARM */
+	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+}
 
 static inline void
 MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
@@ -63,9 +74,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
 	BUG();
 }
 
-static inline int
-HYPERVISOR_multicall(void *call_list, int nr_calls)
-{
-	BUG();
-}
 #endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index d7ab99a0c9e..1317ee40f4d 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return PARAVIRT_LAZY_NONE;
 }
 
+extern struct dma_map_ops *xen_dma_ops;
+
 #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 1151188bcd8..50066006e6b 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -40,6 +40,8 @@ typedef uint64_t xen_pfn_t;
 #define PRI_xen_pfn "llx"
 typedef uint64_t xen_ulong_t;
 #define PRI_xen_ulong "llx"
+typedef int64_t xen_long_t;
+#define PRI_xen_long "llx"
 
 /* Guest handles for primitive C types. */
 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
 __DEFINE_GUEST_HANDLE(uint,  unsigned int);
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 00000000000..1109017499e
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (__generic_dma_ops(hwdev)->unmap_page)
+		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_device)
+		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 359a7b50b15..ded062f9b35 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
 
 #include <linux/pfn.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <xen/xen.h>
 #include <xen/interface/grant_table.h>
 
-#define pfn_to_mfn(pfn)			(pfn)
 #define phys_to_machine_mapping_valid(pfn) (1)
-#define mfn_to_pfn(mfn)			(mfn)
 #define mfn_to_virt(m)			(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 #define pte_mfn	    pte_pfn
@@ -32,6 +32,38 @@ typedef struct xpaddr {
 
 #define INVALID_P2M_ENTRY      (~0UL)
 
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	unsigned long mfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		mfn = __pfn_to_mfn(pfn);
+		if (mfn != INVALID_P2M_ENTRY)
+			return mfn;
+	}
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		pfn = __mfn_to_pfn(mfn);
+		if (pfn != INVALID_P2M_ENTRY)
+			return pfn;
+	}
+
+	return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
 static inline xmaddr_t phys_to_machine(xpaddr_t phys)
 {
 	unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -45,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
 }
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
 #define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
@@ -65,28 +96,24 @@ static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
 	return NULL;
 }
 
-static inline int m2p_add_override(unsigned long mfn, struct page *page,
-		struct gnttab_map_grant_ref *kmap_op)
-{
-	return 0;
-}
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+				   struct gnttab_map_grant_ref *kmap_ops,
+				   struct page **pages, unsigned int count);
 
-static inline int m2p_remove_override(struct page *page, bool clear_pte)
-{
-	return 0;
-}
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+				     struct gnttab_map_grant_ref *kmap_ops,
+				     struct page **pages, unsigned int count);
 
-static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-	return true;
-}
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+		unsigned long nr_pages);
 
 static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
 	return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 00000000000..2265a199280
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD		0x000c
+#define UARTn_CMD_TXEN			0x0004
+
+#define	UARTn_STATUS		0x0010
+#define	UARTn_STATUS_TXC		0x0020
+#define	UARTn_STATUS_TXBL		0x0040
+
+#define	UARTn_TXDATA		0x0034
+
+		.macro	addruart, rx, tmp
+		ldr	\rx, =(CONFIG_DEBUG_UART_PHYS)
+
+		/*
+		 * Enable TX. The driver might disable it to save energy. We
+		 * don't bother disabling it again when we are done, since
+		 * power consumption isn't that important while debugging.
+		 */
+		ldr	\tmp, =(UARTn_CMD_TXEN)
+		str	\tmp, [\rx, #UARTn_CMD]
+		.endm
+
+		.macro	senduart,rd,rx
+		strb	\rd, [\rx, #UARTn_TXDATA]
+		.endm
+
+		.macro	waituart,rd,rx
+1001:		ldr	\rd, [\rx, #UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXBL
+		beq	1001b
+		.endm
+
+		.macro	busyuart,rd,rx
+1001:		ldr	\rd, [\rx, UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXC
+		bne	1001b
+		.endm
diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h
index 29da84e183f..032a316eb80 100644
--- a/arch/arm/include/debug/imx-uart.h
+++ b/arch/arm/include/debug/imx-uart.h
@@ -43,6 +43,14 @@
 #define IMX35_UART_BASE_ADDR(n)	IMX35_UART##n##_BASE_ADDR
 #define IMX35_UART_BASE(n)	IMX35_UART_BASE_ADDR(n)
 
+#define IMX50_UART1_BASE_ADDR	0x53fbc000
+#define IMX50_UART2_BASE_ADDR	0x53fc0000
+#define IMX50_UART3_BASE_ADDR	0x5000c000
+#define IMX50_UART4_BASE_ADDR	0x53ff0000
+#define IMX50_UART5_BASE_ADDR	0x63f90000
+#define IMX50_UART_BASE_ADDR(n)	IMX50_UART##n##_BASE_ADDR
+#define IMX50_UART_BASE(n)	IMX50_UART_BASE_ADDR(n)
+
 #define IMX51_UART1_BASE_ADDR	0x73fbc000
 #define IMX51_UART2_BASE_ADDR	0x73fc0000
 #define IMX51_UART3_BASE_ADDR	0x7000c000
@@ -73,6 +81,15 @@
 #define IMX6SL_UART_BASE_ADDR(n) IMX6SL_UART##n##_BASE_ADDR
 #define IMX6SL_UART_BASE(n)	IMX6SL_UART_BASE_ADDR(n)
 
+#define IMX6SX_UART1_BASE_ADDR	0x02020000
+#define IMX6SX_UART2_BASE_ADDR	0x021e8000
+#define IMX6SX_UART3_BASE_ADDR	0x021ec000
+#define IMX6SX_UART4_BASE_ADDR	0x021f0000
+#define IMX6SX_UART5_BASE_ADDR	0x021f4000
+#define IMX6SX_UART6_BASE_ADDR	0x022a0000
+#define IMX6SX_UART_BASE_ADDR(n) IMX6SX_UART##n##_BASE_ADDR
+#define IMX6SX_UART_BASE(n)	IMX6SX_UART_BASE_ADDR(n)
+
 #define IMX_DEBUG_UART_BASE(soc) soc##_UART_BASE(CONFIG_DEBUG_IMX_UART_PORT)
 
 #ifdef CONFIG_DEBUG_IMX1_UART
@@ -85,6 +102,8 @@
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX31)
 #elif defined(CONFIG_DEBUG_IMX35_UART)
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX35)
+#elif defined(CONFIG_DEBUG_IMX50_UART)
+#define UART_PADDR	IMX_DEBUG_UART_BASE(IMX50)
 #elif defined(CONFIG_DEBUG_IMX51_UART)
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX51)
 #elif defined(CONFIG_DEBUG_IMX53_UART)
@@ -93,6 +112,8 @@
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX6Q)
 #elif defined(CONFIG_DEBUG_IMX6SL_UART)
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX6SL)
+#elif defined(CONFIG_DEBUG_IMX6SX_UART)
+#define UART_PADDR	IMX_DEBUG_UART_BASE(IMX6SX)
 #endif
 
 #endif /* __DEBUG_IMX_UART_H */
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1bc470..9ef57612811 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -15,46 +15,15 @@
  *
  */
 
-#if defined(CONFIG_ARCH_MSM7X00A) || defined(CONFIG_ARCH_QSD8X50)
-#define MSM_UART1_PHYS        0xA9A00000
-#define MSM_UART2_PHYS        0xA9B00000
-#define MSM_UART3_PHYS        0xA9C00000
-#elif defined(CONFIG_ARCH_MSM7X30)
-#define MSM_UART1_PHYS        0xACA00000
-#define MSM_UART2_PHYS        0xACB00000
-#define MSM_UART3_PHYS        0xACC00000
-#endif
-
-#if defined(CONFIG_DEBUG_MSM_UART1)
-#define MSM_DEBUG_UART_BASE	0xE1000000
-#define MSM_DEBUG_UART_PHYS	MSM_UART1_PHYS
-#elif defined(CONFIG_DEBUG_MSM_UART2)
-#define MSM_DEBUG_UART_BASE	0xE1000000
-#define MSM_DEBUG_UART_PHYS	MSM_UART2_PHYS
-#elif defined(CONFIG_DEBUG_MSM_UART3)
-#define MSM_DEBUG_UART_BASE	0xE1000000
-#define MSM_DEBUG_UART_PHYS	MSM_UART3_PHYS
-#endif
-
-#ifdef CONFIG_DEBUG_MSM8660_UART
-#define MSM_DEBUG_UART_BASE	0xF0040000
-#define MSM_DEBUG_UART_PHYS	0x19C40000
-#endif
-
-#ifdef CONFIG_DEBUG_MSM8960_UART
-#define MSM_DEBUG_UART_BASE	0xF0040000
-#define MSM_DEBUG_UART_PHYS	0x16440000
-#endif
-
 	.macro	addruart, rp, rv, tmp
-#ifdef MSM_DEBUG_UART_PHYS
-	ldr	\rp, =MSM_DEBUG_UART_PHYS
-	ldr	\rv, =MSM_DEBUG_UART_BASE
+#ifdef CONFIG_DEBUG_UART_PHYS
+	ldr	\rp, =CONFIG_DEBUG_UART_PHYS
+	ldr	\rv, =CONFIG_DEBUG_UART_VIRT
 #endif
 	.endm
 
 	.macro	senduart, rd, rx
-#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS
+#ifdef CONFIG_DEBUG_QCOM_UARTDM
 	@ Write the 1 character to UARTDM_TF
 	str	\rd, [\rx, #0x70]
 #else
@@ -63,7 +32,7 @@
 	.endm
 
 	.macro	waituart, rd, rx
-#ifdef CONFIG_MSM_HAS_DEBUG_UART_HS
+#ifdef CONFIG_DEBUG_QCOM_UARTDM
 	@ check for TX_EMT in UARTDM_SR
 	ldr	\rd, [\rx, #0x08]
 	tst	\rd, #0x08
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index 37c6895b87e..92ef808a233 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -25,12 +25,14 @@
 
 		.macro	waituart,rd,rx
 1001:		ldr	\rd, [\rx, #UART01x_FR]
+ ARM_BE8(	rev	\rd, \rd )
 		tst	\rd, #UART01x_FR_TXFF
 		bne	1001b
 		.endm
 
 		.macro	busyuart,rd,rx
 1001:		ldr	\rd, [\rx, #UART01x_FR]
+ ARM_BE8(	rev	\rd, \rd )
 		tst	\rd, #UART01x_FR_BUSY
 		bne	1001b
 		.endm
diff --git a/arch/arm/include/debug/s3c24xx.S b/arch/arm/include/debug/s3c24xx.S
new file mode 100644
index 00000000000..b1f54dc4888
--- /dev/null
+++ b/arch/arm/include/debug/s3c24xx.S
@@ -0,0 +1,46 @@
+/* arch/arm/mach-s3c2410/include/mach/debug-macro.S
+ *
+ * Debugging macro include header
+ *
+ *  Copyright (C) 1994-1999 Russell King
+ *  Copyright (C) 2005 Simtec Electronics
+ *
+ *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/serial_s3c.h>
+
+#define S3C2410_UART1_OFF (0x4000)
+
+	.macro addruart, rp, rv, tmp
+		ldr	\rp, = CONFIG_DEBUG_UART_PHYS
+		ldr	\rv, = CONFIG_DEBUG_UART_VIRT
+	.endm
+
+	.macro  fifo_full_s3c2410 rd, rx
+		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+		tst	\rd, #S3C2410_UFSTAT_TXFULL
+	.endm
+
+	.macro fifo_level_s3c2410 rd, rx
+		ldr	\rd, [\rx, # S3C2410_UFSTAT]
+		and	\rd, \rd, #S3C2410_UFSTAT_TXMASK
+	.endm
+
+/* Select the correct implementation depending on the configuration. The
+ * S3C2440 will get selected by default, as these are the most widely
+ * used variants.
+*/
+
+#if defined(CONFIG_DEBUG_S3C2410_UART)
+#define fifo_full  fifo_full_s3c2410
+#define fifo_level fifo_level_s3c2410
+#endif
+
+/* include the rest of the code, which will do the work */
+
+#include <debug/samsung.S>
diff --git a/arch/arm/include/debug/samsung.S b/arch/arm/include/debug/samsung.S
index f3a9cff6d5d..8d8d922e5e4 100644
--- a/arch/arm/include/debug/samsung.S
+++ b/arch/arm/include/debug/samsung.S
@@ -9,7 +9,7 @@
  * published by the Free Software Foundation.
 */
 
-#include <plat/regs-serial.h>
+#include <linux/serial_s3c.h>
 
 /* The S5PV210/S5PC110 implementations are as below. */
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index be6a720dd18..3bc80599c02 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -46,15 +46,14 @@
 #define TEGRA_APB_MISC_GP_HIDREV	(TEGRA_APB_MISC_BASE + 0x804)
 
 /*
- * Must be 1MB-aligned since a 1MB mapping is used early on.
+ * Must be section-aligned since a section mapping is used early on.
  * Must not overlap with regions in mach-tegra/io.c:tegra_io_desc[].
 */
-#define UART_VIRTUAL_BASE		0xfe100000
+#define UART_VIRTUAL_BASE		0xfe800000
 
 #define checkuart(rp, rv, lhu, bit, uart) \
 		/* Load address of CLK_RST register */ \
-		movw	rp, #TEGRA_CLK_RST_DEVICES_##lhu & 0xffff ; \
-		movt	rp, #TEGRA_CLK_RST_DEVICES_##lhu >> 16 ; \
+		ldr	rp, =TEGRA_CLK_RST_DEVICES_##lhu ; \
 		/* Load value from CLK_RST register */ \
 		ldr	rp, [rp, #0] ; \
 		/* Test UART's reset bit */ \
@@ -62,8 +61,7 @@
 		/* If set, can't use UART; jump to save no UART */ \
 		bne	90f ; \
 		/* Load address of CLK_OUT_ENB register */ \
-		movw	rp, #TEGRA_CLK_OUT_ENB_##lhu & 0xffff ; \
-		movt	rp, #TEGRA_CLK_OUT_ENB_##lhu >> 16 ; \
+		ldr	rp, =TEGRA_CLK_OUT_ENB_##lhu ; \
 		/* Load value from CLK_OUT_ENB register */ \
 		ldr	rp, [rp, #0] ; \
 		/* Test UART's clock enable bit */ \
@@ -71,8 +69,7 @@
 		/* If clear, can't use UART; jump to save no UART */ \
 		beq	90f ; \
 		/* Passed all tests, load address of UART registers */ \
-		movw	rp, #TEGRA_UART##uart##_BASE & 0xffff ; \
-		movt	rp, #TEGRA_UART##uart##_BASE >> 16 ; \
+		ldr	rp, =TEGRA_UART##uart##_BASE ; \
 		/* Jump to save UART address */ \
 		b 91f
@@ -90,15 +87,16 @@
 
 #ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
 		/* Check ODMDATA */
-10:		movw	\rp, #TEGRA_PMC_SCRATCH20 & 0xffff
-		movt	\rp, #TEGRA_PMC_SCRATCH20 >> 16
+10:		ldr	\rp, =TEGRA_PMC_SCRATCH20
 		ldr	\rp, [\rp, #0]		@ Load PMC_SCRATCH20
-		ubfx	\rv, \rp, #18, #2	@ 19:18 are console type
+		lsr	\rv, \rp, #18		@ 19:18 are console type
+		and	\rv, \rv, #3
 		cmp	\rv, #2			@ 2 and 3 mean DCC, UART
 		beq	11f			@ some boards swap the meaning
 		cmp	\rv, #3			@ so accept either
 		bne	90f
-11:		ubfx	\rv, \rp, #15, #3	@ 17:15 are UART ID
+11:		lsr	\rv, \rp, #15		@ 17:15 are UART ID
+		and	\rv, #7
 		cmp	\rv, #0			@ UART 0?
 		beq	20f
 		cmp	\rv, #1			@ UART 1?
@@ -156,28 +154,6 @@
 92:		and	\rv, \rp, #0xffffff	@ offset within 1MB section
 		add	\rv, \rv, #UART_VIRTUAL_BASE
 		str	\rv, [\tmp, #8]		@ Store in tegra_uart_virt
-		movw	\rv, #TEGRA_APB_MISC_GP_HIDREV & 0xffff
-		movt	\rv, #TEGRA_APB_MISC_GP_HIDREV >> 16
-		ldr	\rv, [\rv, #0]		@ Load HIDREV
-		ubfx	\rv, \rv, #8, #8	@ 15:8 are SoC version
-		cmp	\rv, #0x20		@ Tegra20?
-		moveq	\rv, #0x75		@ Tegra20 divisor
-		movne	\rv, #0xdd		@ Tegra30 divisor
-		str	\rv, [\tmp, #12]	@ Save divisor to scratch
-		/* uart[UART_LCR] = UART_LCR_WLEN8 | UART_LCR_DLAB; */
-		mov	\rv, #UART_LCR_WLEN8 | UART_LCR_DLAB
-		str	\rv, [\rp, #UART_LCR << UART_SHIFT]
-		/* uart[UART_DLL] = div & 0xff; */
-		ldr	\rv, [\tmp, #12]
-		and	\rv, \rv, #0xff
-		str	\rv, [\rp, #UART_DLL << UART_SHIFT]
-		/* uart[UART_DLM] = div >> 8; */
-		ldr	\rv, [\tmp, #12]
-		lsr	\rv, \rv, #8
-		str	\rv, [\rp, #UART_DLM << UART_SHIFT]
-		/* uart[UART_LCR] = UART_LCR_WLEN8; */
-		mov	\rv, #UART_LCR_WLEN8
-		str	\rv, [\rp, #UART_LCR << UART_SHIFT]
 		b	100f
 
 		.align
@@ -205,8 +181,8 @@
 		cmp	\rx, #0
 		beq	1002f
 1001:		ldrb	\rd, [\rx, #UART_LSR << UART_SHIFT]
-		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
+		and	\rd, \rd, #UART_LSR_THRE
+		teq	\rd, #UART_LSR_THRE
 		bne	1001b
 1002:
 		.endm
@@ -225,7 +201,7 @@
 /*
  * Storage for the state maintained by the macros above.
  *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/common.c.
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
  * That's because this header is included from multiple files, and we only
  * want a single copy of the data. In particular, the UART probing code above
  * assumes it's running using physical addresses. This is true when this file
@@ -247,6 +223,4 @@ tegra_uart_config:
 	.word 0
 	/* Debug UART virtual address */
 	.word 0
-	/* Scratch space for debug macro */
-	.word 0
 #endif
diff --git a/arch/arm/include/debug/vf.S b/arch/arm/include/debug/vf.S
new file mode 100644
index 00000000000..b88933849a1
--- /dev/null
+++ b/arch/arm/include/debug/vf.S
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define VF_UART0_BASE_ADDR	0x40027000
+#define VF_UART1_BASE_ADDR	0x40028000
+#define VF_UART2_BASE_ADDR	0x40029000
+#define VF_UART3_BASE_ADDR	0x4002a000
+#define VF_UART_BASE_ADDR(n)	VF_UART##n##_BASE_ADDR
+#define VF_UART_BASE(n)		VF_UART_BASE_ADDR(n)
+#define VF_UART_PHYSICAL_BASE	VF_UART_BASE(CONFIG_DEBUG_VF_UART_PORT)
+
+#define VF_UART_VIRTUAL_BASE	0xfe000000
+
+	.macro	addruart, rp, rv, tmp
+	ldr	\rp, =VF_UART_PHYSICAL_BASE	@ physical
+	and	\rv, \rp, #0xffffff		@ offset within 16MB section
+	add	\rv, \rv, #VF_UART_VIRTUAL_BASE
+	.endm
+
+	.macro	senduart, rd, rx
+	strb	\rd, [\rx, #0x7]	@ Data Register
+	.endm
+
+	.macro	busyuart, rd, rx
+1001:	ldrb	\rd, [\rx, #0x4]	@ Status Register 1
+	tst	\rd, #1 << 6		@ TC
+	beq	1001b			@ wait until transmit done
+	.endm
+
+	.macro	waituart,rd,rx
+	.endm
diff --git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S
index f9aa9740a73..bd13dedbdef 100644
--- a/arch/arm/include/debug/zynq.S
+++ b/arch/arm/include/debug/zynq.S
@@ -20,18 +20,18 @@
 #define UART_SR_TXEMPTY		0x00000008	/* TX FIFO empty */
 
 #define UART0_PHYS		0xE0000000
+#define UART0_VIRT		0xF0000000
 #define UART1_PHYS		0xE0001000
-#define UART_SIZE		SZ_4K
-#define UART_VIRT		0xF0001000
+#define UART1_VIRT		0xF0001000
 
 #if IS_ENABLED(CONFIG_DEBUG_ZYNQ_UART1)
 # define LL_UART_PADDR		UART1_PHYS
+# define LL_UART_VADDR		UART1_VIRT
 #else
 # define LL_UART_PADDR		UART0_PHYS
+# define LL_UART_VADDR		UART0_VIRT
 #endif
 
-#define LL_UART_VADDR		UART_VIRT
-
 		.macro	addruart, rp, rv, tmp
 		ldr	\rp, =LL_UART_PADDR	@ physical
 		ldr	\rv, =LL_UART_VADDR	@ virtual
@@ -42,10 +42,15 @@
 		.endm
 
 		.macro	waituart,rd,rx
+1001:		ldr	\rd, [\rx, #UART_SR_OFFSET]
+ARM_BE8(	rev	\rd, \rd )
+		tst	\rd, #UART_SR_TXEMPTY
+		beq	1001b
 		.endm
 
 		.macro	busyuart,rd,rx
 1002:		ldr	\rd, [\rx, #UART_SR_OFFSET]	@ get status register
+ARM_BE8(	rev	\rd, \rd )
 		tst	\rd, #UART_SR_TXFULL		@
 		bne	1002b			@ wait if FIFO is full
 		.endm
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 18d76fd5a2a..70a1c9da30c 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ header-y += hwcap.h
 header-y += ioctls.h
 header-y += kvm_para.h
 header-y += mman.h
+header-y += perf_regs.h
 header-y += posix_types.h
 header-y += ptrace.h
 header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 6d34d080372..20d12f230a2 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -26,5 +26,15 @@
 #define HWCAP_VFPD32	(1 << 19)	/* set if VFP has 32 regs (not 16) */
 #define HWCAP_IDIV	(HWCAP_IDIVA | HWCAP_IDIVT)
 #define HWCAP_LPAE	(1 << 20)
+#define HWCAP_EVTSTRM	(1 << 21)
+
+/*
+ * HWCAP2 flags - for elf_hwcap2 (in kernel) and AT_HWCAP2
+ */
+#define HWCAP2_AES	(1 << 0)
+#define HWCAP2_PMULL	(1 << 1)
+#define HWCAP2_SHA1	(1 << 2)
+#define HWCAP2_SHA2	(1 << 3)
+#define HWCAP2_CRC32	(1 << 4)
 
 #endif /* _UAPI__ASMARM_HWCAP_H */
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index c1ee007523d..e6ebdd3471e 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -20,6 +20,7 @@
 #define __ARM_KVM_H__
 
 #include <linux/types.h>
+#include <linux/psci.h>
 #include <asm/ptrace.h>
 
 #define __KVM_HAVE_GUEST_DEBUG
@@ -63,7 +64,8 @@ struct kvm_regs {
 
 /* Supported Processor Types */
 #define KVM_ARM_TARGET_CORTEX_A15	0
-#define KVM_ARM_NUM_TARGETS		1
+#define KVM_ARM_TARGET_CORTEX_A7	1
+#define KVM_ARM_NUM_TARGETS		2
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT	0
@@ -82,6 +84,7 @@ struct kvm_regs {
 #define KVM_VGIC_V2_CPU_SIZE		0x2000
 
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
 
 struct kvm_vcpu_init {
 	__u32 target;
@@ -118,6 +121,26 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800
 #define KVM_REG_ARM_32_CRN_SHIFT	11
 
+#define ARM_CP15_REG_SHIFT_MASK(x,n) \
+	(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
+
+#define __ARM_CP15_REG(op1,crn,crm,op2) \
+	(KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
+	ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
+	ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
+	ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
+	ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
+
+#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
+
+#define __ARM_CP15_REG64(op1,crm) \
+	(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
+#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+
+#define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14)
+
 /* Normal registers are mapped as coprocessor 16. */
 #define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
 #define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / 4)
@@ -142,6 +165,14 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_VFP_FPINST		0x1009
 #define KVM_REG_ARM_VFP_FPINST2	0x100A
 
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_TYPE_SHIFT		24
@@ -172,9 +203,9 @@ struct kvm_arch_memory_slot {
 #define KVM_PSCI_FN_CPU_ON		KVM_PSCI_FN(2)
 #define KVM_PSCI_FN_MIGRATE		KVM_PSCI_FN(3)
 
-#define KVM_PSCI_RET_SUCCESS		0
-#define KVM_PSCI_RET_NI			((unsigned long)-1)
-#define KVM_PSCI_RET_INVAL		((unsigned long)-2)
-#define KVM_PSCI_RET_DENIED		((unsigned long)-3)
+#define KVM_PSCI_RET_SUCCESS		PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI			PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL		PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED		PSCI_RET_DENIED
 
 #endif /* __ARM_KVM_H__ */
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644
index 00000000000..ce59448458b
--- /dev/null
+++ b/arch/arm/include/uapi/asm/perf_regs.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+	PERF_REG_ARM_R0,
+	PERF_REG_ARM_R1,
+	PERF_REG_ARM_R2,
+	PERF_REG_ARM_R3,
+	PERF_REG_ARM_R4,
+	PERF_REG_ARM_R5,
+	PERF_REG_ARM_R6,
+	PERF_REG_ARM_R7,
+	PERF_REG_ARM_R8,
+	PERF_REG_ARM_R9,
+	PERF_REG_ARM_R10,
+	PERF_REG_ARM_FP,
+	PERF_REG_ARM_IP,
+	PERF_REG_ARM_SP,
+	PERF_REG_ARM_LR,
+	PERF_REG_ARM_PC,
+	PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index af33b44990e..ba94446c72d 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -406,6 +406,9 @@
 #define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
 #define __NR_kcmp			(__NR_SYSCALL_BASE+378)
 #define __NR_finit_module		(__NR_SYSCALL_BASE+379)
+#define __NR_sched_setattr		(__NR_SYSCALL_BASE+380)
+#define __NR_sched_getattr		(__NR_SYSCALL_BASE+381)
+#define __NR_renameat2			(__NR_SYSCALL_BASE+382)
 
 /*
  * This may need to be greater than __NR_last_syscall+1 in order to

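The user-visible ABI added above can be probed without the in-tree headers. A minimal userspace sketch for the new hwcap bits — the bit values are copied from the hwcap.h hunk, AT_HWCAP2/getauxval() are assumed to come from a reasonably recent glibc, and the fallback defines are only needed where the toolchain headers predate this change:

/* hwcaps.c: report the event stream and crypto hwcaps added above. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_HWCAP2
#define AT_HWCAP2	26		/* assumed: older headers lack it */
#endif

/* Values mirror the arch/arm/include/uapi/asm/hwcap.h hunk above */
#define HWCAP_EVTSTRM	(1 << 21)
#define HWCAP2_AES	(1 << 0)
#define HWCAP2_PMULL	(1 << 1)
#define HWCAP2_SHA1	(1 << 2)
#define HWCAP2_SHA2	(1 << 3)
#define HWCAP2_CRC32	(1 << 4)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);
	unsigned long hwcap2 = getauxval(AT_HWCAP2);	/* 0 on older kernels */

	printf("evtstrm: %s\n", (hwcap & HWCAP_EVTSTRM) ? "yes" : "no");
	printf("aes:     %s\n", (hwcap2 & HWCAP2_AES) ? "yes" : "no");
	printf("pmull:   %s\n", (hwcap2 & HWCAP2_PMULL) ? "yes" : "no");
	printf("sha1:    %s\n", (hwcap2 & HWCAP2_SHA1) ? "yes" : "no");
	printf("sha2:    %s\n", (hwcap2 & HWCAP2_SHA2) ? "yes" : "no");
	printf("crc32:   %s\n", (hwcap2 & HWCAP2_CRC32) ? "yes" : "no");
	return 0;
}

Likewise, the syscalls wired up in the unistd.h hunks can be invoked by number until libc grows wrappers. A sketch for renameat2 — the number 382 is taken from the hunk above (EABI, where __NR_SYSCALL_BASE is 0), and RENAME_NOREPLACE is assumed to match linux/fs.h:

/* renameat2.c: rename without clobbering the destination. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/syscall.h>

#ifndef __NR_renameat2
#define __NR_renameat2	382	/* __NR_SYSCALL_BASE+382, per the hunk above */
#endif
#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)	/* assumed from linux/fs.h */
#endif

int main(int argc, char **argv)
{
	if (argc != 3)
		return 1;
	/* Unlike rename(2), fails with EEXIST instead of replacing argv[2]. */
	if (syscall(__NR_renameat2, AT_FDCWD, argv[1], AT_FDCWD, argv[2],
		    RENAME_NOREPLACE)) {
		perror("renameat2");
		return 1;
	}
	return 0;
}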