Diffstat (limited to 'arch/arm/include/asm')
238 files changed, 9574 insertions, 7496 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 6550db3aa5c..f5a35760198 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -1,3 +1,39 @@ -include include/asm-generic/Kbuild.asm -header-y += hwcap.h + +generic-y += auxvec.h +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += current.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += exec.h +generic-y += hash.h +generic-y += ioctl.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += local.h +generic-y += local64.h +generic-y += mcs_spinlock.h +generic-y += msgbuf.h +generic-y += param.h +generic-y += parport.h +generic-y += poll.h +generic-y += preempt.h +generic-y += resource.h +generic-y += rwsem.h +generic-y += sections.h +generic-y += segment.h +generic-y += sembuf.h +generic-y += serial.h +generic-y += shmbuf.h +generic-y += siginfo.h +generic-y += simd.h +generic-y += sizes.h +generic-y += socket.h +generic-y += sockios.h +generic-y += termbits.h +generic-y += termios.h +generic-y += timex.h +generic-y += trace_clock.h +generic-y += unaligned.h diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h deleted file mode 100644 index 93d04acaa31..00000000000 --- a/arch/arm/include/asm/a.out-core.h +++ /dev/null @@ -1,49 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include <linux/user.h> -#include <linux/elfcore.h> - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ -	struct task_struct *tsk = current; - -	dump->magic = CMAGIC; -	dump->start_code = tsk->mm->start_code; -	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); - -	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; -	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; -	dump->u_ssize = 0; - -	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; -	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; -	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm; -	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm; -	dump->u_debugreg[4] = tsk->thread.debug.nsaved; - -	if (dump->start_stack < 0x04000000) -		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; - -	dump->regs = *regs; -	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h deleted file mode 100644 index 083894b2e3b..00000000000 --- a/arch/arm/include/asm/a.out.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef __ARM_A_OUT_H__ -#define __ARM_A_OUT_H__ - -#include <linux/personality.h> -#include <linux/types.h> - -struct exec -{ -  __u32 a_info;		/* Use macros N_MAGIC, etc for access */ -  __u32 a_text;		/* length of text, in bytes */ -  __u32 a_data;		/* length of data, in bytes */ -  __u32 a_bss;		/* length of uninitialized data area for file, in bytes */ -  __u32 a_syms;		/* length of symbol table data in file, in bytes */ -  __u32 a_entry;	
/* start address */ -  __u32 a_trsize;	/* length of relocation info for text, in bytes */ -  __u32 a_drsize;	/* length of relocation info for data, in bytes */ -}; - -/* - * This is always the same - */ -#define N_TXTADDR(a)	(0x00008000) - -#define N_TRSIZE(a)	((a).a_trsize) -#define N_DRSIZE(a)	((a).a_drsize) -#define N_SYMSIZE(a)	((a).a_syms) - -#define M_ARM 103 - -#ifndef LIBRARY_START_TEXT -#define LIBRARY_START_TEXT	(0x00c00000) -#endif - -#endif /* __A_OUT_GNU_H__ */ diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h new file mode 100644 index 00000000000..0704e0cf557 --- /dev/null +++ b/arch/arm/include/asm/arch_timer.h @@ -0,0 +1,129 @@ +#ifndef __ASMARM_ARCH_TIMER_H +#define __ASMARM_ARCH_TIMER_H + +#include <asm/barrier.h> +#include <asm/errno.h> +#include <linux/clocksource.h> +#include <linux/init.h> +#include <linux/types.h> + +#include <clocksource/arm_arch_timer.h> + +#ifdef CONFIG_ARM_ARCH_TIMER +int arch_timer_arch_init(void); + +/* + * These register accessors are marked inline so the compiler can + * nicely work out which register we want, and chuck away the rest of + * the code. At least it does so with a recent GCC (4.6.3). + */ +static __always_inline +void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) +{ +	if (access == ARCH_TIMER_PHYS_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); +			break; +		} +	} else if (access == ARCH_TIMER_VIRT_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val)); +			break; +		} +	} + +	isb(); +} + +static __always_inline +u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) +{ +	u32 val = 0; + +	if (access == ARCH_TIMER_PHYS_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); +			break; +		} +	} else if (access == ARCH_TIMER_VIRT_ACCESS) { +		switch (reg) { +		case ARCH_TIMER_REG_CTRL: +			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); +			break; +		case ARCH_TIMER_REG_TVAL: +			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val)); +			break; +		} +	} + +	return val; +} + +static inline u32 arch_timer_get_cntfrq(void) +{ +	u32 val; +	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val)); +	return val; +} + +static inline u64 arch_counter_get_cntvct(void) +{ +	u64 cval; + +	isb(); +	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); +	return cval; +} + +static inline u32 arch_timer_get_cntkctl(void) +{ +	u32 cntkctl; +	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl)); +	return cntkctl; +} + +static inline void arch_timer_set_cntkctl(u32 cntkctl) +{ +	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl)); +} + +static inline void arch_counter_set_user_access(void) +{ +	u32 cntkctl = arch_timer_get_cntkctl(); + +	/* Disable user access to both physical/virtual counters/timers */ +	/* Also disable virtual event stream */ +	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN +			| ARCH_TIMER_USR_VT_ACCESS_EN +			| ARCH_TIMER_VIRT_EVT_EN +			| ARCH_TIMER_USR_VCT_ACCESS_EN +			| ARCH_TIMER_USR_PCT_ACCESS_EN); +	arch_timer_set_cntkctl(cntkctl); +} + 
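A minimal usage sketch (not part of the patch itself): with the accessors above, arming the virtual timer is a TVAL write followed by a CTRL write. The function name below is hypothetical, and ARCH_TIMER_CTRL_ENABLE is assumed to be provided by <clocksource/arm_arch_timer.h>.

static inline void example_vtimer_oneshot_1ms(void)
{
	/* Hypothetical helper: fire the virtual timer roughly 1 ms from now. */
	u32 ticks = arch_timer_get_cntfrq() / 1000;	/* counter ticks per ms */

	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_TVAL, ticks);
	arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
				  ARCH_TIMER_REG_CTRL, ARCH_TIMER_CTRL_ENABLE);
}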
+static inline void arch_timer_evtstrm_enable(int divider) +{ +	u32 cntkctl = arch_timer_get_cntkctl(); +	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK; +	/* Set the divider and enable virtual event stream */ +	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) +			| ARCH_TIMER_VIRT_EVT_EN; +	arch_timer_set_cntkctl(cntkctl); +	elf_hwcap |= HWCAP_EVTSTRM; +} + +#endif + +#endif diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 062b58c029a..57f0584e8d9 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -13,18 +13,26 @@   *  Do not include any C declarations in this file - it is included by   *  assembler source.   */ +#ifndef __ASM_ASSEMBLER_H__ +#define __ASM_ASSEMBLER_H__ +  #ifndef __ASSEMBLY__  #error "Only include this from assembly code"  #endif  #include <asm/ptrace.h> +#include <asm/domain.h> +#include <asm/opcodes-virt.h> +#include <asm/asm-offsets.h> + +#define IOMEM(x)	(x)  /*   * Endian independent macros for shifting bytes within registers.   */  #ifndef __ARMEB__ -#define pull            lsr -#define push            lsl +#define lspull          lsr +#define lspush          lsl  #define get_byte_0      lsl #0  #define get_byte_1	lsr #8  #define get_byte_2	lsr #16 @@ -34,8 +42,8 @@  #define put_byte_2	lsl #16  #define put_byte_3	lsl #24  #else -#define pull            lsl -#define push            lsr +#define lspull          lsl +#define lspush          lsr  #define get_byte_0	lsr #24  #define get_byte_1	lsr #16  #define get_byte_2	lsr #8 @@ -46,6 +54,13 @@  #define put_byte_3      lsl #0  #endif +/* Select code for any configuration running in BE8 mode */ +#ifdef CONFIG_CPU_ENDIAN_BE8 +#define ARM_BE8(code...) code +#else +#define ARM_BE8(code...) +#endif +  /*   * Data preload for architectures that support it   */ @@ -129,16 +144,29 @@   * assumes FIQs are enabled, and that the processor is in SVC mode.   */  	.macro	save_and_disable_irqs, oldcpsr +#ifdef CONFIG_CPU_V7M +	mrs	\oldcpsr, primask +#else  	mrs	\oldcpsr, cpsr +#endif  	disable_irq  	.endm +	.macro	save_and_disable_irqs_notrace, oldcpsr +	mrs	\oldcpsr, cpsr +	disable_irq_notrace +	.endm +  /*   * Restore interrupt state previously stored in a register.  We don't   * guarantee that this will preserve the flags.   */  	.macro	restore_irqs_notrace, oldcpsr +#ifdef CONFIG_CPU_V7M +	msr	primask, \oldcpsr +#else  	msr	cpsr_c, \oldcpsr +#endif  	.endm  	.macro restore_irqs, oldcpsr @@ -147,6 +175,47 @@  	restore_irqs_notrace \oldcpsr  	.endm +/* + * Get current thread_info. + */ +	.macro	get_thread_info, rd + ARM(	mov	\rd, sp, lsr #13	) + THUMB(	mov	\rd, sp			) + THUMB(	lsr	\rd, \rd, #13		) +	mov	\rd, \rd, lsl #13 +	.endm + +/* + * Increment/decrement the preempt count. + */ +#ifdef CONFIG_PREEMPT_COUNT +	.macro	inc_preempt_count, ti, tmp +	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count +	add	\tmp, \tmp, #1			@ increment it +	str	\tmp, [\ti, #TI_PREEMPT] +	.endm + +	.macro	dec_preempt_count, ti, tmp +	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count +	sub	\tmp, \tmp, #1			@ decrement it +	str	\tmp, [\ti, #TI_PREEMPT] +	.endm + +	.macro	dec_preempt_count_ti, ti, tmp +	get_thread_info \ti +	dec_preempt_count \ti, \tmp +	.endm +#else +	.macro	inc_preempt_count, ti, tmp +	.endm + +	.macro	dec_preempt_count, ti, tmp +	.endm + +	.macro	dec_preempt_count_ti, ti, tmp +	.endm +#endif +  #define USER(x...)				\  9999:	x;					\  	.pushsection __ex_table,"a";		\ @@ -157,16 +226,24 @@  #ifdef CONFIG_SMP  #define ALT_SMP(instr...)					
\  9998:	instr +/* + * Note: if you get assembler errors from ALT_UP() when building with + * CONFIG_THUMB2_KERNEL, you almost certainly need to use + * ALT_SMP( W(instr) ... ) + */ #define ALT_UP(instr...)					\  	.pushsection ".alt.smp.init", "a"			;\  	.long	9998b						;\ -	instr							;\ +9997:	instr							;\ +	.if . - 9997b != 4					;\ +		.error "ALT_UP() content must assemble to exactly 4 bytes";\ +	.endif							;\  	.popsection  #define ALT_UP_B(label)					\  	.equ	up_b_offset, label - 9998b			;\  	.pushsection ".alt.smp.init", "a"			;\  	.long	9998b						;\ -	b	. + up_b_offset					;\ +	W(b)	. + up_b_offset					;\  	.popsection  #else  #define ALT_SMP(instr...) @@ -175,22 +252,48 @@  #endif  /* + * Instruction barrier + */ +	.macro	instr_sync +#if __LINUX_ARM_ARCH__ >= 7 +	isb +#elif __LINUX_ARM_ARCH__ == 6 +	mcr	p15, 0, r0, c7, c5, 4 +#endif +	.endm + +/*   * SMP data memory barrier   */ -	.macro	smp_dmb +	.macro	smp_dmb mode  #ifdef CONFIG_SMP  #if __LINUX_ARM_ARCH__ >= 7 -	ALT_SMP(dmb) +	.ifeqs "\mode","arm" +	ALT_SMP(dmb	ish) +	.else +	ALT_SMP(W(dmb)	ish) +	.endif  #elif __LINUX_ARM_ARCH__ == 6  	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb  #else  #error Incompatible SMP platform  #endif +	.ifeqs "\mode","arm"  	ALT_UP(nop) +	.else +	ALT_UP(W(nop)) +	.endif  #endif  	.endm -#ifdef CONFIG_THUMB2_KERNEL +#if defined(CONFIG_CPU_V7M) +	/* +	 * setmode is used to assert that the CPU is in SVC mode during boot. +	 * For v7-M this is done in __v7m_setup, so setmode can be empty here. +	 */ +	.macro	setmode, mode, reg +	.endm +#elif defined(CONFIG_THUMB2_KERNEL)  	.macro	setmode, mode, reg  	mov	\reg, #\mode  	msr	cpsr_c, \reg @@ -202,16 +305,48 @@  #endif  /* + * Helper macro to enter SVC mode cleanly and mask interrupts. reg is + * a scratch register for the macro to overwrite. + * + * This macro is intended for forcing the CPU into SVC mode at boot time. + * You cannot return to the original mode. 
+ */ +.macro safe_svcmode_maskall reg:req +#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M) +	mrs	\reg , cpsr +	eor	\reg, \reg, #HYP_MODE +	tst	\reg, #MODE_MASK +	bic	\reg , \reg , #MODE_MASK +	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE +THUMB(	orr	\reg , \reg , #PSR_T_BIT	) +	bne	1f +	orr	\reg, \reg, #PSR_A_BIT +	adr	lr, BSYM(2f) +	msr	spsr_cxsf, \reg +	__MSR_ELR_HYP(14) +	__ERET +1:	msr	cpsr_c, \reg +2: +#else +/* + * workaround for possibly broken pre-v6 hardware + * (akita, Sharp Zaurus C-1000, PXA270-based) + */ +	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg +#endif +.endm + +/*   * STRT/LDRT access macros with ARM and Thumb-2 variants   */  #ifdef CONFIG_THUMB2_KERNEL -	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort +	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()  9999:  	.if	\inc == 1 -	\instr\cond\()bt \reg, [\ptr, #\off] +	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]  	.elseif	\inc == 4 -	\instr\cond\()t \reg, [\ptr, #\off] +	\instr\cond\()\t\().w \reg, [\ptr, #\off]  	.else  	.error	"Unsupported inc macro argument"  	.endif @@ -238,7 +373,7 @@  	@ Slightly optimised to avoid incrementing the pointer twice  	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort  	.if	\rept == 2 -	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort +	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort  	.endif  	add\cond \ptr, #\rept * \inc @@ -246,13 +381,13 @@  #else	/* !CONFIG_THUMB2_KERNEL */ -	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort +	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()  	.rept	\rept  9999:  	.if	\inc == 1 -	\instr\cond\()bt \reg, [\ptr], #\inc +	\instr\cond\()b\()\t \reg, [\ptr], #\inc  	.elseif	\inc == 4 -	\instr\cond\()t \reg, [\ptr], #\inc +	\instr\cond\()\t \reg, [\ptr], #\inc  	.else  	.error	"Unsupported inc macro argument"  	.endif @@ -273,3 +408,21 @@  	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f  	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort  	.endm + +/* Utility macro for declaring string literals */ +	.macro	string name:req, string +	.type \name , #object +\name: +	.asciz "\string" +	.size \name , . 
- \name +	.endm + +	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS +	adds	\tmp, \addr, #\size - 1 +	sbcccs	\tmp, \tmp, \limit +	bcs	\bad +#endif +	.endm + +#endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 7e79503ab89..3040359094d 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -12,8 +12,11 @@  #define __ASM_ARM_ATOMIC_H  #include <linux/compiler.h> +#include <linux/prefetch.h>  #include <linux/types.h> -#include <asm/system.h> +#include <linux/irqflags.h> +#include <asm/barrier.h> +#include <asm/cmpxchg.h>  #define ATOMIC_INIT(i)	{ (i) } @@ -39,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)  	unsigned long tmp;  	int result; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_add\n"  "1:	ldrex	%0, [%3]\n"  "	add	%0, %0, %4\n" @@ -56,6 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)  	int result;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_add_return\n"  "1:	ldrex	%0, [%3]\n" @@ -77,6 +82,7 @@ static inline void atomic_sub(int i, atomic_t *v)  	unsigned long tmp;  	int result; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_sub\n"  "1:	ldrex	%0, [%3]\n"  "	sub	%0, %0, %4\n" @@ -94,6 +100,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)  	int result;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic_sub_return\n"  "1:	ldrex	%0, [%3]\n" @@ -112,9 +119,11 @@ static inline int atomic_sub_return(int i, atomic_t *v)  static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)  { -	unsigned long oldval, res; +	int oldval; +	unsigned long res;  	smp_mb(); +	prefetchw(&ptr->counter);  	do {  		__asm__ __volatile__("@ atomic_cmpxchg\n" @@ -132,19 +141,31 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)  	return oldval;  } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) +static inline int __atomic_add_unless(atomic_t *v, int a, int u)  { -	unsigned long tmp, tmp2; +	int oldval, newval; +	unsigned long tmp; -	__asm__ __volatile__("@ atomic_clear_mask\n" -"1:	ldrex	%0, [%3]\n" -"	bic	%0, %0, %4\n" -"	strex	%1, %0, [%3]\n" -"	teq	%1, #0\n" -"	bne	1b" -	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) -	: "r" (addr), "Ir" (mask) +	smp_mb(); +	prefetchw(&v->counter); + +	__asm__ __volatile__ ("@ atomic_add_unless\n" +"1:	ldrex	%0, [%4]\n" +"	teq	%0, %5\n" +"	beq	2f\n" +"	add	%1, %0, %6\n" +"	strex	%2, %1, [%4]\n" +"	teq	%2, #0\n" +"	bne	1b\n" +"2:" +	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter) +	: "r" (&v->counter), "r" (u), "r" (a)  	: "cc"); + +	if (oldval != u) +		smp_mb(); + +	return oldval;  }  #else /* ARM_ARCH_6 */ @@ -195,29 +216,19 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)  	return ret;  } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ -	unsigned long flags; - -	raw_local_irq_save(flags); -	*addr &= ~mask; -	raw_local_irq_restore(flags); -} - -#endif /* __LINUX_ARM_ARCH__ */ - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u)  {  	int c, old;  	c = atomic_read(v);  	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)  		c = old; -	return c != u; +	return c;  } -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) + +#endif /* __LINUX_ARM_ARCH__ */ + 
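For context, callers of this era consume __atomic_add_unless() through a generic wrapper in <linux/atomic.h>. A sketch of that wrapper, shown here for illustration rather than as part of the patch:

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	/* Nonzero iff the add was actually performed. */
	return __atomic_add_unless(v, a, u) != u;
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)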
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))  #define atomic_inc(v)		atomic_add(1, v)  #define atomic_dec(v)		atomic_sub(1, v) @@ -230,21 +241,39 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)  #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) -#define smp_mb__before_atomic_dec()	smp_mb() -#define smp_mb__after_atomic_dec()	smp_mb() -#define smp_mb__before_atomic_inc()	smp_mb() -#define smp_mb__after_atomic_inc()	smp_mb() -  #ifndef CONFIG_GENERIC_ATOMIC64  typedef struct { -	u64 __aligned(8) counter; +	long long counter;  } atomic64_t;  #define ATOMIC64_INIT(i) { (i) } -static inline u64 atomic64_read(atomic64_t *v) +#ifdef CONFIG_ARM_LPAE +static inline long long atomic64_read(const atomic64_t *v)  { -	u64 result; +	long long result; + +	__asm__ __volatile__("@ atomic64_read\n" +"	ldrd	%0, %H0, [%1]" +	: "=&r" (result) +	: "r" (&v->counter), "Qo" (v->counter) +	); + +	return result; +} + +static inline void atomic64_set(atomic64_t *v, long long i) +{ +	__asm__ __volatile__("@ atomic64_set\n" +"	strd	%2, %H2, [%1]" +	: "=Qo" (v->counter) +	: "r" (&v->counter), "r" (i) +	); +} +#else +static inline long long atomic64_read(const atomic64_t *v) +{ +	long long result;  	__asm__ __volatile__("@ atomic64_read\n"  "	ldrexd	%0, %H0, [%1]" @@ -255,10 +284,11 @@ static inline u64 atomic64_read(atomic64_t *v)  	return result;  } -static inline void atomic64_set(atomic64_t *v, u64 i) +static inline void atomic64_set(atomic64_t *v, long long i)  { -	u64 tmp; +	long long tmp; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_set\n"  "1:	ldrexd	%0, %H0, [%2]\n"  "	strexd	%0, %3, %H3, [%2]\n" @@ -268,16 +298,18 @@ static inline void atomic64_set(atomic64_t *v, u64 i)  	: "r" (&v->counter), "r" (i)  	: "cc");  } +#endif -static inline void atomic64_add(u64 i, atomic64_t *v) +static inline void atomic64_add(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_add\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	adds	%0, %0, %4\n" -"	adc	%H0, %H0, %H4\n" +"	adds	%Q0, %Q0, %Q4\n" +"	adc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -286,17 +318,18 @@ static inline void atomic64_add(u64 i, atomic64_t *v)  	: "cc");  } -static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +static inline long long atomic64_add_return(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_add_return\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	adds	%0, %0, %4\n" -"	adc	%H0, %H0, %H4\n" +"	adds	%Q0, %Q0, %Q4\n" +"	adc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -309,15 +342,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)  	return result;  } -static inline void atomic64_sub(u64 i, atomic64_t *v) +static inline void atomic64_sub(long long i, atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp; +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_sub\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	subs	%0, %0, %4\n" -"	sbc	%H0, %H0, %H4\n" +"	subs	%Q0, %Q0, %Q4\n" +"	sbc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -326,17 +360,18 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)  	: "cc");  } -static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) +static inline long long atomic64_sub_return(long long i, atomic64_t *v)  { -	u64 result; +	long long result; 
 	unsigned long tmp;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_sub_return\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	subs	%0, %0, %4\n" -"	sbc	%H0, %H0, %H4\n" +"	subs	%Q0, %Q0, %Q4\n" +"	sbc	%R0, %R0, %R4\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n"  "	bne	1b" @@ -349,12 +384,14 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)  	return result;  } -static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) +static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, +					long long new)  { -	u64 oldval; +	long long oldval;  	unsigned long res;  	smp_mb(); +	prefetchw(&ptr->counter);  	do {  		__asm__ __volatile__("@ atomic64_cmpxchg\n" @@ -373,12 +410,13 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)  	return oldval;  } -static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) +static inline long long atomic64_xchg(atomic64_t *ptr, long long new)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&ptr->counter);  	__asm__ __volatile__("@ atomic64_xchg\n"  "1:	ldrexd	%0, %H0, [%3]\n" @@ -394,18 +432,19 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)  	return result;  } -static inline u64 atomic64_dec_if_positive(atomic64_t *v) +static inline long long atomic64_dec_if_positive(atomic64_t *v)  { -	u64 result; +	long long result;  	unsigned long tmp;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_dec_if_positive\n"  "1:	ldrexd	%0, %H0, [%3]\n" -"	subs	%0, %0, #1\n" -"	sbc	%H0, %H0, #0\n" -"	teq	%H0, #0\n" +"	subs	%Q0, %Q0, #1\n" +"	sbc	%R0, %R0, #0\n" +"	teq	%R0, #0\n"  "	bmi	2f\n"  "	strexd	%1, %0, %H0, [%3]\n"  "	teq	%1, #0\n" @@ -420,13 +459,14 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)  	return result;  } -static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)  { -	u64 val; +	long long val;  	unsigned long tmp;  	int ret = 1;  	smp_mb(); +	prefetchw(&v->counter);  	__asm__ __volatile__("@ atomic64_add_unless\n"  "1:	ldrexd	%0, %H0, [%4]\n" @@ -434,8 +474,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)  "	teqeq	%H0, %H5\n"  "	moveq	%1, #0\n"  "	beq	2f\n" -"	adds	%0, %0, %6\n" -"	adc	%H0, %H0, %H6\n" +"	adds	%Q0, %Q0, %Q6\n" +"	adc	%R0, %R0, %R6\n"  "	strexd	%2, %0, %H0, [%4]\n"  "	teq	%2, #0\n"  "	bne	1b\n" @@ -460,9 +500,6 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)  #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)  #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL) -#else /* !CONFIG_GENERIC_ATOMIC64 */ -#include <asm-generic/atomic64.h> -#endif -#include <asm-generic/atomic-long.h> +#endif /* !CONFIG_GENERIC_ATOMIC64 */  #endif  #endif diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h deleted file mode 100644 index c0536f6b29a..00000000000 --- a/arch/arm/include/asm/auxvec.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __ASMARM_AUXVEC_H -#define __ASMARM_AUXVEC_H - -#endif diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h new file mode 100644 index 00000000000..1714800fa11 --- /dev/null +++ b/arch/arm/include/asm/bL_switcher.h @@ -0,0 +1,77 @@ +/* + * arch/arm/include/asm/bL_switcher.h + * + * Created by:  Nicolas Pitre, April 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the 
GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef ASM_BL_SWITCHER_H +#define ASM_BL_SWITCHER_H + +#include <linux/compiler.h> +#include <linux/types.h> + +typedef void (*bL_switch_completion_handler)(void *cookie); + +int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id, +			 bL_switch_completion_handler completer, +			 void *completer_cookie); +static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id) +{ +	return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL); +} + +/* + * Register here to be notified about runtime enabling/disabling of + * the switcher. + * + * The notifier chain is called with the switcher activation lock held: + * the switcher will not be enabled or disabled during callbacks. + * Callbacks must not call bL_switcher_{get,put}_enabled(). + */ +#define BL_NOTIFY_PRE_ENABLE	0 +#define BL_NOTIFY_POST_ENABLE	1 +#define BL_NOTIFY_PRE_DISABLE	2 +#define BL_NOTIFY_POST_DISABLE	3 + +#ifdef CONFIG_BL_SWITCHER + +int bL_switcher_register_notifier(struct notifier_block *nb); +int bL_switcher_unregister_notifier(struct notifier_block *nb); + +/* + * Use these functions to temporarily prevent enabling/disabling of + * the switcher. + * bL_switcher_get_enabled() returns true if the switcher is currently + * enabled.  Each call to bL_switcher_get_enabled() must be followed + * by a call to bL_switcher_put_enabled().  These functions are not + * recursive. + */ +bool bL_switcher_get_enabled(void); +void bL_switcher_put_enabled(void); + +int bL_switcher_trace_trigger(void); +int bL_switcher_get_logical_index(u32 mpidr); + +#else +static inline int bL_switcher_register_notifier(struct notifier_block *nb) +{ +	return 0; +} + +static inline int bL_switcher_unregister_notifier(struct notifier_block *nb) +{ +	return 0; +} + +static inline bool bL_switcher_get_enabled(void) { return false; } +static inline void bL_switcher_put_enabled(void) { } +static inline int bL_switcher_trace_trigger(void) { return 0; } +static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; } +#endif /* CONFIG_BL_SWITCHER */ + +#endif diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h new file mode 100644 index 00000000000..c6a3e73a6e2 --- /dev/null +++ b/arch/arm/include/asm/barrier.h @@ -0,0 +1,86 @@ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#include <asm/outercache.h> + +#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); + +#if __LINUX_ARM_ARCH__ >= 7 ||		\ +	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) +#define sev()	__asm__ __volatile__ ("sev" : : : "memory") +#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory") +#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory") +#endif + +#if __LINUX_ARM_ARCH__ >= 7 +#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") +#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") +#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ +				    : : "r" (0) : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +				    : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ +				    : : "r" (0) : "memory") +#elif defined(CONFIG_CPU_FA526) +#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ +				    : : "r" (0) : 
"memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +				    : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#else +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ +				    : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include <mach/barriers.h> +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb()		do { dsb(); outer_sync(); } while (0) +#define rmb()		dsb() +#define wmb()		do { dsb(st); outer_sync(); } while (0) +#else +#define mb()		barrier() +#define rmb()		barrier() +#define wmb()		barrier() +#endif + +#ifndef CONFIG_SMP +#define smp_mb()	barrier() +#define smp_rmb()	barrier() +#define smp_wmb()	barrier() +#else +#define smp_mb()	dmb(ish) +#define smp_rmb()	smp_mb() +#define smp_wmb()	dmb(ishst) +#endif + +#define smp_store_release(p, v)						\ +do {									\ +	compiletime_assert_atomic_type(*p);				\ +	smp_mb();							\ +	ACCESS_ONCE(*p) = (v);						\ +} while (0) + +#define smp_load_acquire(p)						\ +({									\ +	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\ +	compiletime_assert_atomic_type(*p);				\ +	smp_mb();							\ +	___p1;								\ +}) + +#define read_barrier_depends()		do { } while(0) +#define smp_read_barrier_depends()	do { } while(0) + +#define set_mb(var, value)	do { var = value; smp_mb(); } while (0) + +#define smp_mb__before_atomic()	smp_mb() +#define smp_mb__after_atomic()	smp_mb() + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 338ff19ae44..56380995f4c 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -24,10 +24,8 @@  #endif  #include <linux/compiler.h> -#include <asm/system.h> - -#define smp_mb__before_clear_bit()	mb() -#define smp_mb__after_clear_bit()	mb() +#include <linux/irqflags.h> +#include <asm/barrier.h>  /*   * These functions are the basis of our bit ops. @@ -149,14 +147,18 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)   */  /* + * Native endian assembly bitops.  nr = 0 -> word 0 bit 0. + */ +extern void _set_bit(int nr, volatile unsigned long * p); +extern void _clear_bit(int nr, volatile unsigned long * p); +extern void _change_bit(int nr, volatile unsigned long * p); +extern int _test_and_set_bit(int nr, volatile unsigned long * p); +extern int _test_and_clear_bit(int nr, volatile unsigned long * p); +extern int _test_and_change_bit(int nr, volatile unsigned long * p); + +/*   * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.   */ -extern void _set_bit_le(int nr, volatile unsigned long * p); -extern void _clear_bit_le(int nr, volatile unsigned long * p); -extern void _change_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);  extern int _find_first_zero_bit_le(const void * p, unsigned size);  extern int _find_next_zero_bit_le(const void * p, int size, int offset);  extern int _find_first_bit_le(const unsigned long *p, unsigned size); @@ -165,12 +167,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);  /*   * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.   
*/ -extern void _set_bit_be(int nr, volatile unsigned long * p); -extern void _clear_bit_be(int nr, volatile unsigned long * p); -extern void _change_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_set_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p); -extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);  extern int _find_first_zero_bit_be(const void * p, unsigned size);  extern int _find_next_zero_bit_be(const void * p, int size, int offset);  extern int _find_first_bit_be(const unsigned long *p, unsigned size); @@ -180,58 +176,40 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);  /*   * The __* form of bitops are non-atomic and may be reordered.   */ -#define	ATOMIC_BITOP_LE(name,nr,p)		\ -	(__builtin_constant_p(nr) ?		\ -	 ____atomic_##name(nr, p) :		\ -	 _##name##_le(nr,p)) - -#define	ATOMIC_BITOP_BE(name,nr,p)		\ -	(__builtin_constant_p(nr) ?		\ -	 ____atomic_##name(nr, p) :		\ -	 _##name##_be(nr,p)) +#define ATOMIC_BITOP(name,nr,p)			\ +	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))  #else -#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p) -#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p) +#define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)  #endif -#define NONATOMIC_BITOP(name,nr,p)		\ -	(____nonatomic_##name(nr, p)) +/* + * Native endian atomic definitions. + */ +#define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p) +#define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p) +#define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p) +#define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p) +#define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p) +#define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)  #ifndef __ARMEB__  /*   * These are the little endian, atomic definitions.   */ -#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p) -#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p) -#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p) -#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)  #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)  #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)  #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)  #define find_next_bit(p,sz,off)		_find_next_bit_le(p,sz,off) -#define WORD_BITOFF_TO_LE(x)		((x)) -  #else -  /*   * These are the big endian, atomic definitions.   
*/ -#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p) -#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p) -#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p) -#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p) -#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) -#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)  #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)  #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)  #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)  #define find_next_bit(p,sz,off)		_find_next_bit_be(p,sz,off) -#define WORD_BITOFF_TO_LE(x)		((x) ^ 0x18) -  #endif  #if __LINUX_ARM_ARCH__ < 5 @@ -274,25 +252,59 @@ static inline int constant_fls(int x)  }  /* - * On ARMv5 and above those functions can be implemented around - * the clz instruction for much better code efficiency. + * On ARMv5 and above those functions can be implemented around the + * clz instruction for much better code efficiency.  __clz returns + * the number of leading zeros, zero input will return 32, and + * 0x80000000 will return 0.   */ +static inline unsigned int __clz(unsigned int x) +{ +	unsigned int ret; +	asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); + +	return ret; +} + +/* + * fls() returns zero if the input is zero, otherwise returns the bit + * position of the last set bit, where the LSB is 1 and MSB is 32. + */  static inline int fls(int x)  { -	int ret; -  	if (__builtin_constant_p(x))  	       return constant_fls(x); -	asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc"); -       	ret = 32 - ret; -	return ret; +	return 32 - __clz(x); +} + +/* + * __fls() returns the bit position of the last bit set, where the + * LSB is 0 and MSB is 31.  Zero input is undefined. + */ +static inline unsigned long __fls(unsigned long x) +{ +	return fls(x) - 1; +} + +/* + * ffs() returns zero if the input was zero, otherwise returns the bit + * position of the first set bit, where the LSB is 1 and MSB is 32. + */ +static inline int ffs(int x) +{ +	return fls(x & -x); +} + +/* + * __ffs() returns the bit position of the first bit set, where the + * LSB is 0 and MSB is 31.  Zero input is undefined. + */ +static inline unsigned long __ffs(unsigned long x) +{ +	return ffs(x) - 1;  } -#define __fls(x) (fls(x) - 1) -#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) -#define __ffs(x) (ffs(x) - 1)  #define ffz(x) __ffs( ~(x) )  #endif @@ -303,41 +315,34 @@ static inline int fls(int x)  #include <asm-generic/bitops/hweight.h>  #include <asm-generic/bitops/lock.h> -/* - * Ext2 is defined to use little-endian byte ordering. - * These do not need to be atomic. 
- */ -#define ext2_set_bit(nr,p)			\ -		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define ext2_set_bit_atomic(lock,nr,p)          \ -                test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define ext2_clear_bit(nr,p)			\ -		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define ext2_clear_bit_atomic(lock,nr,p)        \ -                test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define ext2_test_bit(nr,p)			\ -		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define ext2_find_first_zero_bit(p,sz)		\ -		_find_first_zero_bit_le(p,sz) -#define ext2_find_next_zero_bit(p,sz,off)	\ -		_find_next_zero_bit_le(p,sz,off) -#define ext2_find_next_bit(p, sz, off) \ -		_find_next_bit_le(p, sz, off) +#ifdef __ARMEB__ + +static inline int find_first_zero_bit_le(const void *p, unsigned size) +{ +	return _find_first_zero_bit_le(p, size); +} +#define find_first_zero_bit_le find_first_zero_bit_le + +static inline int find_next_zero_bit_le(const void *p, int size, int offset) +{ +	return _find_next_zero_bit_le(p, size, offset); +} +#define find_next_zero_bit_le find_next_zero_bit_le + +static inline int find_next_bit_le(const void *p, int size, int offset) +{ +	return _find_next_bit_le(p, size, offset); +} +#define find_next_bit_le find_next_bit_le + +#endif + +#include <asm-generic/bitops/le.h>  /* - * Minix is defined to use little-endian byte ordering. - * These do not need to be atomic. + * Ext2 is defined to use little-endian byte ordering.   */ -#define minix_set_bit(nr,p)			\ -		__set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define minix_test_bit(nr,p)			\ -		test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define minix_test_and_set_bit(nr,p)		\ -		__test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define minix_test_and_clear_bit(nr,p)		\ -		__test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) -#define minix_find_first_zero_bit(p,sz)		\ -		_find_first_zero_bit_le(p,sz) +#include <asm-generic/bitops/ext2-atomic-setbit.h>  #endif /* __KERNEL__ */ diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b..00000000000 --- a/arch/arm/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 4d88425a416..b274bde2490 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -1,24 +1,92 @@  #ifndef _ASMARM_BUG_H  #define _ASMARM_BUG_H +#include <linux/linkage.h> +#include <linux/types.h> +#include <asm/opcodes.h>  #ifdef CONFIG_BUG + +/* + * Use a suitable undefined instruction to use for ARM/Thumb2 bug handling. + * We need to be careful not to conflict with those used by other modules and + * the register_undef_hook() system. + */ +#ifdef CONFIG_THUMB2_KERNEL +#define BUG_INSTR_VALUE 0xde02 +#define BUG_INSTR(__value) __inst_thumb16(__value) +#else +#define BUG_INSTR_VALUE 0xe7f001f2 +#define BUG_INSTR(__value) __inst_arm(__value) +#endif + + +#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE) +#define _BUG(file, line, value) __BUG(file, line, value) +  #ifdef CONFIG_DEBUG_BUGVERBOSE -extern void __bug(const char *file, int line) __attribute__((noreturn)); -/* give file/line information */ -#define BUG()		__bug(__FILE__, __LINE__) +/* + * The extra indirection is to ensure that the __FILE__ string comes through + * OK. 
Many versions of gcc do not support the asm %c parameter which would be + * preferable to this unpleasantness. We use mergeable string sections to + * avoid multiple copies of the string appearing in the kernel image. + */ +#define __BUG(__file, __line, __value)				\ +do {								\ +	asm volatile("1:\t" BUG_INSTR(__value) "\n"  \ +		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ +		"2:\t.asciz " #__file "\n" 			\ +		".popsection\n" 				\ +		".pushsection __bug_table,\"a\"\n"		\ +		"3:\t.word 1b, 2b\n"				\ +		"\t.hword " #__line ", 0\n"			\ +		".popsection");					\ +	unreachable();						\ +} while (0) -#else +#else  /* not CONFIG_DEBUG_BUGVERBOSE */ -/* this just causes an oops */ -#define BUG()		do { *(int *)0 = 0; } while (1) +#define __BUG(__file, __line, __value)				\ +do {								\ +	asm volatile(BUG_INSTR(__value) "\n");			\ +	unreachable();						\ +} while (0) -#endif +#endif  /* CONFIG_DEBUG_BUGVERBOSE */  #define HAVE_ARCH_BUG -#endif +#endif  /* CONFIG_BUG */  #include <asm-generic/bug.h> +struct pt_regs; +void die(const char *msg, struct pt_regs *regs, int err); + +struct siginfo; +void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, +		unsigned long err, unsigned long trap); + +#ifdef CONFIG_ARM_LPAE +#define FAULT_CODE_ALIGNMENT	33 +#define FAULT_CODE_DEBUG	34 +#else +#define FAULT_CODE_ALIGNMENT	1 +#define FAULT_CODE_DEBUG	2 +#endif + +void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, +				       struct pt_regs *), +		     int sig, int code, const char *name); + +void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, +				       struct pt_regs *), +		     int sig, int code, const char *name); + +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +struct mm_struct; +extern void show_pte(struct mm_struct *mm, unsigned long addr); +extern void __show_regs(struct pt_regs *); +  #endif diff --git a/arch/arm/include/asm/byteorder.h b/arch/arm/include/asm/byteorder.h deleted file mode 100644 index 77379748b17..00000000000 --- a/arch/arm/include/asm/byteorder.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - *  arch/arm/include/asm/byteorder.h - * - * ARM Endian-ness.  
In little endian mode, the data bus is connected such - * that byte accesses appear as: - *  0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31 - * and word accesses (data or instruction) appear as: - *  d0...d31 - * - * When in big endian mode, byte accesses appear as: - *  0 = d24...d31, 1 = d16...d23, 2 = d8...d15, 3 = d0...d7 - * and word accesses (data or instruction) appear as: - *  d0...d31 - */ -#ifndef __ASM_ARM_BYTEORDER_H -#define __ASM_ARM_BYTEORDER_H - -#ifdef __ARMEB__ -#include <linux/byteorder/big_endian.h> -#else -#include <linux/byteorder/little_endian.h> -#endif - -#endif - diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h index 9d6122096fb..75fe66bc02b 100644 --- a/arch/arm/include/asm/cache.h +++ b/arch/arm/include/asm/cache.h @@ -23,4 +23,6 @@  #define ARCH_SLAB_MINALIGN 8  #endif +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +  #endif diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 3acd8fa25e3..fd43f7f55b7 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -12,7 +12,7 @@  #include <linux/mm.h> -#include <asm/glue.h> +#include <asm/glue-cache.h>  #include <asm/shmparam.h>  #include <asm/cachetype.h>  #include <asm/outercache.h> @@ -20,123 +20,6 @@  #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)  /* - *	Cache Model - *	=========== - */ -#undef _CACHE -#undef MULTI_CACHE - -#if defined(CONFIG_CPU_CACHE_V3) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE v3 -# endif -#endif - -#if defined(CONFIG_CPU_CACHE_V4) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE v4 -# endif -#endif - -#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ -    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ -    defined(CONFIG_CPU_ARM1026) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_FA526) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE fa -# endif -#endif - -#if defined(CONFIG_CPU_ARM926T) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE arm926 -# endif -#endif - -#if defined(CONFIG_CPU_ARM940T) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE arm940 -# endif -#endif - -#if defined(CONFIG_CPU_ARM946E) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE arm946 -# endif -#endif - -#if defined(CONFIG_CPU_CACHE_V4WB) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE v4wb -# endif -#endif - -#if defined(CONFIG_CPU_XSCALE) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE xscale -# endif -#endif - -#if defined(CONFIG_CPU_XSC3) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE xsc3 -# endif -#endif - -#if defined(CONFIG_CPU_MOHAWK) -# ifdef _CACHE -#  define MULTI_CACHE 1 -# else -#  define _CACHE mohawk -# endif -#endif - -#if defined(CONFIG_CPU_FEROCEON) -# define MULTI_CACHE 1 -#endif - -#if defined(CONFIG_CPU_V6) -//# ifdef _CACHE -#  define MULTI_CACHE 1 -//# else -//#  define _CACHE v6 -//# endif -#endif - -#if defined(CONFIG_CPU_V7) -//# ifdef _CACHE -#  define MULTI_CACHE 1 -//# else -//#  define _CACHE v7 -//# endif -#endif - -#if !defined(_CACHE) && !defined(MULTI_CACHE) -#error Unknown cache maintainence model -#endif - -/*   * This flag is used to indicate that the page pointed to by a pte is clean   * and does not require cleaning before returning it to the user.   
*/ @@ -166,6 +49,13 @@   *   *		Unconditionally clean and invalidate the entire cache.   * + *     flush_kern_louis() + * + *             Flush data cache levels up to the level of unification + *             inner shareable and invalidate the I-cache. + *             Only needed from v7 onwards, falls back to flush_cache_all() + *             for all other processor versions. + *   *	flush_user_all()   *   *		Clean and invalidate all user space cache entries @@ -214,11 +104,12 @@  struct cpu_cache_fns {  	void (*flush_icache_all)(void);  	void (*flush_kern_all)(void); +	void (*flush_kern_louis)(void);  	void (*flush_user_all)(void);  	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);  	void (*coherent_kern_range)(unsigned long, unsigned long); -	void (*coherent_user_range)(unsigned long, unsigned long); +	int  (*coherent_user_range)(unsigned long, unsigned long);  	void (*flush_kern_dcache_area)(void *, size_t);  	void (*dma_map_area)(const void *, size_t, int); @@ -236,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache;  #define __cpuc_flush_icache_all		cpu_cache.flush_icache_all  #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all +#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis  #define __cpuc_flush_user_all		cpu_cache.flush_user_all  #define __cpuc_flush_user_range		cpu_cache.flush_user_range  #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range @@ -249,25 +141,18 @@ extern struct cpu_cache_fns cpu_cache;   * visible to the CPU.   */  #define dmac_map_area			cpu_cache.dma_map_area -#define dmac_unmap_area		cpu_cache.dma_unmap_area +#define dmac_unmap_area			cpu_cache.dma_unmap_area  #define dmac_flush_range		cpu_cache.dma_flush_range  #else -#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all) -#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all) -#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all) -#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range) -#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range) -#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range) -#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area) -  extern void __cpuc_flush_icache_all(void);  extern void __cpuc_flush_kern_all(void); +extern void __cpuc_flush_kern_louis(void);  extern void __cpuc_flush_user_all(void);  extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);  extern void __cpuc_coherent_kern_range(unsigned long, unsigned long); -extern void __cpuc_coherent_user_range(unsigned long, unsigned long); +extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);  extern void __cpuc_flush_dcache_area(void *, size_t);  /* @@ -276,10 +161,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);   * is visible to DMA, or data written by DMA to system memory is   * visible to the CPU.   */ -#define dmac_map_area			__glue(_CACHE,_dma_map_area) -#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area) -#define dmac_flush_range		__glue(_CACHE,_dma_flush_range) -  extern void dmac_map_area(const void *, size_t, int);  extern void dmac_unmap_area(const void *, size_t, int);  extern void dmac_flush_range(const void *, const void *); @@ -316,7 +197,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,   * Optimized __flush_icache_all for the common cases. Note that UP ARMv7   * will fall through to use __flush_icache_all_generic.   
*/ -#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||		\ +#if (defined(CONFIG_CPU_V7) && \ +     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \  	defined(CONFIG_SMP_ON_UP)  #define __flush_icache_preferred	__cpuc_flush_icache_all  #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) @@ -330,8 +212,14 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,  static inline void __flush_icache_all(void)  {  	__flush_icache_preferred(); +	dsb(ishst);  } +/* + * Flush caches up to Level of Unification Inner Shareable + */ +#define flush_cache_louis()		__cpuc_flush_kern_louis() +  #define flush_cache_all()		__cpuc_flush_kern_all()  static inline void vivt_flush_cache_mm(struct mm_struct *mm) @@ -343,7 +231,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)  static inline void  vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)  { -	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) +	struct mm_struct *mm = vma->vm_mm; + +	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))  		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),  					vma->vm_flags);  } @@ -351,7 +241,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned  static inline void  vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)  { -	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { +	struct mm_struct *mm = vma->vm_mm; + +	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {  		unsigned long addr = user_addr & PAGE_MASK;  		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);  	} @@ -377,8 +269,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr   * Harvard caches are synchronised for the user space address range.   * This is used for the ARM private sys_cacheflush system call.   */ -#define flush_cache_user_range(vma,start,end) \ -	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end)) +#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)  /*   * Perform necessary cache operations to ensure that data previously @@ -429,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,  }  #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} +extern void flush_kernel_dcache_page(struct page *);  #define flush_dcache_mmap_lock(mapping) \  	spin_lock_irq(&(mapping)->tree_lock) @@ -463,7 +352,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)  		 * set_pte_at() called from vmap_pte_range() does not  		 * have a DSB after cleaning the cache line.  		 */ -		dsb(); +		dsb(ishst);  }  static inline void flush_cache_vunmap(unsigned long start, unsigned long end) @@ -472,4 +361,132 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)  		flush_cache_all();  } +/* + * Memory synchronization helpers for mixed cached vs non cached accesses. + * + * Some synchronization algorithms have to set states in memory with the + * cache enabled or disabled depending on the code path.  It is crucial + * to always ensure proper cache maintenance to update main memory right + * away in that case. + * + * Any cached write must be followed by a cache clean operation. + * Any cached read must be preceded by a cache invalidate operation. + * Yet, in the read case, a cache flush i.e. 
an atomic clean+invalidate + * operation is needed to avoid discarding possible concurrent writes to the + * accessed memory. + * + * Also, in order to prevent a cached writer from interfering with an + * adjacent non-cached writer, each state variable must be located in + * a separate cache line. + */ + +/* + * This needs to be >= the max cache writeback size of all + * supported platforms included in the current kernel configuration. + * This is used to align state variables to their own cache lines. + */ +#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */ +#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER) + +/* + * There is no __cpuc_clean_dcache_area but we use it anyway for + * code intent clarity, and alias it to __cpuc_flush_dcache_area. + */ +#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area + +/* + * Ensure preceding writes to *p by this CPU are visible to + * subsequent reads by other CPUs: + */ +static inline void __sync_cache_range_w(volatile void *p, size_t size) +{ +	char *_p = (char *)p; + +	__cpuc_clean_dcache_area(_p, size); +	outer_clean_range(__pa(_p), __pa(_p + size)); +} + +/* + * Ensure preceding writes to *p by other CPUs are visible to + * subsequent reads by this CPU.  We must be careful not to + * discard data simultaneously written by another CPU, hence the + * usage of flush rather than invalidate operations. + */ +static inline void __sync_cache_range_r(volatile void *p, size_t size) +{ +	char *_p = (char *)p; + +#ifdef CONFIG_OUTER_CACHE +	if (outer_cache.flush_range) { +		/* +		 * Ensure dirty data migrated from other CPUs into our cache +		 * are cleaned out safely before the outer cache is cleaned: +		 */ +		__cpuc_clean_dcache_area(_p, size); + +		/* Clean and invalidate stale data for *p from outer ... */ +		outer_flush_range(__pa(_p), __pa(_p + size)); +	} +#endif + +	/* ... and inner cache: */ +	__cpuc_flush_dcache_area(_p, size); +} + +#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) +#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) + +/* + * Disabling cache access for one CPU in an ARMv7 SMP system is tricky. + * To do so we must: + * + * - Clear the SCTLR.C bit to prevent further cache allocations + * - Flush the desired level of cache + * - Clear the ACTLR "SMP" bit to disable local coherency + * + * ... and so without any intervening memory access in between those steps, + * not even to the stack. + * + * WARNING -- After this has been called: + * + * - No ldrex/strex (and similar) instructions must be used. + * - The CPU is obviously no longer coherent with the other CPUs. + * - This is unlikely to work as expected if Linux is running non-secure. + * + * Note: + * + * - This is known to apply to several ARMv7 processor implementations, + *   however some exceptions may exist.  Caveat emptor. + * + * - The clobber list is dictated by the call to v7_flush_dcache_*. + *   fp is preserved to the stack explicitly prior to disabling the cache + *   since adding it to the clobber list is incompatible with having + *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering + *   trampolines are inserted by the linker and to keep sp 64-bit aligned. 
+ */ +#define v7_exit_coherency_flush(level) \ +	asm volatile( \ +	"stmfd	sp!, {fp, ip} \n\t" \ +	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \ +	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \ +	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \ +	"isb	\n\t" \ +	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \ +	"clrex	\n\t" \ +	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \ +	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \ +	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \ +	"isb	\n\t" \ +	"dsb	\n\t" \ +	"ldmfd	sp!, {fp, ip}" \ +	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \ +	      "r9","r10","lr","memory" ) + +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); + +void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, +			     void *kaddr, unsigned long len);  #endif diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h index c023db09fcc..7ea78144ae2 100644 --- a/arch/arm/include/asm/cachetype.h +++ b/arch/arm/include/asm/cachetype.h @@ -7,6 +7,7 @@  #define CACHEID_VIPT			(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)  #define CACHEID_ASID_TAGGED		(1 << 3)  #define CACHEID_VIPT_I_ALIASING		(1 << 4) +#define CACHEID_PIPT			(1 << 5)  extern unsigned int cacheid; @@ -16,6 +17,7 @@ extern unsigned int cacheid;  #define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)  #define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)  #define icache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_I_ALIASING) +#define icache_is_pipt()		cacheid_is(CACHEID_PIPT)  /*   * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture @@ -26,7 +28,8 @@ extern unsigned int cacheid;  #if __LINUX_ARM_ARCH__ >= 7  #define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING |\  				 CACHEID_ASID_TAGGED |\ -				 CACHEID_VIPT_I_ALIASING) +				 CACHEID_VIPT_I_ALIASING |\ +				 CACHEID_PIPT)  #elif __LINUX_ARM_ARCH__ >= 6  #define	__CACHEID_ARCH_MIN	(~CACHEID_VIVT)  #else diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h index 6dcc1643086..52331511547 100644 --- a/arch/arm/include/asm/checksum.h +++ b/arch/arm/include/asm/checksum.h @@ -87,19 +87,33 @@ static inline __wsum  csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,  		   unsigned short proto, __wsum sum)  { -	__asm__( -	"adds	%0, %1, %2		@ csum_tcpudp_nofold	\n\ -	adcs	%0, %0, %3					\n" +	u32 lenprot = len | proto << 16; +	if (__builtin_constant_p(sum) && sum == 0) { +		__asm__( +		"adds	%0, %1, %2	@ csum_tcpudp_nofold0	\n\t"  #ifdef __ARMEB__ -	"adcs	%0, %0, %4					\n" +		"adcs	%0, %0, %3				\n\t"  #else -	"adcs	%0, %0, %4, lsl #8				\n" +		"adcs	%0, %0, %3, ror #8			\n\t"  #endif -	"adcs	%0, %0, %5					\n\ -	adc	%0, %0, #0" -	: "=&r"(sum) -	: "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto)) -	: "cc"); +		"adc	%0, %0, #0" +		: "=&r" (sum) +		: "r" (daddr), "r" (saddr), "r" (lenprot) +		: "cc"); +	} else { +		__asm__( +		"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\t" +		"adcs	%0, %0, %3				\n\t" +#ifdef __ARMEB__ +		"adcs	%0, %0, %4				\n\t" +#else +		"adcs	%0, %0, %4, ror #8			\n\t" +#endif +		"adc	%0, %0, #0" +		: "=&r"(sum) +		: "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot) +		: "cc"); +	}  	return sum;  }	  /* diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h index b56c1389b6f..4e8a4b27d7c 100644 --- a/arch/arm/include/asm/clkdev.h +++ 
b/arch/arm/include/asm/clkdev.h
@@ -12,23 +12,20 @@
 #ifndef __ASM_CLKDEV_H
 #define __ASM_CLKDEV_H
-struct clk;
-struct device;
+#include <linux/slab.h>
-struct clk_lookup {
-	struct list_head	node;
-	const char		*dev_id;
-	const char		*con_id;
-	struct clk		*clk;
-};
-
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-	const char *dev_fmt, ...);
-
-void clkdev_add(struct clk_lookup *cl);
-void clkdev_drop(struct clk_lookup *cl);
+#ifndef CONFIG_COMMON_CLK
+#ifdef CONFIG_HAVE_MACH_CLKDEV
+#include <mach/clkdev.h>
+#else
+#define __clk_get(clk)	({ 1; })
+#define __clk_put(clk)	do { } while (0)
+#endif
+#endif
-void clkdev_add_table(struct clk_lookup *, size_t);
-int clk_add_alias(const char *, const char *, char *, struct device *);
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+	return kzalloc(size, GFP_KERNEL);
+}
 #endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
new file mode 100644
index 00000000000..abb2c3769b0
--- /dev/null
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -0,0 +1,288 @@
+#ifndef __ASM_ARM_CMPXCHG_H
+#define __ASM_ARM_CMPXCHG_H
+
+#include <linux/irqflags.h>
+#include <linux/prefetch.h>
+#include <asm/barrier.h>
+
+#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
+/*
+ * On the StrongARM, "swp" is terminally broken since it bypasses the
+ * cache totally.  This means that the cache becomes inconsistent, and,
+ * since we use normal loads/stores as well, this is really bad.
+ * Typically, this causes oopsen in filp_close, but could have other,
+ * more disastrous effects.  There are two work-arounds:
+ *  1. Disable interrupts and emulate the atomic swap
+ *  2. Clean the cache, perform atomic swap, flush the cache
+ *
+ * We choose (1) since it's the "easiest" to achieve here and is not
+ * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
+ */ +#define swp_is_buggy +#endif + +static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ +	extern void __bad_xchg(volatile void *, int); +	unsigned long ret; +#ifdef swp_is_buggy +	unsigned long flags; +#endif +#if __LINUX_ARM_ARCH__ >= 6 +	unsigned int tmp; +#endif + +	smp_mb(); +	prefetchw((const void *)ptr); + +	switch (size) { +#if __LINUX_ARM_ARCH__ >= 6 +	case 1: +		asm volatile("@	__xchg1\n" +		"1:	ldrexb	%0, [%3]\n" +		"	strexb	%1, %2, [%3]\n" +		"	teq	%1, #0\n" +		"	bne	1b" +			: "=&r" (ret), "=&r" (tmp) +			: "r" (x), "r" (ptr) +			: "memory", "cc"); +		break; +	case 4: +		asm volatile("@	__xchg4\n" +		"1:	ldrex	%0, [%3]\n" +		"	strex	%1, %2, [%3]\n" +		"	teq	%1, #0\n" +		"	bne	1b" +			: "=&r" (ret), "=&r" (tmp) +			: "r" (x), "r" (ptr) +			: "memory", "cc"); +		break; +#elif defined(swp_is_buggy) +#ifdef CONFIG_SMP +#error SMP is not supported on this platform +#endif +	case 1: +		raw_local_irq_save(flags); +		ret = *(volatile unsigned char *)ptr; +		*(volatile unsigned char *)ptr = x; +		raw_local_irq_restore(flags); +		break; + +	case 4: +		raw_local_irq_save(flags); +		ret = *(volatile unsigned long *)ptr; +		*(volatile unsigned long *)ptr = x; +		raw_local_irq_restore(flags); +		break; +#else +	case 1: +		asm volatile("@	__xchg1\n" +		"	swpb	%0, %1, [%2]" +			: "=&r" (ret) +			: "r" (x), "r" (ptr) +			: "memory", "cc"); +		break; +	case 4: +		asm volatile("@	__xchg4\n" +		"	swp	%0, %1, [%2]" +			: "=&r" (ret) +			: "r" (x), "r" (ptr) +			: "memory", "cc"); +		break; +#endif +	default: +		__bad_xchg(ptr, size), ret = 0; +		break; +	} +	smp_mb(); + +	return ret; +} + +#define xchg(ptr,x) \ +	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) + +#include <asm-generic/cmpxchg-local.h> + +#if __LINUX_ARM_ARCH__ < 6 +/* min ARCH < ARMv6 */ + +#ifdef CONFIG_SMP +#error "SMP is not supported on this platform" +#endif + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n)				  	       \ +	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ +			(unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include <asm-generic/cmpxchg.h> +#endif + +#else	/* min ARCH >= ARMv6 */ + +extern void __bad_cmpxchg(volatile void *ptr, int size); + +/* + * cmpxchg only support 32-bits operands on ARMv6. 
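+ *
+ * The 1- and 2-byte cases below rely on the byte/halfword exclusives
+ * (ldrexb/strexb, ldrexh/strexh) introduced with ARMv6K, which is why
+ * they are compiled out when the kernel must also support plain ARMv6
+ * (CONFIG_CPU_V6).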
+ */ + +static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, +				      unsigned long new, int size) +{ +	unsigned long oldval, res; + +	prefetchw((const void *)ptr); + +	switch (size) { +#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */ +	case 1: +		do { +			asm volatile("@ __cmpxchg1\n" +			"	ldrexb	%1, [%2]\n" +			"	mov	%0, #0\n" +			"	teq	%1, %3\n" +			"	strexbeq %0, %4, [%2]\n" +				: "=&r" (res), "=&r" (oldval) +				: "r" (ptr), "Ir" (old), "r" (new) +				: "memory", "cc"); +		} while (res); +		break; +	case 2: +		do { +			asm volatile("@ __cmpxchg1\n" +			"	ldrexh	%1, [%2]\n" +			"	mov	%0, #0\n" +			"	teq	%1, %3\n" +			"	strexheq %0, %4, [%2]\n" +				: "=&r" (res), "=&r" (oldval) +				: "r" (ptr), "Ir" (old), "r" (new) +				: "memory", "cc"); +		} while (res); +		break; +#endif +	case 4: +		do { +			asm volatile("@ __cmpxchg4\n" +			"	ldrex	%1, [%2]\n" +			"	mov	%0, #0\n" +			"	teq	%1, %3\n" +			"	strexeq %0, %4, [%2]\n" +				: "=&r" (res), "=&r" (oldval) +				: "r" (ptr), "Ir" (old), "r" (new) +				: "memory", "cc"); +		} while (res); +		break; +	default: +		__bad_cmpxchg(ptr, size); +		oldval = 0; +	} + +	return oldval; +} + +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, +					 unsigned long new, int size) +{ +	unsigned long ret; + +	smp_mb(); +	ret = __cmpxchg(ptr, old, new, size); +	smp_mb(); + +	return ret; +} + +#define cmpxchg(ptr,o,n)						\ +	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\ +					  (unsigned long)(o),		\ +					  (unsigned long)(n),		\ +					  sizeof(*(ptr)))) + +static inline unsigned long __cmpxchg_local(volatile void *ptr, +					    unsigned long old, +					    unsigned long new, int size) +{ +	unsigned long ret; + +	switch (size) { +#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */ +	case 1: +	case 2: +		ret = __cmpxchg_local_generic(ptr, old, new, size); +		break; +#endif +	default: +		ret = __cmpxchg(ptr, old, new, size); +	} + +	return ret; +} + +static inline unsigned long long __cmpxchg64(unsigned long long *ptr, +					     unsigned long long old, +					     unsigned long long new) +{ +	unsigned long long oldval; +	unsigned long res; + +	prefetchw(ptr); + +	__asm__ __volatile__( +"1:	ldrexd		%1, %H1, [%3]\n" +"	teq		%1, %4\n" +"	teqeq		%H1, %H4\n" +"	bne		2f\n" +"	strexd		%0, %5, %H5, [%3]\n" +"	teq		%0, #0\n" +"	bne		1b\n" +"2:" +	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) +	: "r" (ptr), "r" (old), "r" (new) +	: "cc"); + +	return oldval; +} + +static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr, +						unsigned long long old, +						unsigned long long new) +{ +	unsigned long long ret; + +	smp_mb(); +	ret = __cmpxchg64(ptr, old, new); +	smp_mb(); + +	return ret; +} + +#define cmpxchg_local(ptr,o,n)						\ +	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\ +				       (unsigned long)(o),		\ +				       (unsigned long)(n),		\ +				       sizeof(*(ptr)))) + +#define cmpxchg64(ptr, o, n)						\ +	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\ +					(unsigned long long)(o),	\ +					(unsigned long long)(n))) + +#define cmpxchg64_relaxed(ptr, o, n)					\ +	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\ +					(unsigned long long)(o),	\ +					(unsigned long long)(n))) + +#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n)) + +#endif	/* __LINUX_ARM_ARCH__ >= 6 */ + +#endif /* __ASM_ARM_CMPXCHG_H */ diff --git a/arch/arm/include/asm/compiler.h b/arch/arm/include/asm/compiler.h new file mode 100644 index 00000000000..8155db2f7fa --- /dev/null +++ b/arch/arm/include/asm/compiler.h 
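One usage note on the cmpxchg() primitive defined just above: it returns the value it actually observed, so callers detect success by comparing that value against the expected old one. A minimal retry-loop sketch, assuming a hypothetical counter (the function name is illustrative, and the bare read of *ctr is a simplification of what a real caller would guard with ACCESS_ONCE()):

	static inline unsigned long counter_add(unsigned long *ctr, unsigned long n)
	{
		unsigned long old, new;

		do {
			old = *ctr;		/* snapshot the current value */
			new = old + n;		/* compute the desired new value */
		} while (cmpxchg(ctr, old, new) != old);	/* raced: retry */

		return new;
	}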
@@ -0,0 +1,15 @@ +#ifndef __ASM_ARM_COMPILER_H +#define __ASM_ARM_COMPILER_H + +/* + * This is used to ensure the compiler did actually allocate the register we + * asked it for some inline assembly sequences.  Apparently we can't trust + * the compiler from one version to another so a bit of paranoia won't hurt. + * This string is meant to be concatenated with the inline asm string and + * will cause compilation to stop on mismatch. + * (for details, see gcc PR 15089) + */ +#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t" + + +#endif /* __ASM_ARM_COMPILER_H */ diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h new file mode 100644 index 00000000000..c3f11524f10 --- /dev/null +++ b/arch/arm/include/asm/cp15.h @@ -0,0 +1,119 @@ +#ifndef __ASM_ARM_CP15_H +#define __ASM_ARM_CP15_H + +#include <asm/barrier.h> + +/* + * CR1 bits (CP#15 CR1) + */ +#define CR_M	(1 << 0)	/* MMU enable				*/ +#define CR_A	(1 << 1)	/* Alignment abort enable		*/ +#define CR_C	(1 << 2)	/* Dcache enable			*/ +#define CR_W	(1 << 3)	/* Write buffer enable			*/ +#define CR_P	(1 << 4)	/* 32-bit exception handler		*/ +#define CR_D	(1 << 5)	/* 32-bit data address range		*/ +#define CR_L	(1 << 6)	/* Implementation defined		*/ +#define CR_B	(1 << 7)	/* Big endian				*/ +#define CR_S	(1 << 8)	/* System MMU protection		*/ +#define CR_R	(1 << 9)	/* ROM MMU protection			*/ +#define CR_F	(1 << 10)	/* Implementation defined		*/ +#define CR_Z	(1 << 11)	/* Implementation defined		*/ +#define CR_I	(1 << 12)	/* Icache enable			*/ +#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/ +#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/ +#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/ +#define CR_DT	(1 << 16) +#ifdef CONFIG_MMU +#define CR_HA	(1 << 17)	/* Hardware management of Access Flag   */ +#else +#define CR_BR	(1 << 17)	/* MPU Background region enable (PMSA)  */ +#endif +#define CR_IT	(1 << 18) +#define CR_ST	(1 << 19) +#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/ +#define CR_U	(1 << 22)	/* Unaligned access operation		*/ +#define CR_XP	(1 << 23)	/* Extended page tables			*/ +#define CR_VE	(1 << 24)	/* Vectored interrupts			*/ +#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/ +#define CR_TRE	(1 << 28)	/* TEX remap enable			*/ +#define CR_AFE	(1 << 29)	/* Access flag enable			*/ +#define CR_TE	(1 << 30)	/* Thumb exception enable		*/ + +#ifndef __ASSEMBLY__ + +#if __LINUX_ARM_ARCH__ >= 4 +#define vectors_high()	(get_cr() & CR_V) +#else +#define vectors_high()	(0) +#endif + +#ifdef CONFIG_CPU_CP15 + +extern unsigned long cr_alignment;	/* defined in entry-armv.S */ + +static inline unsigned long get_cr(void) +{ +	unsigned long val; +	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc"); +	return val; +} + +static inline void set_cr(unsigned long val) +{ +	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" +	  : : "r" (val) : "cc"); +	isb(); +} + +static inline unsigned int get_auxcr(void) +{ +	unsigned int val; +	asm("mrc p15, 0, %0, c1, c0, 1	@ get AUXCR" : "=r" (val)); +	return val; +} + +static inline void set_auxcr(unsigned int val) +{ +	asm volatile("mcr p15, 0, %0, c1, c0, 1	@ set AUXCR" +	  : : "r" (val)); +	isb(); +} + +#define CPACC_FULL(n)		(3 << (n * 2)) +#define CPACC_SVC(n)		(1 << (n * 2)) +#define CPACC_DISABLE(n)	(0 << (n * 2)) + +static inline unsigned int get_copro_access(void) +{ +	unsigned int val; +	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" +	  : "=r" (val) : : "cc"); +	return val; +} + +static inline void 
set_copro_access(unsigned int val) +{ +	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" +	  : : "r" (val) : "cc"); +	isb(); +} + +#else /* ifdef CONFIG_CPU_CP15 */ + +/* + * cr_alignment is tightly coupled to cp15 (at least in the minds of the + * developers). Yielding 0 for machines without a cp15 (and making it + * read-only) is fine for most cases and saves quite some #ifdeffery. + */ +#define cr_alignment	UL(0) + +static inline unsigned long get_cr(void) +{ +	return 0; +} + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + +#endif /* ifndef __ASSEMBLY__ */ + +#endif diff --git a/arch/arm/include/asm/cpu-multi32.h b/arch/arm/include/asm/cpu-multi32.h deleted file mode 100644 index e2b5b0b2116..00000000000 --- a/arch/arm/include/asm/cpu-multi32.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - *  arch/arm/include/asm/cpu-multi32.h - * - *  Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#include <asm/page.h> - -struct mm_struct; - -/* - * Don't change this structure - ASM code - * relies on it. - */ -extern struct processor { -	/* MISC -	 * get data abort address/flags -	 */ -	void (*_data_abort)(unsigned long pc); -	/* -	 * Retrieve prefetch fault address -	 */ -	unsigned long (*_prefetch_abort)(unsigned long lr); -	/* -	 * Set up any processor specifics -	 */ -	void (*_proc_init)(void); -	/* -	 * Disable any processor specifics -	 */ -	void (*_proc_fin)(void); -	/* -	 * Special stuff for a reset -	 */ -	void (*reset)(unsigned long addr) __attribute__((noreturn)); -	/* -	 * Idle the processor -	 */ -	int (*_do_idle)(void); -	/* -	 * Processor architecture specific -	 */ -	/* -	 * clean a virtual address range from the -	 * D-cache without flushing the cache. -	 */ -	void (*dcache_clean_area)(void *addr, int size); - -	/* -	 * Set the page table -	 */ -	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); -	/* -	 * Set a possibly extended PTE.  Non-extended PTEs should -	 * ignore 'ext'. -	 */ -	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); -} processor; - -#define cpu_proc_init()			processor._proc_init() -#define cpu_proc_fin()			processor._proc_fin() -#define cpu_reset(addr)			processor.reset(addr) -#define cpu_do_idle()			processor._do_idle() -#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz) -#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext) -#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm) diff --git a/arch/arm/include/asm/cpu-single.h b/arch/arm/include/asm/cpu-single.h deleted file mode 100644 index f073a6d2a40..00000000000 --- a/arch/arm/include/asm/cpu-single.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - *  arch/arm/include/asm/cpu-single.h - * - *  Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -/* - * Single CPU - */ -#ifdef __STDC__ -#define __catify_fn(name,x)	name##x -#else -#define __catify_fn(name,x)	name/**/x -#endif -#define __cpu_fn(name,x)	__catify_fn(name,x) - -/* - * If we are supporting multiple CPUs, then we must use a table of - * function pointers for this lot.  Otherwise, we can optimise the - * table away. 
- */ -#define cpu_proc_init			__cpu_fn(CPU_NAME,_proc_init) -#define cpu_proc_fin			__cpu_fn(CPU_NAME,_proc_fin) -#define cpu_reset			__cpu_fn(CPU_NAME,_reset) -#define cpu_do_idle			__cpu_fn(CPU_NAME,_do_idle) -#define cpu_dcache_clean_area		__cpu_fn(CPU_NAME,_dcache_clean_area) -#define cpu_do_switch_mm		__cpu_fn(CPU_NAME,_switch_mm) -#define cpu_set_pte_ext			__cpu_fn(CPU_NAME,_set_pte_ext) - -#include <asm/page.h> - -struct mm_struct; - -/* declare all the functions as extern */ -extern void cpu_proc_init(void); -extern void cpu_proc_fin(void); -extern int cpu_do_idle(void); -extern void cpu_dcache_clean_area(void *, int); -extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); -extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); -extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h index 793968173be..2744f060255 100644 --- a/arch/arm/include/asm/cpu.h +++ b/arch/arm/include/asm/cpu.h @@ -15,8 +15,8 @@  struct cpuinfo_arm {  	struct cpu	cpu; +	u32		cpuid;  #ifdef CONFIG_SMP -	struct task_struct *idle;  	unsigned int	loops_per_jiffy;  #endif  }; diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h new file mode 100644 index 00000000000..2fca60ab513 --- /dev/null +++ b/arch/arm/include/asm/cpuidle.h @@ -0,0 +1,29 @@ +#ifndef __ASM_ARM_CPUIDLE_H +#define __ASM_ARM_CPUIDLE_H + +#ifdef CONFIG_CPU_IDLE +extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev, +		struct cpuidle_driver *drv, int index); +#else +static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev, +		struct cpuidle_driver *drv, int index) { return -ENODEV; } +#endif + +/* Common ARM WFI state */ +#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\ +	.enter                  = arm_cpuidle_simple_enter,\ +	.exit_latency           = 1,\ +	.target_residency       = 1,\ +	.power_usage		= p,\ +	.flags                  = CPUIDLE_FLAG_TIME_VALID,\ +	.name                   = "WFI",\ +	.desc                   = "ARM WFI",\ +} + +/* + * in case power_specified == 1, give a default WFI power value needed + * by some governors + */ +#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX) + +#endif diff --git a/arch/arm/include/asm/cputime.h b/arch/arm/include/asm/cputime.h deleted file mode 100644 index 3a8002a5fec..00000000000 --- a/arch/arm/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ARM_CPUTIME_H -#define __ARM_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __ARM_CPUTIME_H */ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 20ae96cc002..8c2b7321a47 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -2,12 +2,32 @@  #define __ASM_ARM_CPUTYPE_H  #include <linux/stringify.h> +#include <linux/kernel.h>  #define CPUID_ID	0  #define CPUID_CACHETYPE	1  #define CPUID_TCM	2  #define CPUID_TLBTYPE	3 +#define CPUID_MPUIR	4 +#define CPUID_MPIDR	5 +#define CPUID_REVIDR	6 +#ifdef CONFIG_CPU_V7M +#define CPUID_EXT_PFR0	0x40 +#define CPUID_EXT_PFR1	0x44 +#define CPUID_EXT_DFR0	0x48 +#define CPUID_EXT_AFR0	0x4c +#define CPUID_EXT_MMFR0	0x50 +#define CPUID_EXT_MMFR1	0x54 +#define CPUID_EXT_MMFR2	0x58 +#define CPUID_EXT_MMFR3	0x5c +#define CPUID_EXT_ISAR0	0x60 +#define CPUID_EXT_ISAR1	0x64 +#define CPUID_EXT_ISAR2	0x68 +#define CPUID_EXT_ISAR3	0x6c +#define CPUID_EXT_ISAR4	0x70 +#define CPUID_EXT_ISAR5	0x74 +#else  #define CPUID_EXT_PFR0	"c1, 0"  #define CPUID_EXT_PFR1	"c1, 1"  #define 
CPUID_EXT_DFR0	"c1, 2" @@ -22,6 +42,44 @@  #define CPUID_EXT_ISAR3	"c2, 3"  #define CPUID_EXT_ISAR4	"c2, 4"  #define CPUID_EXT_ISAR5	"c2, 5" +#endif + +#define MPIDR_SMP_BITMASK (0x3 << 30) +#define MPIDR_SMP_VALUE (0x2 << 30) + +#define MPIDR_MT_BITMASK (0x1 << 24) + +#define MPIDR_HWID_BITMASK 0xFFFFFF + +#define MPIDR_INVALID (~MPIDR_HWID_BITMASK) + +#define MPIDR_LEVEL_BITS 8 +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ +	((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK) + +#define ARM_CPU_IMP_ARM			0x41 +#define ARM_CPU_IMP_INTEL		0x69 + +#define ARM_CPU_PART_ARM1136		0xB360 +#define ARM_CPU_PART_ARM1156		0xB560 +#define ARM_CPU_PART_ARM1176		0xB760 +#define ARM_CPU_PART_ARM11MPCORE	0xB020 +#define ARM_CPU_PART_CORTEX_A8		0xC080 +#define ARM_CPU_PART_CORTEX_A9		0xC090 +#define ARM_CPU_PART_CORTEX_A5		0xC050 +#define ARM_CPU_PART_CORTEX_A15		0xC0F0 +#define ARM_CPU_PART_CORTEX_A7		0xC070 +#define ARM_CPU_PART_CORTEX_A12		0xC0D0 +#define ARM_CPU_PART_CORTEX_A17		0xC0E0 + +#define ARM_CPU_XSCALE_ARCH_MASK	0xe000 +#define ARM_CPU_XSCALE_ARCH_V1		0x2000 +#define ARM_CPU_XSCALE_ARCH_V2		0x4000 +#define ARM_CPU_XSCALE_ARCH_V3		0x6000 + +extern unsigned int processor_id;  #ifdef CONFIG_CPU_CP15  #define read_cpuid(reg)							\ @@ -33,22 +91,56 @@  		    : "cc");						\  		__val;							\  	}) + +/* + * The memory clobber prevents gcc 4.5 from reordering the mrc before + * any is_smp() tests, which can cause undefined instruction aborts on + * ARM1136 r0 due to the missing extended CP15 registers. + */  #define read_cpuid_ext(ext_reg)						\  	({								\  		unsigned int __val;					\  		asm("mrc	p15, 0, %0, c0, " ext_reg		\  		    : "=r" (__val)					\  		    :							\ -		    : "cc");						\ +		    : "memory");					\  		__val;							\  	}) -#else -extern unsigned int processor_id; -#define read_cpuid(reg) (processor_id) -#define read_cpuid_ext(reg) 0 -#endif + +#elif defined(CONFIG_CPU_V7M) + +#include <asm/io.h> +#include <asm/v7m.h> + +#define read_cpuid(reg)							\ +	({								\ +		WARN_ON_ONCE(1);					\ +		0;							\ +	}) + +static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset) +{ +	return readl(BASEADDR_V7M_SCB + offset); +} + +#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */  /* + * read_cpuid and read_cpuid_ext should only ever be called on machines that + * have cp15 so warn on other usages. + */ +#define read_cpuid(reg)							\ +	({								\ +		WARN_ON_ONCE(1);					\ +		0;							\ +	}) + +#define read_cpuid_ext(reg) read_cpuid(reg) + +#endif /* ifdef CONFIG_CPU_CP15 / else */ + +#ifdef CONFIG_CPU_CP15 +/*   * The CPU ID never changes at run time, so we might as well tell the   * compiler that it's constant.  Use this function to read the CPU ID   * rather than directly reading processor_id or read_cpuid() directly. 
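As a usage sketch for the MPIDR helpers above (the function and the pr_info() reporting are illustrative only; read_cpuid_mpidr() is defined a little further down in this header):

	static void report_cpu_affinity(void)
	{
		unsigned int mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

		/* on most SMP parts Aff1 is the cluster, Aff0 the core within it */
		pr_info("cluster %u, cpu %u\n",
			MPIDR_AFFINITY_LEVEL(mpidr, 1),
			MPIDR_AFFINITY_LEVEL(mpidr, 0));
	}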
@@ -58,6 +150,37 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
+#elif defined(CONFIG_CPU_V7M)
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+	return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+	return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+	return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 {
 	return read_cpuid(CPUID_CACHETYPE);
@@ -68,6 +191,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
 	return read_cpuid(CPUID_TCM);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+	return read_cpuid(CPUID_MPIDR);
+}
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA.  For
@@ -94,4 +222,23 @@ static inline int cpu_is_xsc3(void)
 #define	cpu_is_xscale()	1
 #endif
 
+/*
+ * Marvell's PJ4 and PJ4B cores are based on the V7 version,
+ * but require a special sequence for enabling coprocessors.
+ * For this reason, we need a way to distinguish them.
+ */
+#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
+static inline int cpu_is_pj4(void)
+{
+	unsigned int id;
+
+	id = read_cpuid_id();
+	if ((id & 0xff0fff00) == 0x560f5800)
+		return 1;
+
+	return 0;
+}
+#else
+#define cpu_is_pj4()	0
+#endif
 #endif
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 00000000000..2381199acb7
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,159 @@
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include	<asm/io.h>
+#include	<asm/hardware/coresight.h>
+
+/* The registers' definition is from section 3.2 of
+ * Embedded Cross Trigger Revision: r0p0
+ */
+#define		CTICONTROL		0x000
+#define		CTISTATUS		0x004
+#define		CTILOCK			0x008
+#define		CTIPROTECTION		0x00C
+#define		CTIINTACK		0x010
+#define		CTIAPPSET		0x014
+#define		CTIAPPCLEAR		0x018
+#define		CTIAPPPULSE		0x01c
+#define		CTIINEN			0x020
+#define		CTIOUTEN		0x0A0
+#define		CTITRIGINSTATUS		0x130
+#define		CTITRIGOUTSTATUS	0x134
+#define		CTICHINSTATUS		0x138
+#define		CTICHOUTSTATUS		0x13c
+#define		CTIPERIPHID0		0xFE0
+#define		CTIPERIPHID1		0xFE4
+#define		CTIPERIPHID2		0xFE8
+#define		CTIPERIPHID3		0xFEC
+#define		CTIPCELLID0		0xFF0
+#define		CTIPCELLID1		0xFF4
+#define		CTIPCELLID2		0xFF8
+#define		CTIPCELLID3		0xFFC
+
+/* The below are from section 3.6.4 of
+ * CoreSight v1.0 Architecture Specification
+ */
+#define		LOCKACCESS		0xFB0
+#define		LOCKSTATUS		0xFB4
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: trigger out number which will cause
+ *	the @irq to happen
+ *
+ * cti struct used to operate cti registers.
+ */
+struct cti {
+	void __iomem *base;
+	int irq;
+	int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: trigger out number which will cause
+ *	the @irq to happen
+ *
+ * called by machine code to pass the board-dependent
+ * @base, @irq and @trig_out to cti.
+ */
+static inline void cti_init(struct cti *cti,
+	void __iomem *base, int irq, int trig_out)
+{
+	cti->base = base;
+	cti->irq  = irq;
+	cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use the @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @chan: channel number
+ *
+ * This function maps one trigger in of @trig_in to one trigger
+ * out of @trig_out using the channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+	int trig_in, int trig_out, int chan)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINEN + trig_in * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIINEN + trig_in * 4);
+
+	val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_enable(struct cti *cti)
+{
+	__raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * disable the cti module
+ */
+static inline void cti_disable(struct cti *cti)
+{
+	__raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * clear the cti irq
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINTACK);
+	val |= BIT(cti->trig_out_for_irq);
+	__raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * unlock the cti module, or else any writes to the cti
+ * module are not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+	__raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * lock the cti module, so any writes to the cti
+ * module will not be allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+	__raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
}
+#endif
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
deleted file mode 100644
index 75d21e2a3ff..00000000000
--- a/arch/arm/include/asm/current.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASMARM_CURRENT_H
-#define _ASMARM_CURRENT_H
-
-#include <linux/thread_info.h>
-
-static inline struct task_struct *get_current(void) __attribute_const__;
-
-static inline struct task_struct *get_current(void)
-{
-	return current_thread_info()->task;
-}
-
-#define current (get_current())
-
-#endif /* _ASMARM_CURRENT_H */
diff --git a/arch/arm/include/asm/dcc.h b/arch/arm/include/asm/dcc.h
new file mode 100644
index 00000000000..b74899de077
--- /dev/null
+++ b/arch/arm/include/asm/dcc.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#include <asm/barrier.h> + +static inline u32 __dcc_getstatus(void) +{ +	u32 __ret; +	asm volatile("mrc p14, 0, %0, c0, c1, 0	@ read comms ctrl reg" +		: "=r" (__ret) : : "cc"); + +	return __ret; +} + +static inline char __dcc_getchar(void) +{ +	char __c; + +	asm volatile("mrc p14, 0, %0, c0, c5, 0	@ read comms data reg" +		: "=r" (__c)); +	isb(); + +	return __c; +} + +static inline void __dcc_putchar(char c) +{ +	asm volatile("mcr p14, 0, %0, c0, c5, 0	@ write a char" +		: /* no output register */ +		: "r" (c)); +	isb(); +} diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index b2deda18154..dff714d886d 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h @@ -6,9 +6,28 @@  #ifndef __ASM_ARM_DELAY_H  #define __ASM_ARM_DELAY_H +#include <asm/memory.h>  #include <asm/param.h>	/* HZ */ -extern void __delay(int loops); +#define MAX_UDELAY_MS	2 +#define UDELAY_MULT	((UL(2199023) * HZ) >> 11) +#define UDELAY_SHIFT	30 + +#ifndef __ASSEMBLY__ + +struct delay_timer { +	unsigned long (*read_current_timer)(void); +	unsigned long freq; +}; + +extern struct arm_delay_ops { +	void (*delay)(unsigned long); +	void (*const_udelay)(unsigned long); +	void (*udelay)(unsigned long); +	unsigned long ticks_per_jiffy; +} arm_delay_ops; + +#define __delay(n)		arm_delay_ops.delay(n)  /*   * This function intentionally does not exist; if you see references to @@ -23,22 +42,31 @@ extern void __bad_udelay(void);   * division by multiplication: you don't have to worry about   * loss of precision.   * - * Use only for very small delays ( < 1 msec).  Should probably use a + * Use only for very small delays ( < 2 msec).  Should probably use a   * lookup table, really, as the multiplications take much too long with   * short delays.  This is a "reasonable" implementation, though (and the   * first constant multiplications gets optimized away if the delay is   * a constant)   */ -extern void __udelay(unsigned long usecs); -extern void __const_udelay(unsigned long); - -#define MAX_UDELAY_MS 2 +#define __udelay(n)		arm_delay_ops.udelay(n) +#define __const_udelay(n)	arm_delay_ops.const_udelay(n)  #define udelay(n)							\  	(__builtin_constant_p(n) ?					\  	  ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() :		\ -			__const_udelay((n) * ((2199023U*HZ)>>11))) :	\ +			__const_udelay((n) * UDELAY_MULT)) :		\  	  __udelay(n)) +/* Loop-based definitions for assembly code. */ +extern void __loop_delay(unsigned long loops); +extern void __loop_udelay(unsigned long usecs); +extern void __loop_const_udelay(unsigned long); + +/* Delay-loop timer registration. 
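+ * A platform that has a constant-rate hardware counter can register it
+ * here with register_current_timer_delay(); udelay() is then driven by
+ * that timer rather than the calibrated loops_per_jiffy software loop,
+ * so delays stay accurate when cpufreq scales the CPU clock.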
*/ +#define ARCH_HAS_READ_CURRENT_TIMER +extern void register_current_timer_delay(const struct delay_timer *timer); + +#endif /* __ASSEMBLY__ */ +  #endif /* defined(_ARM_DELAY_H) */ diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index 9f390ce335c..dc662fca923 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -7,12 +7,30 @@  #define ASMARM_DEVICE_H  struct dev_archdata { +	struct dma_map_ops	*dma_ops;  #ifdef CONFIG_DMABOUNCE  	struct dmabounce_device_info *dmabounce;  #endif +#ifdef CONFIG_IOMMU_API +	void *iommu; /* private IOMMU data */ +#endif +#ifdef CONFIG_ARM_DMA_USE_IOMMU +	struct dma_iommu_mapping	*mapping; +#endif  }; +struct omap_device; +  struct pdev_archdata { +#ifdef CONFIG_ARCH_OMAP +	struct omap_device *od; +#endif  }; +#ifdef CONFIG_ARM_DMA_USE_IOMMU +#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping) +#else +#define to_dma_iommu_mapping(dev) NULL +#endif +  #endif diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h index d3f0a9eee9f..662c7bd0610 100644 --- a/arch/arm/include/asm/div64.h +++ b/arch/arm/include/asm/div64.h @@ -1,8 +1,8 @@  #ifndef __ASM_ARM_DIV64  #define __ASM_ARM_DIV64 -#include <asm/system.h>  #include <linux/types.h> +#include <asm/compiler.h>  /*   * The semantics of do_div() are: @@ -46,7 +46,7 @@  	__rem;							\  }) -#if __GNUC__ < 4 +#if __GNUC__ < 4 || !defined(CONFIG_AEABI)  /*   * gcc versions earlier than 4.0 are simply too problematic for the @@ -156,7 +156,7 @@  		/* Select the best insn combination to perform the   */	\  		/* actual __m * __n / (__p << 64) operation.         */	\  		if (!__c) {						\ -			asm (	"umull	%Q0, %R0, %1, %Q2\n\t"		\ +			asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"		\  				"mov	%Q0, #0"			\  				: "=&r" (__res)				\  				: "r" (__m), "r" (__n)			\ diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h new file mode 100644 index 00000000000..4f8e9e5514b --- /dev/null +++ b/arch/arm/include/asm/dma-contiguous.h @@ -0,0 +1,14 @@ +#ifndef ASMARM_DMA_CONTIGUOUS_H +#define ASMARM_DMA_CONTIGUOUS_H + +#ifdef __KERNEL__ +#ifdef CONFIG_DMA_CMA + +#include <linux/types.h> + +void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size); + +#endif +#endif + +#endif diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h new file mode 100644 index 00000000000..8e3fcb924db --- /dev/null +++ b/arch/arm/include/asm/dma-iommu.h @@ -0,0 +1,37 @@ +#ifndef ASMARM_DMA_IOMMU_H +#define ASMARM_DMA_IOMMU_H + +#ifdef __KERNEL__ + +#include <linux/mm_types.h> +#include <linux/scatterlist.h> +#include <linux/dma-debug.h> +#include <linux/kmemcheck.h> +#include <linux/kref.h> + +struct dma_iommu_mapping { +	/* iommu specific data */ +	struct iommu_domain	*domain; + +	unsigned long		**bitmaps;	/* array of bitmaps */ +	unsigned int		nr_bitmaps;	/* nr of elements in array */ +	unsigned int		extensions; +	size_t			bitmap_size;	/* size of a single bitmap */ +	size_t			bits;		/* per bitmap */ +	dma_addr_t		base; + +	spinlock_t		lock; +	struct kref		kref; +}; + +struct dma_iommu_mapping * +arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size); + +void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); + +int arm_iommu_attach_device(struct device *dev, +					struct dma_iommu_mapping *mapping); +void arm_iommu_detach_device(struct device *dev); + +#endif /* __KERNEL__ */ +#endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 
c568da7dcae..c45b61a4b4a 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -5,44 +5,102 @@  #include <linux/mm_types.h>  #include <linux/scatterlist.h> +#include <linux/dma-attrs.h> +#include <linux/dma-debug.h>  #include <asm-generic/dma-coherent.h>  #include <asm/memory.h> +#include <xen/xen.h> +#include <asm/xen/hypervisor.h> + +#define DMA_ERROR_CODE	(~0) +extern struct dma_map_ops arm_dma_ops; +extern struct dma_map_ops arm_coherent_dma_ops; + +static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) +{ +	if (dev && dev->archdata.dma_ops) +		return dev->archdata.dma_ops; +	return &arm_dma_ops; +} + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ +	if (xen_initial_domain()) +		return xen_dma_ops; +	else +		return __generic_dma_ops(dev); +} + +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ +	BUG_ON(!dev); +	dev->archdata.dma_ops = ops; +} + +#include <asm-generic/dma-mapping-common.h> + +static inline int dma_set_mask(struct device *dev, u64 mask) +{ +	return get_dma_ops(dev)->set_dma_mask(dev, mask); +} + +#ifdef __arch_page_to_dma +#error Please update to __arch_pfn_to_dma +#endif +  /* - * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions - * used internally by the DMA-mapping API to provide DMA addresses. They - * must not be used by drivers. + * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private + * functions used internally by the DMA-mapping API to provide DMA + * addresses. They must not be used by drivers.   */ -#ifndef __arch_page_to_dma -static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) +#ifndef __arch_pfn_to_dma +static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)  { -	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page)); +	if (dev) +		pfn -= dev->dma_pfn_offset; +	return (dma_addr_t)__pfn_to_bus(pfn);  } -static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr) +static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)  { -	return pfn_to_page(__bus_to_pfn(addr)); +	unsigned long pfn = __bus_to_pfn(addr); + +	if (dev) +		pfn += dev->dma_pfn_offset; + +	return pfn;  }  static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)  { -	return (void *)__bus_to_virt(addr); +	if (dev) { +		unsigned long pfn = dma_to_pfn(dev, addr); + +		return phys_to_virt(__pfn_to_phys(pfn)); +	} + +	return (void *)__bus_to_virt((unsigned long)addr);  }  static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)  { +	if (dev) +		return pfn_to_dma(dev, virt_to_pfn(addr)); +  	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));  } +  #else -static inline dma_addr_t page_to_dma(struct device *dev, struct page *page) +static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)  { -	return __arch_page_to_dma(dev, page); +	return __arch_pfn_to_dma(dev, pfn);  } -static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr) +static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)  { -	return __arch_dma_to_page(dev, addr); +	return __arch_dma_to_pfn(dev, addr);  }  static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) @@ -56,100 +114,60 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)  }  #endif -/* - * The DMA API is built upon the notion of "buffer ownership".  
A buffer - * is either exclusively owned by the CPU (and therefore may be accessed - * by it) or exclusively owned by the DMA device.  These helper functions - * represent the transitions between these two ownership states. - * - * Note, however, that on later ARMs, this notion does not work due to - * speculative prefetches.  We model our approach on the assumption that - * the CPU does do speculative prefetches, which means we clean caches - * before transfers and delay cache invalidation until transfer completion. - * - * Private support functions: these are not part of the API and are - * liable to change.  Drivers must not use these. - */ -static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, -	enum dma_data_direction dir) +/* The ARM override for dma_max_pfn() */ +static inline unsigned long dma_max_pfn(struct device *dev)  { -	extern void ___dma_single_cpu_to_dev(const void *, size_t, -		enum dma_data_direction); - -	if (!arch_is_coherent()) -		___dma_single_cpu_to_dev(kaddr, size, dir); +	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);  } +#define dma_max_pfn(dev) dma_max_pfn(dev) -static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, -	enum dma_data_direction dir) +static inline int set_arch_dma_coherent_ops(struct device *dev)  { -	extern void ___dma_single_dev_to_cpu(const void *, size_t, -		enum dma_data_direction); - -	if (!arch_is_coherent()) -		___dma_single_dev_to_cpu(kaddr, size, dir); +	set_dma_ops(dev, &arm_coherent_dma_ops); +	return 0;  } +#define set_arch_dma_coherent_ops(dev)	set_arch_dma_coherent_ops(dev) -static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, -	size_t size, enum dma_data_direction dir) +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)  { -	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, -		size_t, enum dma_data_direction); - -	if (!arch_is_coherent()) -		___dma_page_cpu_to_dev(page, off, size, dir); +	unsigned int offset = paddr & ~PAGE_MASK; +	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;  } -static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, -	size_t size, enum dma_data_direction dir) +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)  { -	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, -		size_t, enum dma_data_direction); - -	if (!arch_is_coherent()) -		___dma_page_dev_to_cpu(page, off, size, dir); +	unsigned int offset = dev_addr & ~PAGE_MASK; +	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;  } -/* - * Return whether the given device DMA address mask can be supported - * properly.  For example, if your device can only drive the low 24-bits - * during bus mastering, then you would pass 0x00ffffff as the mask - * to this function. - * - * FIXME: This should really be a platform specific issue - we should - * return false if GFP_DMA allocations may not satisfy the supplied 'mask'. 
- */
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
-	if (mask < ISA_DMA_THRESHOLD)
+	u64 limit, mask;
+
+	if (!dev->dma_mask)
 		return 0;
-	return 1;
-}
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
+	mask = *dev->dma_mask;
-	*dev->dma_mask = dma_mask;
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
-	return 0;
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
 }
+static inline void dma_mark_clean(void *addr, size_t size) { }
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return dma_addr == ~0;
+	debug_dma_mapping_error(dev, dma_addr);
+	return dma_addr == DMA_ERROR_CODE;
 }
 
 /*
@@ -167,72 +185,109 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
+extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
 /**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: required memory size
  * @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
  *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA.  This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
 */
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			   gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+	BUG_ON(!ops);
+
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
 
 /**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
*/
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			 dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
 
 /**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
-
-
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
-		gfp_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			struct dma_attrs *attrs);
 
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
-	dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
 
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
 
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
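The hunk just below adds a needs_bounce callback to dmabounce_register_dev(); a sketch of what platform code might pass, assuming a hypothetical device whose DMA window covers only the first 64MiB (SZ_64M comes from <linux/sizes.h>; all names here are illustrative):

	/* bounce any buffer that extends beyond the device's 64MiB DMA window */
	static int my_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
	{
		return (addr + size) > SZ_64M;
	}

	static int my_platform_dma_init(struct device *dev)
	{
		/* small and large bounce pools of 512 bytes and 4KiB respectively */
		return dmabounce_register_dev(dev, 512, 4096, my_needs_bounce);
	}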
@@ -251,14 +306,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,   * @dev: valid struct device pointer   * @small_buf_size: size of buffers to use with small buffer pool   * @large_buf_size: size of buffers to use with large buffer pool (can be 0) + * @needs_bounce_fn: called to determine whether buffer needs bouncing   *   * This function should be called by low-level platform code to register   * a device as requireing DMA buffer bouncing. The function will allocate   * appropriate DMA pools for the device. - *   */  extern int dmabounce_register_dev(struct device *, unsigned long, -		unsigned long); +		unsigned long, int (*)(struct device *, dma_addr_t, size_t));  /**   * dmabounce_unregister_dev @@ -272,213 +327,22 @@ extern int dmabounce_register_dev(struct device *, unsigned long,   */  extern void dmabounce_unregister_dev(struct device *); -/** - * dma_needs_bounce - * - * @dev: valid struct device pointer - * @dma_handle: dma_handle of unbounced buffer - * @size: size of region being mapped - * - * Platforms that utilize the dmabounce mechanism must implement - * this function. - * - * The dmabounce routines call this function whenever a dma-mapping - * is requested to determine whether a given buffer needs to be bounced - * or not. The function must return 0 if the buffer is OK for - * DMA access and 1 if the buffer needs to be bounced. - * - */ -extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); - -/* - * The DMA API, implemented by dmabounce.c.  See below for descriptions. - */ -extern dma_addr_t dma_map_single(struct device *, void *, size_t, -		enum dma_data_direction); -extern void dma_unmap_single(struct device *, dma_addr_t, size_t, -		enum dma_data_direction); -extern dma_addr_t dma_map_page(struct device *, struct page *, -		unsigned long, size_t, enum dma_data_direction); -extern void dma_unmap_page(struct device *, dma_addr_t, size_t, -		enum dma_data_direction); - -/* - * Private functions - */ -int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, -		size_t, enum dma_data_direction); -int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, -		size_t, enum dma_data_direction); -#else -static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr, -	unsigned long offset, size_t size, enum dma_data_direction dir) -{ -	return 1; -} - -static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, -	unsigned long offset, size_t size, enum dma_data_direction dir) -{ -	return 1; -} - - -/** - * dma_map_single - map a single buffer for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @cpu_addr: CPU direct mapped address of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed.  The CPU - * can regain ownership by calling dma_unmap_single() or - * dma_sync_single_for_cpu(). 
- */ -static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, -		size_t size, enum dma_data_direction dir) -{ -	BUG_ON(!valid_dma_direction(dir)); - -	__dma_single_cpu_to_dev(cpu_addr, size, dir); - -	return virt_to_dma(dev, cpu_addr); -} - -/** - * dma_map_page - map a portion of a page for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @page: page that buffer resides in - * @offset: offset into page for start of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed.  The CPU - * can regain ownership by calling dma_unmap_page(). - */ -static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, -	     unsigned long offset, size_t size, enum dma_data_direction dir) -{ -	BUG_ON(!valid_dma_direction(dir)); - -	__dma_page_cpu_to_dev(page, offset, size, dir); - -	return page_to_dma(dev, page) + offset; -} - -/** - * dma_unmap_single - unmap a single buffer previously mapped - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_single) - * @dir: DMA transfer direction (same as passed to dma_map_single) - * - * Unmap a single streaming mode DMA translation.  The handle and size - * must match what was provided in the previous dma_map_single() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, -		size_t size, enum dma_data_direction dir) -{ -	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); -} - -/** - * dma_unmap_page - unmap a buffer previously mapped through dma_map_page() - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_page) - * @dir: DMA transfer direction (same as passed to dma_map_page) - * - * Unmap a page streaming mode DMA translation.  The handle and size - * must match what was provided in the previous dma_map_page() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, -		size_t size, enum dma_data_direction dir) -{ -	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, -		size, dir); -} -#endif /* CONFIG_DMABOUNCE */ - -/** - * dma_sync_single_range_for_cpu - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @offset: offset of region to start sync - * @size: size of region to sync - * @dir: DMA transfer direction (same as passed to dma_map_single) - * - * Make physical memory consistent for a single streaming mode DMA - * translation after a transfer. - * - * If you perform a dma_map_single() but wish to interrogate the - * buffer using the cpu, yet do not wish to teardown the PCI dma - * mapping, you must call this function before doing so.  At the - * next point you give the PCI dma address back to the card, you - * must first the perform a dma_sync_for_device, and then the - * device again owns the buffer. 
- */ -static inline void dma_sync_single_range_for_cpu(struct device *dev, -		dma_addr_t handle, unsigned long offset, size_t size, -		enum dma_data_direction dir) -{ -	BUG_ON(!valid_dma_direction(dir)); - -	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) -		return; - -	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); -} - -static inline void dma_sync_single_range_for_device(struct device *dev, -		dma_addr_t handle, unsigned long offset, size_t size, -		enum dma_data_direction dir) -{ -	BUG_ON(!valid_dma_direction(dir)); - -	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) -		return; -	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, -		dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ -	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, -		dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ -	dma_sync_single_range_for_device(dev, handle, 0, size, dir); -}  /*   * The scatter list versions of the above methods.   */ -extern int dma_map_sg(struct device *, struct scatterlist *, int, -		enum dma_data_direction); -extern void dma_unmap_sg(struct device *, struct scatterlist *, int, +extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, +		enum dma_data_direction, struct dma_attrs *attrs); +extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, +		enum dma_data_direction, struct dma_attrs *attrs); +extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,  		enum dma_data_direction); -extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, +extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,  		enum dma_data_direction); -extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, -		enum dma_data_direction); - +extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, +		void *cpu_addr, dma_addr_t dma_addr, size_t size, +		struct dma_attrs *attrs);  #endif /* __KERNEL__ */  #endif diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index ca51143f97f..99084431d6a 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -1,13 +1,16 @@  #ifndef __ASM_ARM_DMA_H  #define __ASM_ARM_DMA_H -#include <asm/memory.h> -  /*   * This is the maximum virtual address which can be DMA'd from.   */ -#ifndef MAX_DMA_ADDRESS -#define MAX_DMA_ADDRESS	0xffffffff +#ifndef CONFIG_ZONE_DMA +#define MAX_DMA_ADDRESS	0xffffffffUL +#else +#define MAX_DMA_ADDRESS	({ \ +	extern phys_addr_t arm_dma_zone_size; \ +	arm_dma_zone_size && arm_dma_zone_size < (0x10000000 - PAGE_OFFSET) ? \ +		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })  #endif  #ifdef CONFIG_ISA_DMA_API @@ -16,7 +19,6 @@   * It should not be re-used except for that purpose.   
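The scatterlist entry points above gain an arm_ prefix and a struct dma_attrs argument, consistent with them becoming dma_map_ops methods rather than the functions drivers call directly; driver code keeps using the generic wrappers. A sketch of that driver-side usage (program_hw_desc() is a made-up stand-in for hardware-specific descriptor setup):

	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
	for_each_sg(sglist, sg, count, i)
		program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));
	/* ... transfer runs ... */
	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);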
*/  #include <linux/spinlock.h> -#include <asm/system.h>  #include <asm/scatterlist.h>  #include <mach/isa-dma.h> @@ -31,18 +33,18 @@  #define DMA_MODE_CASCADE 0xc0  #define DMA_AUTOINIT	 0x10 -extern spinlock_t  dma_spin_lock; +extern raw_spinlock_t  dma_spin_lock;  static inline unsigned long claim_dma_lock(void)  {  	unsigned long flags; -	spin_lock_irqsave(&dma_spin_lock, flags); +	raw_spin_lock_irqsave(&dma_spin_lock, flags);  	return flags;  }  static inline void release_dma_lock(unsigned long flags)  { -	spin_unlock_irqrestore(&dma_spin_lock, flags); +	raw_spin_unlock_irqrestore(&dma_spin_lock, flags);  }  /* Clear the 'DMA Pointer Flip Flop'. @@ -103,7 +105,7 @@ extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);   */  extern void __set_dma_addr(unsigned int chan, void *addr);  #define set_dma_addr(chan, addr)				\ -	__set_dma_addr(chan, bus_to_virt(addr)) +	__set_dma_addr(chan, (void *)__bus_to_virt(addr))  /* Set the DMA byte count for this channel   * diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index cc7ef408071..6ddbe446425 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -10,6 +10,10 @@  #ifndef __ASM_PROC_DOMAIN_H  #define __ASM_PROC_DOMAIN_H +#ifndef __ASSEMBLY__ +#include <asm/barrier.h> +#endif +  /*   * Domain numbers   * @@ -45,20 +49,24 @@   */  #define DOMAIN_NOACCESS	0  #define DOMAIN_CLIENT	1 +#ifdef CONFIG_CPU_USE_DOMAINS  #define DOMAIN_MANAGER	3 +#else +#define DOMAIN_MANAGER	1 +#endif  #define domain_val(dom,type)	((type) << (2*(dom)))  #ifndef __ASSEMBLY__ -#ifdef CONFIG_MMU -#define set_domain(x)					\ -	do {						\ -	__asm__ __volatile__(				\ -	"mcr	p15, 0, %0, c3, c0	@ set domain"	\ -	  : : "r" (x));					\ -	isb();						\ -	} while (0) +#ifdef CONFIG_CPU_USE_DOMAINS +static inline void set_domain(unsigned val) +{ +	asm volatile( +	"mcr	p15, 0, %0, c3, c0	@ set domain" +	  : : "r" (val)); +	isb(); +}  #define modify_domain(dom,type)					\  	do {							\ @@ -70,9 +78,32 @@  	} while (0)  #else -#define set_domain(x)		do { } while (0) -#define modify_domain(dom,type)	do { } while (0) +static inline void set_domain(unsigned val) { } +static inline void modify_domain(unsigned dom, unsigned type)	{ }  #endif +/* + * Generate the T (user) versions of the LDR/STR and related + * instructions (inline assembly) + */ +#ifdef CONFIG_CPU_USE_DOMAINS +#define TUSER(instr)	#instr "t" +#else +#define TUSER(instr)	#instr  #endif -#endif /* !__ASSEMBLY__ */ + +#else /* __ASSEMBLY__ */ + +/* + * Generate the T (user) versions of the LDR/STR and related + * instructions + */ +#ifdef CONFIG_CPU_USE_DOMAINS +#define TUSER(instr)	instr ## t +#else +#define TUSER(instr)	instr +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* !__ASM_PROC_DOMAIN_H */ diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h index 29f2610efc7..eaea14676d5 100644 --- a/arch/arm/include/asm/ecard.h +++ b/arch/arm/include/asm/ecard.h @@ -161,7 +161,6 @@ struct expansion_card {  	/* Private internal data */  	const char		*card_desc;	/* Card description		*/ -	CONST unsigned int	podaddr;	/* Base Linux address for card	*/  	CONST loader_t		loader;		/* loader program */  	u64			dma_mask;  }; diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h new file mode 100644 index 00000000000..0df7a2c1fc3 --- /dev/null +++ b/arch/arm/include/asm/edac.h @@ -0,0 +1,48 @@ +/* + * Copyright 2011 Calxeda, Inc. + * Based on PPC version Copyright 2007 MontaVista Software, Inc. 
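On the ISA DMA side above, dma_spin_lock becomes a raw_spinlock_t so it stays a true spinning lock under PREEMPT_RT, but callers are unchanged. The usual claim/program/release pattern still looks like this (channel, address and length values hypothetical):

	unsigned long flags = claim_dma_lock();

	clear_dma_ff(chan);
	set_dma_mode(chan, DMA_MODE_READ);
	set_dma_addr(chan, bus_addr);	/* note: now goes via __bus_to_virt() */
	set_dma_count(chan, len);
	release_dma_lock(flags);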
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per-arch atomic_scrub() that EDAC uses for software
+ * ECC scrubbing.  It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+	unsigned int *virt_addr = va;
+	unsigned int temp, temp2;
+	unsigned int i;
+
+	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("\n"
+			"1:	ldrex	%0, [%2]\n"
+			"	strex	%1, %0, [%2]\n"
+			"	teq	%1, #0\n"
+			"	bne	1b\n"
+			: "=&r"(temp), "=&r"(temp2)
+			: "r"(virt_addr)
+			: "cc");
+	}
+#endif
+}
+
+#endif
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 8bb66bca2e3..f4b46d39b9c 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 typedef struct user_fp elf_fpregset_t;
 
-#define EM_ARM	40
-
 #define EF_ARM_EABI_MASK	0xff000000
 #define EF_ARM_EABI_UNKNOWN	0x00000000
 #define EF_ARM_EABI_VER1	0x01000000
@@ -99,6 +97,8 @@ struct elf32_hdr;
 extern int elf_check_arch(const struct elf32_hdr *);
 #define elf_check_arch elf_check_arch
 
+#define vmcore_elf64_check_arch(x) (0)
+
 extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
 #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
 
@@ -106,6 +106,7 @@ struct task_struct;
 int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define ELF_CORE_COPY_TASK_REGS dump_task_regs
+#define CORE_DUMP_USE_REGSET
 
 #define ELF_EXEC_PAGESIZE	4096
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.
Typical @@ -127,8 +128,10 @@ struct mm_struct;  extern unsigned long arch_randomize_brk(struct mm_struct *mm);  #define arch_randomize_brk arch_randomize_brk -extern int vectors_user_mapping(void); -#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() -#define ARCH_HAS_SETUP_ADDITIONAL_PAGES +#ifdef CONFIG_MMU +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +int arch_setup_additional_pages(struct linux_binprm *, int); +#endif  #endif diff --git a/arch/arm/include/asm/emergency-restart.h b/arch/arm/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/arch/arm/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S new file mode 100644 index 00000000000..88d61815f0c --- /dev/null +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -0,0 +1,39 @@ +#include <asm/assembler.h> + +/* + * Interrupt handling.  Preserves r7, r8, r9 + */ +	.macro	arch_irq_handler_default +	get_irqnr_preamble r6, lr +1:	get_irqnr_and_base r0, r2, r6, lr +	movne	r1, sp +	@ +	@ routine called with r0 = irq number, r1 = struct pt_regs * +	@ +	adrne	lr, BSYM(1b) +	bne	asm_do_IRQ + +#ifdef CONFIG_SMP +	/* +	 * XXX +	 * +	 * this macro assumes that irqstat (r2) and base (r6) are +	 * preserved from get_irqnr_and_base above +	 */ +	ALT_SMP(test_for_ipi r0, r2, r6, lr) +	ALT_UP_B(9997f) +	movne	r1, sp +	adrne	lr, BSYM(1b) +	bne	do_IPI +#endif +9997: +	.endm + +	.macro	arch_irq_handler, symbol_name +	.align	5 +	.global \symbol_name +\symbol_name: +	mov	r8, lr +	arch_irq_handler_default +	mov     pc, r8 +	.endm diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S deleted file mode 100644 index 3ceb85e4385..00000000000 --- a/arch/arm/include/asm/entry-macro-vic2.S +++ /dev/null @@ -1,57 +0,0 @@ -/* arch/arm/include/asm/entry-macro-vic2.S - * - * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S - * - * Copyright 2008 Openmoko, Inc. - * Copyright 2008 Simtec Electronics - *	http://armlinux.simtec.co.uk/ - *	Ben Dooks <ben@simtec.co.uk> - * - * Low-level IRQ helper macros for a device with two VICs - * - * This file is licensed under  the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. -*/ - -/* This should be included from <mach/entry-macro.S> with the necessary - * defines for virtual addresses and IRQ bases for the two vics. - * - * The code needs the following defined: - *	IRQ_VIC0_BASE	IRQ number of VIC0's first IRQ - *	IRQ_VIC1_BASE	IRQ number of VIC1's first IRQ - *	VA_VIC0		Virtual address of VIC0 - *	VA_VIC1		Virtual address of VIC1 - * - * Note, code assumes VIC0's virtual address is an ARM immediate constant - * away from VIC1. 
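arch_irq_handler_default above keeps asking the platform's get_irqnr_and_base for a pending interrupt and hands each one to asm_do_IRQ() until none remain. In C-style pseudocode (mach_get_irqnr() and ipi_pending() are invented stand-ins for the platform macros), the control flow is roughly:

	/* pseudocode only; the real implementation is the assembly above */
	while ((irq = mach_get_irqnr()) >= 0)
		asm_do_IRQ(irq, regs);
	#ifdef CONFIG_SMP
	if (ipi_pending())		/* test_for_ipi */
		do_IPI(ipi_nr, regs);
	#endif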
-*/ - -#include <asm/hardware/vic.h> - -	.macro	disable_fiq -	.endm - -	.macro	get_irqnr_preamble, base, tmp -	ldr	\base, =VA_VIC0 -	.endm - -	.macro	arch_ret_to_user, tmp1, tmp2 -	.endm - -	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp - -	@ check the vic0 -	mov	\irqnr, #IRQ_VIC0_BASE + 31 -	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ] -	teq	\irqstat, #0 - -	@ otherwise try vic1 -	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0) -	addeq	\irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE) -	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ] -	teqeq	\irqstat, #0 - -	clzne	\irqstat, \irqstat -	subne	\irqnr, \irqnr, \irqstat -	.endm diff --git a/arch/arm/include/asm/errno.h b/arch/arm/include/asm/errno.h deleted file mode 100644 index 6e60f0612bb..00000000000 --- a/arch/arm/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ARM_ERRNO_H -#define _ARM_ERRNO_H - -#include <asm-generic/errno.h> - -#endif diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h new file mode 100644 index 00000000000..5abaf5bbd98 --- /dev/null +++ b/arch/arm/include/asm/exception.h @@ -0,0 +1,19 @@ +/* + * Annotations for marking C functions as exception handlers. + * + * These should only be used for C functions that are called from the low + * level exception entry code and not any intervening C code. + */ +#ifndef __ASM_ARM_EXCEPTION_H +#define __ASM_ARM_EXCEPTION_H + +#include <linux/ftrace.h> + +#define __exception	__attribute__((section(".exception.text"))) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#define __exception_irq_entry	__irq_entry +#else +#define __exception_irq_entry	__exception +#endif + +#endif /* __ASM_ARM_EXCEPTION_H */ diff --git a/arch/arm/include/asm/fcntl.h b/arch/arm/include/asm/fcntl.h deleted file mode 100644 index a80b6607b2e..00000000000 --- a/arch/arm/include/asm/fcntl.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ARM_FCNTL_H -#define _ARM_FCNTL_H - -#define O_DIRECTORY	 040000	/* must be a directory */ -#define O_NOFOLLOW	0100000	/* don't follow links */ -#define O_DIRECT	0200000	/* direct disk access hint - currently ignored */ -#define O_LARGEFILE	0400000 - -#include <asm-generic/fcntl.h> - -#endif diff --git a/arch/arm/include/asm/fiq.h b/arch/arm/include/asm/fiq.h index 2242ce22ec6..d493d0b742a 100644 --- a/arch/arm/include/asm/fiq.h +++ b/arch/arm/include/asm/fiq.h @@ -4,6 +4,13 @@   * Support for FIQ on ARM architectures.   * Written by Philip Blundell <philb@gnu.org>, 1998   * Re-written by Russell King + * + * NOTE: The FIQ mode registers are not magically preserved across + * suspend/resume. + * + * Drivers which require these registers to be preserved across power + * management operations must implement appropriate suspend/resume handlers to + * save and restore them.   
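Per the note just added to fiq.h, a driver that has claimed the FIQ must carry the banked registers across suspend itself. A minimal sketch of such handlers (the dev_pm_ops wiring is omitted, names are hypothetical):

	static struct pt_regs saved_fiq_regs;

	static int my_fiq_suspend(struct device *dev)
	{
		get_fiq_regs(&saved_fiq_regs);	/* snapshot the FIQ-banked registers */
		return 0;
	}

	static int my_fiq_resume(struct device *dev)
	{
		set_fiq_regs(&saved_fiq_regs);
		return 0;
	}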
 */
 
 #ifndef __ASM_FIQ_H
@@ -29,9 +36,21 @@ struct fiq_handler {
 extern int claim_fiq(struct fiq_handler *f);
 extern void release_fiq(struct fiq_handler *f);
 extern void set_fiq_handler(void *start, unsigned int length);
-extern void set_fiq_regs(struct pt_regs *regs);
-extern void get_fiq_regs(struct pt_regs *regs);
 extern void enable_fiq(int fiq);
 extern void disable_fiq(int fiq);
 
+/* helpers defined in fiqasm.S: */
+extern void __set_fiq_regs(unsigned long const *regs);
+extern void __get_fiq_regs(unsigned long *regs);
+
+static inline void set_fiq_regs(struct pt_regs const *regs)
+{
+	__set_fiq_regs(&regs->ARM_r8);
+}
+
+static inline void get_fiq_regs(struct pt_regs *regs)
+{
+	__get_fiq_regs(&regs->ARM_r8);
+}
+
 #endif
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
new file mode 100644
index 00000000000..2c9f10df756
--- /dev/null
+++ b/arch/arm/include/asm/firmware.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics.
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_FIRMWARE_H
+#define __ASM_ARM_FIRMWARE_H
+
+#include <linux/bug.h>
+
+/*
+ * struct firmware_ops
+ *
+ * A structure to specify available firmware operations.
+ *
+ * A filled-in structure can be registered with register_firmware_ops().
+ */
+struct firmware_ops {
+	/*
+	 * Inform the firmware we intend to enter CPU idle mode
+	 */
+	int (*prepare_idle)(void);
+	/*
+	 * Enters CPU idle mode
+	 */
+	int (*do_idle)(void);
+	/*
+	 * Sets boot address of specified physical CPU
+	 */
+	int (*set_cpu_boot_addr)(int cpu, unsigned long boot_addr);
+	/*
+	 * Boots specified physical CPU
+	 */
+	int (*cpu_boot)(int cpu);
+	/*
+	 * Initializes L2 cache
+	 */
+	int (*l2x0_init)(void);
+};
+
+/* Global pointer for current firmware_ops structure, can't be NULL. */
+extern const struct firmware_ops *firmware_ops;
+
+/*
+ * call_firmware_op(op, ...)
+ *
+ * Checks if firmware operation is present and calls it,
+ * otherwise returns -ENOSYS
+ */
+#define call_firmware_op(op, ...)					\
+	((firmware_ops->op) ? firmware_ops->op(__VA_ARGS__) : (-ENOSYS))
+
+/*
+ * register_firmware_ops(ops)
+ *
+ * A function to register platform firmware_ops struct.
+ */
+static inline void register_firmware_ops(const struct firmware_ops *ops)
+{
+	BUG_ON(!ops);
+
+	firmware_ops = ops;
+}
+
+#endif
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index bbae919bceb..74124b0d0d7 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -1,24 +1,11 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-/*
- * Nothing too fancy for now.
- *
- * On ARM we already have well known fixed virtual addresses imposed by
- * the architecture such as the vector page which is located at 0xffff0000,
- * therefore a second level page table is already allocated covering
- * 0xfff00000 upwards.
- *
- * The cache flushing code in proc-xscale.S uses the virtual area between
- * 0xfffe0000 and 0xfffeffff.
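Back to the firmware interface above: usage is two-sided. The platform registers its ops once at boot, and generic code calls through call_firmware_op(), treating -ENOSYS as "no firmware hook, take the legacy path". A sketch with invented platform names:

	static int my_cpu_boot(int cpu)
	{
		return my_secure_call(CMD_CPU_BOOT, cpu);	/* hypothetical */
	}

	static const struct firmware_ops my_firmware_ops = {
		.cpu_boot = my_cpu_boot,
	};

	static void __init my_init_early(void)
	{
		register_firmware_ops(&my_firmware_ops);
	}

	/* elsewhere, e.g. in SMP bringup: */
	if (call_firmware_op(cpu_boot, cpu) == -ENOSYS)
		boot_secondary_the_old_way(cpu);		/* hypothetical */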
- */
-
-#define FIXADDR_START		0xfff00000UL
-#define FIXADDR_TOP		0xfffe0000UL
+#define FIXADDR_START		0xffc00000UL
+#define FIXADDR_TOP		0xffe00000UL
 #define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)
 
-#define FIX_KMAP_BEGIN		0
-#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)
+#define FIX_KMAP_NR_PTES	(FIXADDR_SIZE >> PAGE_SHIFT)
 
 #define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)
@@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void);
 
 static inline unsigned long fix_to_virt(const unsigned int idx)
 {
-	if (idx >= FIX_KMAP_END)
+	if (idx >= FIX_KMAP_NR_PTES)
 		__this_fixmap_does_not_exist();
 	return __fix_to_virt(idx);
 }
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 59426a4595c..e847d23351e 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -8,7 +8,7 @@
 #define	flat_argvp_envp_on_stack()		1
 #define	flat_old_ram_flag(flags)		(flags)
 #define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
-#define	flat_get_addr_from_rp(rp, relval, flags, persistent) get_unaligned(rp)
+#define	flat_get_addr_from_rp(rp, relval, flags, persistent) ((void)persistent,get_unaligned(rp))
 #define	flat_put_addr_at_rp(rp, val, relval)	put_unaligned(val,rp)
 #define	flat_get_relocate_addr(rel)		(rel)
 #define	flat_set_persistent(relval, p)		0
diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
index c9f03eccc9d..f4882553fbb 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -25,7 +25,7 @@
 #define fd_inb(port)		inb((port))
 #define fd_request_irq()	request_irq(IRQ_FLOPPYDISK,floppy_interrupt,\
-					    IRQF_DISABLED,"floppy",NULL)
+					    0,"floppy",NULL)
 #define fd_free_irq()		free_irq(IRQ_FLOPPYDISK,NULL)
 #define fd_disable_irq()	disable_irq(IRQ_FLOPPYDISK)
 #define fd_enable_irq()		enable_irq(IRQ_FLOPPYDISK)
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
new file mode 100644
index 00000000000..de535474692
--- /dev/null
+++ b/arch/arm/include/asm/fncpy.h
@@ -0,0 +1,94 @@
+/*
+ * arch/arm/include/asm/fncpy.h - helper macros for function body copying
+ *
+ * Copyright (C) 2011 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * These macros are intended for use when there is a need to copy a low-level
+ * function body into special memory.
+ *
+ * For example, when reconfiguring the SDRAM controller, the code doing the
+ * reconfiguration may need to run from SRAM.
+ *
+ * NOTE: the copied function body must be entirely self-contained and
+ * position-independent in order for this to work properly.
+ *
+ * NOTE: in order for embedded literals and data to get referenced correctly,
+ * the alignment of functions must be preserved when copying.  To ensure this,
+ * the source and destination addresses for fncpy() must be aligned to a
+ * multiple of 8 bytes: you will get a BUG() if this condition is not met.
+ * You will typically need a ".align 3" directive in the assembler where the
+ * function to be copied is defined, and ensure that your allocator for the
+ * destination buffer returns 8-byte-aligned pointers.
+ *
+ * Typical usage example:
+ *
+ * extern int f(args);
+ * extern uint32_t size_of_f;
+ * int (*copied_f)(args);
+ * void *sram_buffer;
+ *
+ * copied_f = fncpy(sram_buffer, &f, size_of_f);
+ *
+ * ... later, call the function: ...
+ *
+ * copied_f(args);
+ *
+ * The size of the function to be copied can't be determined from C:
+ * this must be determined by other means, such as adding assembler directives
+ * in the file where f is defined.
+ */
+
+#ifndef __ASM_FNCPY_H
+#define __ASM_FNCPY_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Minimum alignment requirement for the source and destination addresses
+ * for function copying.
+ */
+#define FNCPY_ALIGN 8
+
+#define fncpy(dest_buf, funcp, size) ({					\
+	uintptr_t __funcp_address;					\
+	typeof(funcp) __result;						\
+									\
+	asm("" : "=r" (__funcp_address) : "0" (funcp));			\
+									\
+	/*								\
+	 * Ensure alignment of source and destination addresses,	\
+	 * disregarding the function's Thumb bit:			\
+	 */								\
+	BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||		\
+		(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1)));	\
+									\
+	memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);	\
+	flush_icache_range((unsigned long)(dest_buf),			\
+		(unsigned long)(dest_buf) + (size));			\
+									\
+	asm("" : "=r" (__result)					\
+		: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1)));	\
+									\
+	__result;							\
+})
+
+#endif /* !__ASM_FNCPY_H */
diff --git a/arch/arm/include/asm/fpstate.h b/arch/arm/include/asm/fpstate.h
index ee5e03efc1b..3ad4c10d0d8 100644
--- a/arch/arm/include/asm/fpstate.h
+++ b/arch/arm/include/asm/fpstate.h
@@ -18,7 +18,7 @@
  * VFP storage area has:
  *  - FPEXC, FPSCR, FPINST and FPINST2.
  *  - 16 or 32 double precision data registers
- *  - an implementation-dependant word of state for FLDMX/FSTMX (pre-ARMv6)
+ *  - an implementation-dependent word of state for FLDMX/FSTMX (pre-ARMv6)
  *
 *  FPEXC will always be non-zero once the VFP has been used in this process.
*/ diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index f89515adac6..39eb16b0066 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -52,15 +52,7 @@ extern inline void *return_address(unsigned int level)  #endif -#define HAVE_ARCH_CALLER_ADDR - -#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -#define CALLER_ADDR1 ((unsigned long)return_address(1)) -#define CALLER_ADDR2 ((unsigned long)return_address(2)) -#define CALLER_ADDR3 ((unsigned long)return_address(3)) -#define CALLER_ADDR4 ((unsigned long)return_address(4)) -#define CALLER_ADDR5 ((unsigned long)return_address(5)) -#define CALLER_ADDR6 ((unsigned long)return_address(6)) +#define ftrace_return_address(n) return_address(n)  #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 540a044153a..53e69dae796 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -3,68 +3,144 @@  #ifdef __KERNEL__ -#ifdef CONFIG_SMP - -#include <asm-generic/futex.h> - -#else /* !SMP, we can work around lack of atomic ops by disabling preemption */ -  #include <linux/futex.h> -#include <linux/preempt.h>  #include <linux/uaccess.h>  #include <asm/errno.h> -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\ -	__asm__ __volatile__(					\ -	"1:	ldrt	%1, [%2]\n"				\ -	"	" insn "\n"					\ -	"2:	strt	%0, [%2]\n"				\ -	"	mov	%0, #0\n"				\ +#define __futex_atomic_ex_table(err_reg)			\  	"3:\n"							\  	"	.pushsection __ex_table,\"a\"\n"		\  	"	.align	3\n"					\  	"	.long	1b, 4f, 2b, 4f\n"			\  	"	.popsection\n"					\  	"	.pushsection .fixup,\"ax\"\n"			\ -	"4:	mov	%0, %4\n"				\ +	"	.align	2\n"					\ +	"4:	mov	%0, " err_reg "\n"			\  	"	b	3b\n"					\ -	"	.popsection"					\ -	: "=&r" (ret), "=&r" (oldval)				\ +	"	.popsection" + +#ifdef CONFIG_SMP + +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\ +	smp_mb();						\ +	prefetchw(uaddr);					\ +	__asm__ __volatile__(					\ +	"1:	ldrex	%1, [%3]\n"				\ +	"	" insn "\n"					\ +	"2:	strex	%2, %0, [%3]\n"				\ +	"	teq	%2, #0\n"				\ +	"	bne	1b\n"					\ +	"	mov	%0, #0\n"				\ +	__futex_atomic_ex_table("%5")				\ +	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\ +	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\ +	: "cc", "memory") + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +			      u32 oldval, u32 newval) +{ +	int ret; +	u32 val; + +	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +		return -EFAULT; + +	smp_mb(); +	/* Prefetching cannot fault */ +	prefetchw(uaddr); +	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" +	"1:	ldrex	%1, [%4]\n" +	"	teq	%1, %2\n" +	"	ite	eq	@ explicit IT needed for the 2b label\n" +	"2:	strexeq	%0, %3, [%4]\n" +	"	movne	%0, #0\n" +	"	teq	%0, #0\n" +	"	bne	1b\n" +	__futex_atomic_ex_table("%5") +	: "=&r" (ret), "=&r" (val) +	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) +	: "cc", "memory"); +	smp_mb(); + +	*uval = val; +	return ret; +} + +#else /* !SMP, we can work around lack of atomic ops by disabling preemption */ + +#include <linux/preempt.h> +#include <asm/domain.h> + +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\ +	__asm__ __volatile__(					\ +	"1:	" TUSER(ldr) "	%1, [%3]\n"			\ +	"	" insn "\n"					\ +	"2:	" TUSER(str) "	%0, [%3]\n"			\ +	"	mov	%0, #0\n"				\ +	__futex_atomic_ex_table("%5")				\ +	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\  	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\  	: "cc", "memory")  static inline int 
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr) +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, +			      u32 oldval, u32 newval) +{ +	int ret = 0; +	u32 val; + +	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) +		return -EFAULT; + +	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" +	"1:	" TUSER(ldr) "	%1, [%4]\n" +	"	teq	%1, %2\n" +	"	it	eq	@ explicit IT needed for the 2b label\n" +	"2:	" TUSER(streq) "	%3, [%4]\n" +	__futex_atomic_ex_table("%5") +	: "+r" (ret), "=&r" (val) +	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) +	: "cc", "memory"); + +	*uval = val; +	return ret; +} + +#endif /* !SMP */ + +static inline int +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)  {  	int op = (encoded_op >> 28) & 7;  	int cmp = (encoded_op >> 24) & 15;  	int oparg = (encoded_op << 8) >> 20;  	int cmparg = (encoded_op << 20) >> 20; -	int oldval = 0, ret; +	int oldval = 0, ret, tmp;  	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))  		oparg = 1 << oparg; -	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) +	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))  		return -EFAULT;  	pagefault_disable();	/* implies preempt_disable() */  	switch (op) {  	case FUTEX_OP_SET: -		__futex_atomic_op("mov	%0, %3", ret, oldval, uaddr, oparg); +		__futex_atomic_op("mov	%0, %4", ret, oldval, tmp, uaddr, oparg);  		break;  	case FUTEX_OP_ADD: -		__futex_atomic_op("add	%0, %1, %3", ret, oldval, uaddr, oparg); +		__futex_atomic_op("add	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);  		break;  	case FUTEX_OP_OR: -		__futex_atomic_op("orr	%0, %1, %3", ret, oldval, uaddr, oparg); +		__futex_atomic_op("orr	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);  		break;  	case FUTEX_OP_ANDN: -		__futex_atomic_op("and	%0, %1, %3", ret, oldval, uaddr, ~oparg); +		__futex_atomic_op("and	%0, %1, %4", ret, oldval, tmp, uaddr, ~oparg);  		break;  	case FUTEX_OP_XOR: -		__futex_atomic_op("eor	%0, %1, %3", ret, oldval, uaddr, oparg); +		__futex_atomic_op("eor	%0, %1, %4", ret, oldval, tmp, uaddr, oparg);  		break;  	default:  		ret = -ENOSYS; @@ -86,40 +162,5 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)  	return ret;  } -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) -{ -	int val; - -	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) -		return -EFAULT; - -	pagefault_disable();	/* implies preempt_disable() */ - -	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" -	"1:	ldrt	%0, [%3]\n" -	"	teq	%0, %1\n" -	"	it	eq	@ explicit IT needed for the 2b label\n" -	"2:	streqt	%2, [%3]\n" -	"3:\n" -	"	.pushsection __ex_table,\"a\"\n" -	"	.align	3\n" -	"	.long	1b, 4f, 2b, 4f\n" -	"	.popsection\n" -	"	.pushsection .fixup,\"ax\"\n" -	"4:	mov	%0, %4\n" -	"	b	3b\n" -	"	.popsection" -	: "=&r" (val) -	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) -	: "cc", "memory"); - -	pagefault_enable();	/* subsumes preempt_enable() */ - -	return val; -} - -#endif /* !SMP */ -  #endif /* __KERNEL__ */  #endif /* _ASM_ARM_FUTEX_H */ diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h new file mode 100644 index 00000000000..a3c24cd5b7c --- /dev/null +++ b/arch/arm/include/asm/glue-cache.h @@ -0,0 +1,166 @@ +/* + *  arch/arm/include/asm/glue-cache.h + * + *  Copyright (C) 1999-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
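For reference, the encoded_op unpacked by futex_atomic_op_inuser() above is packed by userspace with the FUTEX_OP() macro; to the best of my reading of <linux/futex.h>, the layout it decodes is:

	#define FUTEX_OP(op, oparg, cmp, cmparg) \
		(((op & 0xf) << 28) | ((cmp & 0xf) << 24) | \
		 ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

	/* e.g. "atomically OR in 0x4, report whether the old value was zero": */
	int encoded = FUTEX_OP(FUTEX_OP_OR, 0x4, FUTEX_OP_CMP_EQ, 0);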
+ */ +#ifndef ASM_GLUE_CACHE_H +#define ASM_GLUE_CACHE_H + +#include <asm/glue.h> + +/* + *	Cache Model + *	=========== + */ +#undef _CACHE +#undef MULTI_CACHE + +#if defined(CONFIG_CPU_CACHE_V4) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE v4 +# endif +#endif + +#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ +    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ +    defined(CONFIG_CPU_ARM1026) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_FA526) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE fa +# endif +#endif + +#if defined(CONFIG_CPU_ARM926T) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE arm926 +# endif +#endif + +#if defined(CONFIG_CPU_ARM940T) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE arm940 +# endif +#endif + +#if defined(CONFIG_CPU_ARM946E) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE arm946 +# endif +#endif + +#if defined(CONFIG_CPU_CACHE_V4WB) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE v4wb +# endif +#endif + +#if defined(CONFIG_CPU_XSCALE) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE xscale +# endif +#endif + +#if defined(CONFIG_CPU_XSC3) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE xsc3 +# endif +#endif + +#if defined(CONFIG_CPU_MOHAWK) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE mohawk +# endif +#endif + +#if defined(CONFIG_CPU_FEROCEON) +# define MULTI_CACHE 1 +#endif + +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE v6 +# endif +#endif + +#if defined(CONFIG_CPU_V7) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE v7 +# endif +#endif + +#if defined(CONFIG_CPU_V7M) +# ifdef _CACHE +#  define MULTI_CACHE 1 +# else +#  define _CACHE nop +# endif +#endif + +#if !defined(_CACHE) && !defined(MULTI_CACHE) +#error Unknown cache maintenance model +#endif + +#ifndef __ASSEMBLER__ +static inline void nop_flush_icache_all(void) { } +static inline void nop_flush_kern_cache_all(void) { } +static inline void nop_flush_kern_cache_louis(void) { } +static inline void nop_flush_user_cache_all(void) { } +static inline void nop_flush_user_cache_range(unsigned long a, +		unsigned long b, unsigned int c) { } + +static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { } +static inline int nop_coherent_user_range(unsigned long a, +		unsigned long b) { return 0; } +static inline void nop_flush_kern_dcache_area(void *a, size_t s) { } + +static inline void nop_dma_flush_range(const void *a, const void *b) { } + +static inline void nop_dma_map_area(const void *s, size_t l, int f) { } +static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } +#endif + +#ifndef MULTI_CACHE +#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all) +#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all) +#define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_louis) +#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all) +#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range) +#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range) +#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range) +#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area) + +#define dmac_map_area			__glue(_CACHE,_dma_map_area) +#define dmac_unmap_area			
__glue(_CACHE,_dma_unmap_area) +#define dmac_flush_range		__glue(_CACHE,_dma_flush_range) +#endif + +#endif diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h new file mode 100644 index 00000000000..04e18b65665 --- /dev/null +++ b/arch/arm/include/asm/glue-df.h @@ -0,0 +1,102 @@ +/* + *  arch/arm/include/asm/glue-df.h + * + *  Copyright (C) 1997-1999 Russell King + *  Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASM_GLUE_DF_H +#define ASM_GLUE_DF_H + +#include <asm/glue.h> + +/* + *	Data Abort Model + *	================ + * + *	We have the following to choose from: + *	  arm7		- ARM7 style + *	  v4_early	- ARMv4 without Thumb early abort handler + *	  v4t_late	- ARMv4 with Thumb late abort handler + *	  v4t_early	- ARMv4 with Thumb early abort handler + *	  v5t_early	- ARMv5 with Thumb early abort handler + *	  v5tj_early	- ARMv5 with Thumb and Java early abort handler + *	  xscale	- ARMv5 with Thumb with Xscale extensions + *	  v6_early	- ARMv6 generic early abort handler + *	  v7_early	- ARMv7 generic early abort handler + */ +#undef CPU_DABORT_HANDLER +#undef MULTI_DABORT + +#ifdef CONFIG_CPU_ABRT_EV4 +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v4_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_LV4T +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v4t_late_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV4T +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v4t_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV5T +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v5t_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV5TJ +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v5tj_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV6 +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v6_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_EV7 +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER v7_early_abort +# endif +#endif + +#ifdef CONFIG_CPU_ABRT_NOMMU +# ifdef CPU_DABORT_HANDLER +#  define MULTI_DABORT 1 +# else +#  define CPU_DABORT_HANDLER nommu_early_abort +# endif +#endif + +#ifndef CPU_DABORT_HANDLER +#error Unknown data abort handler type +#endif + +#endif diff --git a/arch/arm/include/asm/glue-pf.h b/arch/arm/include/asm/glue-pf.h new file mode 100644 index 00000000000..d385f37c13f --- /dev/null +++ b/arch/arm/include/asm/glue-pf.h @@ -0,0 +1,57 @@ +/* + *  arch/arm/include/asm/glue-pf.h + * + *  Copyright (C) 1997-1999 Russell King + *  Copyright (C) 2000-2002 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
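glue-df.h above, and glue-pf.h that follows, reuse the selection idiom of glue-cache.h: the first configured model claims the direct symbol, and any second model flips the build to indirect dispatch. Distilled, with made-up model names:

	#undef HANDLER
	#undef MULTI

	#ifdef CONFIG_MODEL_A
	# ifdef HANDLER			/* someone already claimed it */
	#  define MULTI 1
	# else
	#  define HANDLER model_a_handler
	# endif
	#endif
	/* ...repeated per model; MULTI set means call via function pointer */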
+ */ +#ifndef ASM_GLUE_PF_H +#define ASM_GLUE_PF_H + +#include <asm/glue.h> + +/* + *	Prefetch Abort Model + *	================ + * + *	We have the following to choose from: + *	  legacy	- no IFSR, no IFAR + *	  v6		- ARMv6: IFSR, no IFAR + *	  v7		- ARMv7: IFSR and IFAR + */ + +#undef CPU_PABORT_HANDLER +#undef MULTI_PABORT + +#ifdef CONFIG_CPU_PABRT_LEGACY +# ifdef CPU_PABORT_HANDLER +#  define MULTI_PABORT 1 +# else +#  define CPU_PABORT_HANDLER legacy_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V6 +# ifdef CPU_PABORT_HANDLER +#  define MULTI_PABORT 1 +# else +#  define CPU_PABORT_HANDLER v6_pabort +# endif +#endif + +#ifdef CONFIG_CPU_PABRT_V7 +# ifdef CPU_PABORT_HANDLER +#  define MULTI_PABORT 1 +# else +#  define CPU_PABORT_HANDLER v7_pabort +# endif +#endif + +#ifndef CPU_PABORT_HANDLER +#error Unknown prefetch abort handler type +#endif + +#endif diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h new file mode 100644 index 00000000000..74a8b84f3cb --- /dev/null +++ b/arch/arm/include/asm/glue-proc.h @@ -0,0 +1,264 @@ +/* + *  arch/arm/include/asm/glue-proc.h + * + *  Copyright (C) 1997-1999 Russell King + *  Copyright (C) 2000 Deep Blue Solutions Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASM_GLUE_PROC_H +#define ASM_GLUE_PROC_H + +#include <asm/glue.h> + +/* + * Work out if we need multiple CPU support + */ +#undef MULTI_CPU +#undef CPU_NAME + +/* + * CPU_NAME - the prefix for CPU related functions + */ + +#ifdef CONFIG_CPU_ARM7TDMI +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm7tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM720T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm720 +# endif +#endif + +#ifdef CONFIG_CPU_ARM740T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm740 +# endif +#endif + +#ifdef CONFIG_CPU_ARM9TDMI +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm9tdmi +# endif +#endif + +#ifdef CONFIG_CPU_ARM920T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm920 +# endif +#endif + +#ifdef CONFIG_CPU_ARM922T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm922 +# endif +#endif + +#ifdef CONFIG_CPU_FA526 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_fa526 +# endif +#endif + +#ifdef CONFIG_CPU_ARM925T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm925 +# endif +#endif + +#ifdef CONFIG_CPU_ARM926T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm926 +# endif +#endif + +#ifdef CONFIG_CPU_ARM940T +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm940 +# endif +#endif + +#ifdef CONFIG_CPU_ARM946E +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm946 +# endif +#endif + +#ifdef CONFIG_CPU_SA110 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_sa110 +# endif +#endif + +#ifdef CONFIG_CPU_SA1100 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_sa1100 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020 
+# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm1020 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1020E +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm1020e +# endif +#endif + +#ifdef CONFIG_CPU_ARM1022 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm1022 +# endif +#endif + +#ifdef CONFIG_CPU_ARM1026 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_arm1026 +# endif +#endif + +#ifdef CONFIG_CPU_XSCALE +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_xscale +# endif +#endif + +#ifdef CONFIG_CPU_XSC3 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_xsc3 +# endif +#endif + +#ifdef CONFIG_CPU_MOHAWK +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_mohawk +# endif +#endif + +#ifdef CONFIG_CPU_FEROCEON +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_feroceon +# endif +#endif + +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_v6 +# endif +#endif + +#ifdef CONFIG_CPU_V7 +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_v7 +# endif +#endif + +#ifdef CONFIG_CPU_V7M +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_v7m +# endif +#endif + +#ifdef CONFIG_CPU_PJ4B +# ifdef CPU_NAME +#  undef  MULTI_CPU +#  define MULTI_CPU +# else +#  define CPU_NAME cpu_pj4b +# endif +#endif + +#ifndef MULTI_CPU +#define cpu_proc_init			__glue(CPU_NAME,_proc_init) +#define cpu_proc_fin			__glue(CPU_NAME,_proc_fin) +#define cpu_reset			__glue(CPU_NAME,_reset) +#define cpu_do_idle			__glue(CPU_NAME,_do_idle) +#define cpu_dcache_clean_area		__glue(CPU_NAME,_dcache_clean_area) +#define cpu_do_switch_mm		__glue(CPU_NAME,_switch_mm) +#define cpu_set_pte_ext			__glue(CPU_NAME,_set_pte_ext) +#define cpu_suspend_size		__glue(CPU_NAME,_suspend_size) +#define cpu_do_suspend			__glue(CPU_NAME,_do_suspend) +#define cpu_do_resume			__glue(CPU_NAME,_do_resume) +#endif + +#endif diff --git a/arch/arm/include/asm/glue.h b/arch/arm/include/asm/glue.h index 234a3fc1c78..fbf71d75ec8 100644 --- a/arch/arm/include/asm/glue.h +++ b/arch/arm/include/asm/glue.h @@ -10,12 +10,11 @@   *   *  This file provides the glue to stick the processor-specific bits   *  into the kernel in an efficient manner.  The idea is to use branches - *  when we're only targetting one class of TLB, or indirect calls - *  when we're targetting multiple classes of TLBs. + *  when we're only targeting one class of TLB, or indirect calls + *  when we're targeting multiple classes of TLBs.   
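As glue.h says above, the point of all this is that a kernel built for a single processor class never pays for indirection. Assuming only CONFIG_CPU_V7 is enabled (and taking the int return type from the struct processor definition), the #defines at the end of glue-proc.h resolve each cpu_* operation to a direct call:

	/* with CPU_NAME == cpu_v7, __glue(CPU_NAME,_do_idle) expands to: */
	extern int cpu_v7_do_idle(void);	/* implemented in proc-v7.S */

	cpu_do_idle();	/* compiles to a direct branch to cpu_v7_do_idle */

With MULTI_CPU defined, the same names become indirect calls through the processor vector instead.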
*/  #ifdef __KERNEL__ -  #ifdef __STDC__  #define ____glue(name,fn)	name##fn  #else @@ -23,141 +22,4 @@  #endif  #define __glue(name,fn)		____glue(name,fn) - - -/* - *	Data Abort Model - *	================ - * - *	We have the following to choose from: - *	  arm6          - ARM6 style - *	  arm7		- ARM7 style - *	  v4_early	- ARMv4 without Thumb early abort handler - *	  v4t_late	- ARMv4 with Thumb late abort handler - *	  v4t_early	- ARMv4 with Thumb early abort handler - *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler - *	  xscale	- ARMv5 with Thumb with Xscale extensions - *	  v6_early	- ARMv6 generic early abort handler - *	  v7_early	- ARMv7 generic early abort handler - */ -#undef CPU_DABORT_HANDLER -#undef MULTI_DABORT - -#if defined(CONFIG_CPU_ARM610) -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER cpu_arm6_data_abort -# endif -#endif - -#if defined(CONFIG_CPU_ARM710) -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER cpu_arm7_data_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_LV4T -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v4t_late_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4 -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v4_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV4T -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v4t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5TJ -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v5tj_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV5T -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v5t_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV6 -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v6_early_abort -# endif -#endif - -#ifdef CONFIG_CPU_ABRT_EV7 -# ifdef CPU_DABORT_HANDLER -#  define MULTI_DABORT 1 -# else -#  define CPU_DABORT_HANDLER v7_early_abort -# endif -#endif - -#ifndef CPU_DABORT_HANDLER -#error Unknown data abort handler type -#endif - -/* - *	Prefetch Abort Model - *	================ - * - *	We have the following to choose from: - *	  legacy	- no IFSR, no IFAR - *	  v6		- ARMv6: IFSR, no IFAR - *	  v7		- ARMv7: IFSR and IFAR - */ - -#undef CPU_PABORT_HANDLER -#undef MULTI_PABORT - -#ifdef CONFIG_CPU_PABRT_LEGACY -# ifdef CPU_PABORT_HANDLER -#  define MULTI_PABORT 1 -# else -#  define CPU_PABORT_HANDLER legacy_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V6 -# ifdef CPU_PABORT_HANDLER -#  define MULTI_PABORT 1 -# else -#  define CPU_PABORT_HANDLER v6_pabort -# endif -#endif - -#ifdef CONFIG_CPU_PABRT_V7 -# ifdef CPU_PABORT_HANDLER -#  define MULTI_PABORT 1 -# else -#  define CPU_PABORT_HANDLER v7_pabort -# endif -#endif - -#ifndef CPU_PABORT_HANDLER -#error Unknown prefetch abort handler type -#endif -  #endif diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h index 166a7a3e284..477e0206e01 100644 --- a/arch/arm/include/asm/gpio.h +++ b/arch/arm/include/asm/gpio.h @@ -1,7 +1,32 @@  #ifndef _ARCH_ARM_GPIO_H  #define _ARCH_ARM_GPIO_H +#if CONFIG_ARCH_NR_GPIO > 0 +#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO +#endif +  /* not all ARM platforms necessarily support this API ... 
*/ +#ifdef CONFIG_NEED_MACH_GPIO_H  #include <mach/gpio.h> +#endif + +#ifndef __ARM_GPIOLIB_COMPLEX +/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */ +#include <asm-generic/gpio.h> + +/* The trivial gpiolib dispatchers */ +#define gpio_get_value  __gpio_get_value +#define gpio_set_value  __gpio_set_value +#define gpio_cansleep   __gpio_cansleep +#endif + +/* + * Provide a default gpio_to_irq() which should satisfy every case. + * However, some platforms want to do this differently, so allow them + * to override it. + */ +#ifndef gpio_to_irq +#define gpio_to_irq	__gpio_to_irq +#endif  #endif /* _ARCH_ARM_GPIO_H */ diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 6d7485aff95..fe3ea776dc3 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -5,29 +5,27 @@  #include <linux/threads.h>  #include <asm/irq.h> +#define NR_IPI	8 +  typedef struct {  	unsigned int __softirq_pending; -	unsigned int local_timer_irqs; +#ifdef CONFIG_SMP +	unsigned int ipi_irqs[NR_IPI]; +#endif  } ____cacheline_aligned irq_cpustat_t;  #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */ -#if NR_IRQS > 512 -#define HARDIRQ_BITS	10 -#elif NR_IRQS > 256 -#define HARDIRQ_BITS	9 +#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++ +#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member) + +#ifdef CONFIG_SMP +u64 smp_irq_stat_cpu(unsigned int cpu);  #else -#define HARDIRQ_BITS	8 +#define smp_irq_stat_cpu(cpu)	0  #endif -/* - * The hardirq mask has to be large enough to have space - * for potentially all IRQ sources in the system nesting - * on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! -#endif +#define arch_irq_stat_cpu	smp_irq_stat_cpu  #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1 diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h index c0f4e7bf22d..d6030ff599d 100644 --- a/arch/arm/include/asm/hardware/arm_timer.h +++ b/arch/arm/include/asm/hardware/arm_timer.h @@ -9,7 +9,12 @@   *   * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview   * can have 16-bit or 32-bit selectable via a bit in the control register. + * + * Every SP804 contains two identical timers.   */ +#define TIMER_1_BASE	0x00 +#define TIMER_2_BASE	0x20 +  #define TIMER_LOAD	0x00			/* ACVR rw */  #define TIMER_VALUE	0x04			/* ACVR ro */  #define TIMER_CTRL	0x08			/* ACVR rw */ diff --git a/arch/arm/include/asm/hardware/cache-feroceon-l2.h b/arch/arm/include/asm/hardware/cache-feroceon-l2.h new file mode 100644 index 00000000000..12e1588dc4f --- /dev/null +++ b/arch/arm/include/asm/hardware/cache-feroceon-l2.h @@ -0,0 +1,13 @@ +/* + * arch/arm/include/asm/hardware/cache-feroceon-l2.h + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
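Tying the gpio.h changes above together: on a converted platform the generic gpiolib now backs the whole API, so driver code is plain gpiolib calls (the GPIO number and label here are invented):

	int val, irq;

	if (!gpio_request(42, "card-detect")) {
		gpio_direction_input(42);
		val = gpio_get_value(42);	/* dispatches to __gpio_get_value() */
		irq = gpio_to_irq(42);		/* dispatches to __gpio_to_irq() */
	}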
+ */ + +extern void __init feroceon_l2_init(int l2_wt_override); +extern int __init feroceon_of_init(void); + diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index cc42d5fdee1..3a5ec1c2565 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -20,12 +20,14 @@  #ifndef __ASM_ARM_HARDWARE_L2X0_H  #define __ASM_ARM_HARDWARE_L2X0_H +#include <linux/errno.h> +  #define L2X0_CACHE_ID			0x000  #define L2X0_CACHE_TYPE			0x004  #define L2X0_CTRL			0x100  #define L2X0_AUX_CTRL			0x104 -#define L2X0_TAG_LATENCY_CTRL		0x108 -#define L2X0_DATA_LATENCY_CTRL		0x10C +#define L310_TAG_LATENCY_CTRL		0x108 +#define L310_DATA_LATENCY_CTRL		0x10C  #define L2X0_EVENT_CNT_CTRL		0x200  #define L2X0_EVENT_CNT1_CFG		0x204  #define L2X0_EVENT_CNT0_CFG		0x208 @@ -36,6 +38,7 @@  #define L2X0_RAW_INTR_STAT		0x21C  #define L2X0_INTR_CLEAR			0x220  #define L2X0_CACHE_SYNC			0x730 +#define L2X0_DUMMY_REG			0x740  #define L2X0_INV_LINE_PA		0x770  #define L2X0_INV_WAY			0x77C  #define L2X0_CLEAN_LINE_PA		0x7B0 @@ -44,25 +47,135 @@  #define L2X0_CLEAN_INV_LINE_PA		0x7F0  #define L2X0_CLEAN_INV_LINE_IDX		0x7F8  #define L2X0_CLEAN_INV_WAY		0x7FC -#define L2X0_LOCKDOWN_WAY_D		0x900 -#define L2X0_LOCKDOWN_WAY_I		0x904 +/* + * The lockdown registers repeat 8 times for L310, the L210 has only one + * D and one I lockdown register at 0x0900 and 0x0904. + */ +#define L2X0_LOCKDOWN_WAY_D_BASE	0x900 +#define L2X0_LOCKDOWN_WAY_I_BASE	0x904 +#define L2X0_LOCKDOWN_STRIDE		0x08 +#define L310_ADDR_FILTER_START		0xC00 +#define L310_ADDR_FILTER_END		0xC04  #define L2X0_TEST_OPERATION		0xF00  #define L2X0_LINE_DATA			0xF10  #define L2X0_LINE_TAG			0xF30  #define L2X0_DEBUG_CTRL			0xF40 -#define L2X0_PREFETCH_CTRL		0xF60 -#define L2X0_POWER_CTRL			0xF80 -#define   L2X0_DYNAMIC_CLK_GATING_EN	(1 << 1) -#define   L2X0_STNDBY_MODE_EN		(1 << 0) +#define L310_PREFETCH_CTRL		0xF60 +#define L310_POWER_CTRL			0xF80 +#define   L310_DYNAMIC_CLK_GATING_EN	(1 << 1) +#define   L310_STNDBY_MODE_EN		(1 << 0)  /* Registers shifts and masks */  #define L2X0_CACHE_ID_PART_MASK		(0xf << 6)  #define L2X0_CACHE_ID_PART_L210		(1 << 6) +#define L2X0_CACHE_ID_PART_L220		(2 << 6)  #define L2X0_CACHE_ID_PART_L310		(3 << 6) -#define L2X0_AUX_CTRL_WAY_SIZE_MASK	(0x3 << 17) +#define L2X0_CACHE_ID_RTL_MASK          0x3f +#define L210_CACHE_ID_RTL_R0P2_02	0x00 +#define L210_CACHE_ID_RTL_R0P1		0x01 +#define L210_CACHE_ID_RTL_R0P2_01	0x02 +#define L210_CACHE_ID_RTL_R0P3		0x03 +#define L210_CACHE_ID_RTL_R0P4		0x0b +#define L210_CACHE_ID_RTL_R0P5		0x0f +#define L220_CACHE_ID_RTL_R1P7_01REL0	0x06 +#define L310_CACHE_ID_RTL_R0P0		0x00 +#define L310_CACHE_ID_RTL_R1P0		0x02 +#define L310_CACHE_ID_RTL_R2P0		0x04 +#define L310_CACHE_ID_RTL_R3P0		0x05 +#define L310_CACHE_ID_RTL_R3P1		0x06 +#define L310_CACHE_ID_RTL_R3P1_50REL0	0x07 +#define L310_CACHE_ID_RTL_R3P2		0x08 +#define L310_CACHE_ID_RTL_R3P3		0x09 + +/* L2C auxiliary control register - bits common to L2C-210/220/310 */ +#define L2C_AUX_CTRL_WAY_SIZE_SHIFT		17 +#define L2C_AUX_CTRL_WAY_SIZE_MASK		(7 << 17) +#define L2C_AUX_CTRL_WAY_SIZE(n)		((n) << 17) +#define L2C_AUX_CTRL_EVTMON_ENABLE		BIT(20) +#define L2C_AUX_CTRL_PARITY_ENABLE		BIT(21) +#define L2C_AUX_CTRL_SHARED_OVERRIDE		BIT(22) +/* L2C-210/220 common bits */ +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT	0 +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK	(7 << 0) +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT	3 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK	(7 << 3) +#define 
L2X0_AUX_CTRL_TAG_LATENCY_SHIFT		6 +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK		(7 << 6) +#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT	9 +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK	(7 << 9) +#define L2X0_AUX_CTRL_ASSOC_SHIFT		13 +#define L2X0_AUX_CTRL_ASSOC_MASK		(15 << 13) +/* L2C-210 specific bits */ +#define L210_AUX_CTRL_WRAP_DISABLE		BIT(12) +#define L210_AUX_CTRL_WA_OVERRIDE		BIT(23) +#define L210_AUX_CTRL_EXCLUSIVE_ABORT		BIT(24) +/* L2C-220 specific bits */ +#define L220_AUX_CTRL_EXCLUSIVE_CACHE		BIT(12) +#define L220_AUX_CTRL_FWA_SHIFT			23 +#define L220_AUX_CTRL_FWA_MASK			(3 << 23) +#define L220_AUX_CTRL_NS_LOCKDOWN		BIT(26) +#define L220_AUX_CTRL_NS_INT_CTRL		BIT(27) +/* L2C-310 specific bits */ +#define L310_AUX_CTRL_FULL_LINE_ZERO		BIT(0)	/* R2P0+ */ +#define L310_AUX_CTRL_HIGHPRIO_SO_DEV		BIT(10)	/* R2P0+ */ +#define L310_AUX_CTRL_STORE_LIMITATION		BIT(11)	/* R2P0+ */ +#define L310_AUX_CTRL_EXCLUSIVE_CACHE		BIT(12) +#define L310_AUX_CTRL_ASSOCIATIVITY_16		BIT(16) +#define L310_AUX_CTRL_CACHE_REPLACE_RR		BIT(25)	/* R2P0+ */ +#define L310_AUX_CTRL_NS_LOCKDOWN		BIT(26) +#define L310_AUX_CTRL_NS_INT_CTRL		BIT(27) +#define L310_AUX_CTRL_DATA_PREFETCH		BIT(28) +#define L310_AUX_CTRL_INSTR_PREFETCH		BIT(29) +#define L310_AUX_CTRL_EARLY_BRESP		BIT(30)	/* R2P0+ */ + +#define L310_LATENCY_CTRL_SETUP(n)		((n) << 0) +#define L310_LATENCY_CTRL_RD(n)			((n) << 4) +#define L310_LATENCY_CTRL_WR(n)			((n) << 8) + +#define L310_ADDR_FILTER_EN		1 + +#define L310_PREFETCH_CTRL_OFFSET_MASK		0x1f +#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR	BIT(23) +#define L310_PREFETCH_CTRL_PREFETCH_DROP	BIT(24) +#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP	BIT(27) +#define L310_PREFETCH_CTRL_DATA_PREFETCH	BIT(28) +#define L310_PREFETCH_CTRL_INSTR_PREFETCH	BIT(29) +#define L310_PREFETCH_CTRL_DBL_LINEFILL		BIT(30) + +#define L2X0_CTRL_EN			1 + +#define L2X0_WAY_SIZE_SHIFT		3  #ifndef __ASSEMBLY__ -extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); +extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask); +#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) +extern int l2x0_of_init(u32 aux_val, u32 aux_mask); +#else +static inline int l2x0_of_init(u32 aux_val, u32 aux_mask) +{ +	return -ENODEV; +}  #endif +struct l2x0_regs { +	unsigned long phy_base; +	unsigned long aux_ctrl; +	/* +	 * Whether the following registers need to be saved/restored +	 * depends on platform +	 */ +	unsigned long tag_latency; +	unsigned long data_latency; +	unsigned long filter_start; +	unsigned long filter_end; +	unsigned long prefetch_ctrl; +	unsigned long pwr_ctrl; +	unsigned long ctrl; +	unsigned long aux2_ctrl; +}; + +extern struct l2x0_regs l2x0_saved_regs; + +#endif /* __ASSEMBLY__ */ +  #endif diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h index 538f17ca905..295e2e40151 100644 --- a/arch/arm/include/asm/hardware/cache-tauros2.h +++ b/arch/arm/include/asm/hardware/cache-tauros2.h @@ -8,4 +8,7 @@   * warranty of any kind, whether express or implied.   
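Board code drives the l2x0 declarations above in a DT-first pattern, with aux_val/aux_mask selecting which of the auxiliary-control bits just defined get forced. A sketch; the register base and aux values are hypothetical:

	static void __init my_board_l2_init(void)
	{
		/* prefer the device tree; returns -ENODEV without a node */
		if (l2x0_of_init(0, ~0U) < 0) {
			void __iomem *base = ioremap(0xfff12000, SZ_4K);
			if (base)
				l2x0_init(base, 0x00400000, 0xfe0fffff);
		}
	}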
*/ -extern void __init tauros2_init(void); +#define CACHE_TAUROS2_PREFETCH_ON	(1 << 0) +#define CACHE_TAUROS2_LINEFILL_BURST8	(1 << 1) + +extern void __init tauros2_init(unsigned int features); diff --git a/arch/arm/include/asm/hardware/clps7111.h b/arch/arm/include/asm/hardware/clps7111.h deleted file mode 100644 index 44477225aed..00000000000 --- a/arch/arm/include/asm/hardware/clps7111.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/clps7111.h - * - *  This file contains the hardware definitions of the CLPS7111 internal - *  registers. - * - *  Copyright (C) 2000 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ -#ifndef __ASM_HARDWARE_CLPS7111_H -#define __ASM_HARDWARE_CLPS7111_H - -#define CLPS7111_PHYS_BASE	(0x80000000) - -#ifndef __ASSEMBLY__ -#define clps_readb(off)		__raw_readb(CLPS7111_BASE + (off)) -#define clps_readw(off)		__raw_readw(CLPS7111_BASE + (off)) -#define clps_readl(off)		__raw_readl(CLPS7111_BASE + (off)) -#define clps_writeb(val,off)	__raw_writeb(val, CLPS7111_BASE + (off)) -#define clps_writew(val,off)	__raw_writew(val, CLPS7111_BASE + (off)) -#define clps_writel(val,off)	__raw_writel(val, CLPS7111_BASE + (off)) -#endif - -#define PADR		(0x0000) -#define PBDR		(0x0001) -#define PDDR		(0x0003) -#define PADDR		(0x0040) -#define PBDDR		(0x0041) -#define PDDDR		(0x0043) -#define PEDR		(0x0080) -#define PEDDR		(0x00c0) -#define SYSCON1		(0x0100) -#define SYSFLG1		(0x0140) -#define MEMCFG1		(0x0180) -#define MEMCFG2		(0x01c0) -#define DRFPR		(0x0200) -#define INTSR1		(0x0240) -#define INTMR1		(0x0280) -#define LCDCON		(0x02c0) -#define TC1D            (0x0300) -#define TC2D		(0x0340) -#define RTCDR		(0x0380) -#define RTCMR		(0x03c0) -#define PMPCON		(0x0400) -#define CODR		(0x0440) -#define UARTDR1		(0x0480) -#define UBRLCR1		(0x04c0) -#define SYNCIO		(0x0500) -#define PALLSW		(0x0540) -#define PALMSW		(0x0580) -#define STFCLR		(0x05c0) -#define BLEOI		(0x0600) -#define MCEOI		(0x0640) -#define TEOI		(0x0680) -#define TC1EOI		(0x06c0) -#define TC2EOI		(0x0700) -#define RTCEOI		(0x0740) -#define UMSEOI		(0x0780) -#define COEOI		(0x07c0) -#define HALT		(0x0800) -#define STDBY		(0x0840) - -#define FBADDR		(0x1000) -#define SYSCON2		(0x1100) -#define SYSFLG2		(0x1140) -#define INTSR2		(0x1240) -#define INTMR2		(0x1280) -#define UARTDR2		(0x1480) -#define UBRLCR2		(0x14c0) -#define SS2DR		(0x1500) -#define SRXEOF		(0x1600) -#define SS2POP		(0x16c0) -#define KBDEOI		(0x1700) - -/* common bits: SYSCON1 / SYSCON2 */ -#define SYSCON_UARTEN		(1 << 8) - -#define SYSCON1_KBDSCAN(x)	((x) & 15) -#define SYSCON1_KBDSCANMASK	(15) -#define SYSCON1_TC1M		(1 << 4) -#define SYSCON1_TC1S		(1 << 5) -#define SYSCON1_TC2M		(1 << 6) -#define SYSCON1_TC2S		(1 << 7) -#define SYSCON1_UART1EN		SYSCON_UARTEN -#define SYSCON1_BZTOG		(1 << 9) -#define SYSCON1_BZMOD		(1 << 10) 
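/*
 * Usage sketch, for illustration only: drivers drove these SYSCON1 bits
 * read-modify-write through the clps_readl()/clps_writel() accessors
 * defined earlier in this header. clps7111_enable_uart1() is a
 * hypothetical helper, not part of this patch, and assumes the platform
 * has already mapped CLPS7111_BASE and that <linux/types.h> is visible.
 */
static inline void clps7111_enable_uart1(void)
{
	u32 syscon1 = clps_readl(SYSCON1);	/* current control bits */

	clps_writel(syscon1 | SYSCON1_UART1EN, SYSCON1);	/* set UART1 enable */
}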
-#define SYSCON1_DBGEN		(1 << 11) -#define SYSCON1_LCDEN		(1 << 12) -#define SYSCON1_CDENTX		(1 << 13) -#define SYSCON1_CDENRX		(1 << 14) -#define SYSCON1_SIREN		(1 << 15) -#define SYSCON1_ADCKSEL(x)	(((x) & 3) << 16) -#define SYSCON1_ADCKSEL_MASK	(3 << 16) -#define SYSCON1_EXCKEN		(1 << 18) -#define SYSCON1_WAKEDIS		(1 << 19) -#define SYSCON1_IRTXM		(1 << 20) - -/* common bits: SYSFLG1 / SYSFLG2 */ -#define SYSFLG_UBUSY		(1 << 11) -#define SYSFLG_URXFE		(1 << 22) -#define SYSFLG_UTXFF		(1 << 23) - -#define SYSFLG1_MCDR		(1 << 0) -#define SYSFLG1_DCDET		(1 << 1) -#define SYSFLG1_WUDR		(1 << 2) -#define SYSFLG1_WUON		(1 << 3) -#define SYSFLG1_CTS		(1 << 8) -#define SYSFLG1_DSR		(1 << 9) -#define SYSFLG1_DCD		(1 << 10) -#define SYSFLG1_UBUSY		SYSFLG_UBUSY -#define SYSFLG1_NBFLG		(1 << 12) -#define SYSFLG1_RSTFLG		(1 << 13) -#define SYSFLG1_PFFLG		(1 << 14) -#define SYSFLG1_CLDFLG		(1 << 15) -#define SYSFLG1_URXFE		SYSFLG_URXFE -#define SYSFLG1_UTXFF		SYSFLG_UTXFF -#define SYSFLG1_CRXFE		(1 << 24) -#define SYSFLG1_CTXFF		(1 << 25) -#define SYSFLG1_SSIBUSY		(1 << 26) -#define SYSFLG1_ID		(1 << 29) - -#define SYSFLG2_SSRXOF		(1 << 0) -#define SYSFLG2_RESVAL		(1 << 1) -#define SYSFLG2_RESFRM		(1 << 2) -#define SYSFLG2_SS2RXFE		(1 << 3) -#define SYSFLG2_SS2TXFF		(1 << 4) -#define SYSFLG2_SS2TXUF		(1 << 5) -#define SYSFLG2_CKMODE		(1 << 6) -#define SYSFLG2_UBUSY		SYSFLG_UBUSY -#define SYSFLG2_URXFE		SYSFLG_URXFE -#define SYSFLG2_UTXFF		SYSFLG_UTXFF - -#define LCDCON_GSEN		(1 << 30) -#define LCDCON_GSMD		(1 << 31) - -#define SYSCON2_SERSEL		(1 << 0) -#define SYSCON2_KBD6		(1 << 1) -#define SYSCON2_DRAMZ		(1 << 2) -#define SYSCON2_KBWEN		(1 << 3) -#define SYSCON2_SS2TXEN		(1 << 4) -#define SYSCON2_PCCARD1		(1 << 5) -#define SYSCON2_PCCARD2		(1 << 6) -#define SYSCON2_SS2RXEN		(1 << 7) -#define SYSCON2_UART2EN		SYSCON_UARTEN -#define SYSCON2_SS2MAEN		(1 << 9) -#define SYSCON2_OSTB		(1 << 12) -#define SYSCON2_CLKENSL		(1 << 13) -#define SYSCON2_BUZFREQ		(1 << 14) - -/* common bits: UARTDR1 / UARTDR2 */ -#define UARTDR_FRMERR		(1 << 8) -#define UARTDR_PARERR		(1 << 9) -#define UARTDR_OVERR		(1 << 10) - -/* common bits: UBRLCR1 / UBRLCR2 */ -#define UBRLCR_BAUD_MASK	((1 << 12) - 1) -#define UBRLCR_BREAK		(1 << 12) -#define UBRLCR_PRTEN		(1 << 13) -#define UBRLCR_EVENPRT		(1 << 14) -#define UBRLCR_XSTOP		(1 << 15) -#define UBRLCR_FIFOEN		(1 << 16) -#define UBRLCR_WRDLEN5		(0 << 17) -#define UBRLCR_WRDLEN6		(1 << 17) -#define UBRLCR_WRDLEN7		(2 << 17) -#define UBRLCR_WRDLEN8		(3 << 17) -#define UBRLCR_WRDLEN_MASK	(3 << 17) - -#define SYNCIO_SMCKEN		(1 << 13) -#define SYNCIO_TXFRMEN		(1 << 14) - -#endif /* __ASM_HARDWARE_CLPS7111_H */ diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h index 7ecd793b8f5..ad774f37c47 100644 --- a/arch/arm/include/asm/hardware/coresight.h +++ b/arch/arm/include/asm/hardware/coresight.h @@ -24,8 +24,8 @@  #define TRACER_TIMEOUT 10000  #define etm_writel(t, v, x) \ -	(__raw_writel((v), (t)->etm_regs + (x))) -#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x))) +	(writel_relaxed((v), (t)->etm_regs + (x))) +#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))  /* CoreSight Management Registers */  #define CSMR_LOCKACCESS 0xfb0 @@ -36,7 +36,7 @@  /* CoreSight Component Registers */  #define CSCR_CLASS	0xff4 -#define UNLOCK_MAGIC	0xc5acce55 +#define CS_LAR_KEY	0xc5acce55  /* ETM control register, "ETM Architecture", 3.3.1 */  #define ETMR_CTRL		0 @@ -142,16 +142,16 @@  #define ETBFF_TRIGFL		BIT(10)  #define etb_writel(t, 
v, x) \ -	(__raw_writel((v), (t)->etb_regs + (x))) -#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x))) +	(writel_relaxed((v), (t)->etb_regs + (x))) +#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))  #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)  #define etm_unlock(t) \ -	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) +	do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)  #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)  #define etb_unlock(t) \ -	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) +	do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)  #endif /* __ASM_HARDWARE_CORESIGHT_H */ diff --git a/arch/arm/include/asm/hardware/cs89712.h b/arch/arm/include/asm/hardware/cs89712.h deleted file mode 100644 index f75626933e9..00000000000 --- a/arch/arm/include/asm/hardware/cs89712.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/cs89712.h - * - *  This file contains the hardware definitions of the CS89712 - *  additional internal registers. - * - *  Copyright (C) 2001 Thomas Gleixner autronix automation <gleixner@autronix.de> - *			 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ -#ifndef __ASM_HARDWARE_CS89712_H -#define __ASM_HARDWARE_CS89712_H - -/* -*	CS89712 additional registers -*/ -                                   -#define PCDR			0x0002	/* Port C Data register ---------------------------- */ -#define PCDDR			0x0042	/* Port C Data Direction register ------------------ */ -#define SDCONF			0x2300  /* SDRAM Configuration register ---------------------*/ -#define SDRFPR			0x2340  /* SDRAM Refresh period register --------------------*/ - -#define SDCONF_ACTIVE		(1 << 10) -#define SDCONF_CLKCTL		(1 << 9) -#define SDCONF_WIDTH_4		(0 << 7) -#define SDCONF_WIDTH_8		(1 << 7) -#define SDCONF_WIDTH_16		(2 << 7) -#define SDCONF_WIDTH_32		(3 << 7) -#define SDCONF_SIZE_16		(0 << 5) -#define SDCONF_SIZE_64		(1 << 5) -#define SDCONF_SIZE_128		(2 << 5) -#define SDCONF_SIZE_256		(3 << 5) -#define SDCONF_CASLAT_2		(2) -#define SDCONF_CASLAT_3		(3) - -#endif /* __ASM_HARDWARE_CS89712_H */ diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S deleted file mode 100644 index 22c689255e6..00000000000 --- a/arch/arm/include/asm/hardware/debug-8250.S +++ /dev/null @@ -1,29 +0,0 @@ -/* - * arch/arm/include/asm/hardware/debug-8250.S - * - *  Copyright (C) 1994-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/serial_reg.h> - -		.macro	senduart,rd,rx -		strb	\rd, [\rx, #UART_TX << UART_SHIFT] -		.endm - -		.macro	busyuart,rd,rx -1002:		ldrb	\rd, [\rx, #UART_LSR << UART_SHIFT] -		and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE -		teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE -		bne	1002b -		.endm - -		.macro	waituart,rd,rx -#ifdef FLOW_CONTROL -1001:		ldrb	\rd, [\rx, #UART_MSR << UART_SHIFT] -		tst	\rd, #UART_MSR_CTS -		beq	1001b -#endif -		.endm diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/asm/hardware/debug-pl01x.S deleted file mode 100644 index f9fd083eff6..00000000000 --- a/arch/arm/include/asm/hardware/debug-pl01x.S +++ /dev/null @@ -1,29 +0,0 @@ -/* arch/arm/include/asm/hardware/debug-pl01x.S - * - * Debugging macro include header - * - *  Copyright (C) 1994-1999 Russell King - *  Moved from linux/arch/arm/kernel/debug.S by Ben Dooks - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * -*/ -#include <linux/amba/serial.h> - -		.macro	senduart,rd,rx -		strb	\rd, [\rx, #UART01x_DR] -		.endm - -		.macro	waituart,rd,rx -1001:		ldr	\rd, [\rx, #UART01x_FR] -		tst	\rd, #UART01x_FR_TXFF -		bne	1001b -		.endm - -		.macro	busyuart,rd,rx -1001:		ldr	\rd, [\rx, #UART01x_FR] -		tst	\rd, #UART01x_FR_BUSY -		bne	1001b -		.endm diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S index e0af4983723..8c215acd9b5 100644 --- a/arch/arm/include/asm/hardware/entry-macro-iomd.S +++ b/arch/arm/include/asm/hardware/entry-macro-iomd.S @@ -11,14 +11,6 @@  /* IOC / IOMD based hardware */  #include <asm/hardware/iomd.h> -		.macro	disable_fiq -		mov	r12, #ioc_base_high -		.if	ioc_base_low -		orr	r12, r12, #ioc_base_low -		.endif -		strb	r12, [r12, #0x38]	@ Disable FIQ register -		.endm -  		.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp  		ldrb	\irqstat, [\base, #IOMD_IRQREQB]	@ get high priority first  		ldr	\tmp, =irq_prio_h diff --git a/arch/arm/include/asm/hardware/ep7211.h b/arch/arm/include/asm/hardware/ep7211.h deleted file mode 100644 index 654d5f625c4..00000000000 --- a/arch/arm/include/asm/hardware/ep7211.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/ep7211.h - * - *  This file contains the hardware definitions of the EP7211 internal - *  registers. - * - *  Copyright (C) 2001 Blue Mug, Inc.  All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ -#ifndef __ASM_HARDWARE_EP7211_H -#define __ASM_HARDWARE_EP7211_H - -#include <asm/hardware/clps7111.h> - -/* - * define EP7211_BASE to be the base address of the region - * you want to access. 
- */ - -#define EP7211_PHYS_BASE	(0x80000000) - -/* - * XXX miket@bluemug.com: need to introduce EP7211 registers (those not - * present in 7212) here. - */ - -#endif /* __ASM_HARDWARE_EP7211_H */ diff --git a/arch/arm/include/asm/hardware/ep7212.h b/arch/arm/include/asm/hardware/ep7212.h deleted file mode 100644 index 3b43bbeaf1d..00000000000 --- a/arch/arm/include/asm/hardware/ep7212.h +++ /dev/null @@ -1,83 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/ep7212.h - * - *  This file contains the hardware definitions of the EP7212 internal - *  registers. - * - *  Copyright (C) 2000 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ -#ifndef __ASM_HARDWARE_EP7212_H -#define __ASM_HARDWARE_EP7212_H - -/* - * define EP7212_BASE to be the base address of the region - * you want to access. - */ - -#define EP7212_PHYS_BASE	(0x80000000) - -#ifndef __ASSEMBLY__ -#define ep_readl(off)		__raw_readl(EP7212_BASE + (off)) -#define ep_writel(val,off)	__raw_writel(val, EP7212_BASE + (off)) -#endif - -/* - * These registers are specific to the EP7212 only - */ -#define DAIR			0x2000 -#define DAIR0			0x2040 -#define DAIDR1			0x2080 -#define DAIDR2			0x20c0 -#define DAISR			0x2100 -#define SYSCON3			0x2200 -#define INTSR3			0x2240 -#define INTMR3			0x2280 -#define LEDFLSH			0x22c0 - -#define DAIR_DAIEN		(1 << 16) -#define DAIR_ECS		(1 << 17) -#define DAIR_LCTM		(1 << 19) -#define DAIR_LCRM		(1 << 20) -#define DAIR_RCTM		(1 << 21) -#define DAIR_RCRM		(1 << 22) -#define DAIR_LBM		(1 << 23) - -#define DAIDR2_FIFOEN		(1 << 15) -#define DAIDR2_FIFOLEFT		(0x0d << 16) -#define DAIDR2_FIFORIGHT	(0x11 << 16) - -#define DAISR_RCTS		(1 << 0) -#define DAISR_RCRS		(1 << 1) -#define DAISR_LCTS		(1 << 2) -#define DAISR_LCRS		(1 << 3) -#define DAISR_RCTU		(1 << 4) -#define DAISR_RCRO		(1 << 5) -#define DAISR_LCTU		(1 << 6) -#define DAISR_LCRO		(1 << 7) -#define DAISR_RCNF		(1 << 8) -#define DAISR_RCNE		(1 << 9) -#define DAISR_LCNF		(1 << 10) -#define DAISR_LCNE		(1 << 11) -#define DAISR_FIFO		(1 << 12) - -#define SYSCON3_ADCCON		(1 << 0) -#define SYSCON3_DAISEL		(1 << 3) -#define SYSCON3_ADCCKNSEN	(1 << 4) -#define SYSCON3_FASTWAKE	(1 << 8) -#define SYSCON3_DAIEN		(1 << 9) - -#endif /* __ASM_HARDWARE_EP7212_H */ diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h deleted file mode 100644 index 7f34333bb54..00000000000 --- a/arch/arm/include/asm/hardware/gic.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/gic.h - * - *  Copyright (C) 2002 ARM Limited, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __ASM_ARM_HARDWARE_GIC_H -#define __ASM_ARM_HARDWARE_GIC_H - -#include <linux/compiler.h> - -#define GIC_CPU_CTRL			0x00 -#define GIC_CPU_PRIMASK			0x04 -#define GIC_CPU_BINPOINT		0x08 -#define GIC_CPU_INTACK			0x0c -#define GIC_CPU_EOI			0x10 -#define GIC_CPU_RUNNINGPRI		0x14 -#define GIC_CPU_HIGHPRI			0x18 - -#define GIC_DIST_CTRL			0x000 -#define GIC_DIST_CTR			0x004 -#define GIC_DIST_ENABLE_SET		0x100 -#define GIC_DIST_ENABLE_CLEAR		0x180 -#define GIC_DIST_PENDING_SET		0x200 -#define GIC_DIST_PENDING_CLEAR		0x280 -#define GIC_DIST_ACTIVE_BIT		0x300 -#define GIC_DIST_PRI			0x400 -#define GIC_DIST_TARGET			0x800 -#define GIC_DIST_CONFIG			0xc00 -#define GIC_DIST_SOFTINT		0xf00 - -#ifndef __ASSEMBLY__ -void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start); -void gic_cpu_init(unsigned int gic_nr, void __iomem *base); -void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); -void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); -#endif - -#endif diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 9b28f1243bd..240b29ef17d 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,  	return slot_cnt;  } -static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) -{ -	return 0; -} - -static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, -					struct iop_adma_chan *chan) -{ -	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - -	switch (chan->device->id) { -	case DMA0_ID: -	case DMA1_ID: -		return hw_desc.dma->dest_addr; -	case AAU_ID: -		return hw_desc.aau->dest_addr; -	default: -		BUG(); -	} -	return 0; -} - - -static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, -					  struct iop_adma_chan *chan) -{ -	BUG(); -	return 0; -} -  static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,  					struct iop_adma_chan *chan)  { diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h deleted file mode 100644 index b69d972b1f7..00000000000 --- a/arch/arm/include/asm/hardware/iop3xx-gpio.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * arch/arm/include/asm/hardware/iop3xx-gpio.h - * - * IOP3xx GPIO wrappers - * - * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org> - * Based on IXP4XX gpio.h file - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H -#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H - -#include <mach/hardware.h> -#include <asm-generic/gpio.h> - -#define IOP3XX_N_GPIOS	8 - -static inline int gpio_get_value(unsigned gpio) -{ -	if (gpio > IOP3XX_N_GPIOS) -		return __gpio_get_value(gpio); - -	return gpio_line_get(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ -	if (gpio > IOP3XX_N_GPIOS) { -		__gpio_set_value(gpio, value); -		return; -	} -	gpio_line_set(gpio, value); -} - -static inline int gpio_cansleep(unsigned gpio) -{ -	if (gpio < IOP3XX_N_GPIOS) -		return 0; -	else -		return __gpio_cansleep(gpio); -} - -/* - * The GPIOs are not generating any interrupt - * Note : manuals are not clear about this - */ -static inline int gpio_to_irq(int gpio) -{ -	return -EINVAL; -} - -static inline int irq_to_gpio(int gpio) -{ -	return -EINVAL; -} - -#endif - diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 5daea2961d4..2594a95ff19 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -18,16 +18,9 @@  /*   * IOP3XX GPIO handling   */ -#define GPIO_IN			0 -#define GPIO_OUT		1 -#define GPIO_LOW		0 -#define GPIO_HIGH		1  #define IOP3XX_GPIO_LINE(x)	(x)  #ifndef __ASSEMBLY__ -extern void gpio_line_config(int line, int direction); -extern int  gpio_line_get(int line); -extern void gpio_line_set(int line, int value);  extern int init_atu;  extern int iop3xx_get_init_atu(void);  #endif @@ -37,7 +30,7 @@ extern int iop3xx_get_init_atu(void);   * IOP3XX processor registers   */  #define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000 -#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000 +#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000  #define IOP3XX_PERIPHERAL_SIZE		0x00002000  #define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\  					IOP3XX_PERIPHERAL_SIZE - 1) @@ -168,11 +161,6 @@ extern int iop3xx_get_init_atu(void);  /* PERCR0 DOESN'T EXIST - index from 1! 
*/  #define IOP3XX_PERCR0		(volatile u32 *)IOP3XX_REG_ADDR(0x0710) -/* General Purpose I/O  */ -#define IOP3XX_GPOE		(volatile u32 *)IOP3XX_GPIO_REG(0x0000) -#define IOP3XX_GPID		(volatile u32 *)IOP3XX_GPIO_REG(0x0004) -#define IOP3XX_GPOD		(volatile u32 *)IOP3XX_GPIO_REG(0x0008) -  /* Timers  */  #define IOP3XX_TU_TMR0		(volatile u32 *)IOP3XX_TIMER_REG(0x0000)  #define IOP3XX_TU_TMR1		(volatile u32 *)IOP3XX_TIMER_REG(0x0004) @@ -217,23 +205,18 @@ extern int iop3xx_get_init_atu(void);  #define IOP3XX_PCI_LOWER_MEM_PA	0x80000000  #define IOP3XX_PCI_MEM_WINDOW_SIZE	0x08000000 -#define IOP3XX_PCI_IO_WINDOW_SIZE	0x00010000  #define IOP3XX_PCI_LOWER_IO_PA		0x90000000 -#define IOP3XX_PCI_LOWER_IO_VA		0xfe000000 -#define IOP3XX_PCI_LOWER_IO_BA		0x90000000 -#define IOP3XX_PCI_UPPER_IO_PA		(IOP3XX_PCI_LOWER_IO_PA +\ -					IOP3XX_PCI_IO_WINDOW_SIZE - 1) -#define IOP3XX_PCI_UPPER_IO_VA		(IOP3XX_PCI_LOWER_IO_VA +\ -					IOP3XX_PCI_IO_WINDOW_SIZE - 1) -#define IOP3XX_PCI_IO_PHYS_TO_VIRT(addr) (((u32) (addr) -\ -					IOP3XX_PCI_LOWER_IO_PA) +\ -					IOP3XX_PCI_LOWER_IO_VA) - +#define IOP3XX_PCI_LOWER_IO_BA		0x00000000  #ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <linux/reboot.h> +  void iop3xx_map_io(void);  void iop_init_cp6_handler(void);  void iop_init_time(unsigned long tickrate); +void iop3xx_restart(enum reboot_mode, const char *);  static inline u32 read_tmr0(void)  { diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 59b8c3892f7..250760e0810 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -49,7 +49,6 @@ struct iop_adma_device {  /**   * struct iop_adma_chan - internal representation of an ADMA device   * @pending: allows batching of hardware operations - * @completed_cookie: identifier for the most recently completed operation   * @lock: serializes enqueue/dequeue operations to the slot pool   * @mmr_base: memory mapped register base   * @chain: device chain view of the descriptors @@ -62,7 +61,6 @@ struct iop_adma_device {   */  struct iop_adma_chan {  	int pending; -	dma_cookie_t completed_cookie;  	spinlock_t lock; /* protects the descriptor slot pool */  	void __iomem *mmr_base;  	struct list_head chain; @@ -84,8 +82,6 @@ struct iop_adma_chan {   * @slot_cnt: total slots used in an transaction (group of operations)   * @slots_per_op: number of slots per operation   * @idx: pool index - * @unmap_src_cnt: number of xor sources - * @unmap_len: transaction bytecount   * @tx_list: list of descriptors that are associated with one operation   * @async_tx: support for the async_tx api   * @group_list: list of slots that make up a multi-descriptor transaction @@ -101,8 +97,6 @@ struct iop_adma_desc_slot {  	u16 slot_cnt;  	u16 slots_per_op;  	u16 idx; -	u16 unmap_src_cnt; -	size_t unmap_len;  	struct list_head tx_list;  	struct dma_async_tx_descriptor async_tx;  	union { diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index 21fa272301f..d36a73d7c0e 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -9,7 +9,10 @@  #ifndef __ASM_HARDWARE_IT8152_H  #define __ASM_HARDWARE_IT8152_H -extern unsigned long it8152_base_address; + +#include <mach/irqs.h> + +extern void __iomem *it8152_base_address;  #define IT8152_IO_BASE			(it8152_base_address + 0x03e00000)  #define IT8152_CFGREG_BASE		(it8152_base_address + 0x03f00000) @@ -76,6 +79,7 @@ extern unsigned long it8152_base_address;    
IT8152_PD_IRQ(0)  Audio controller (ACR)   */  #define IT8152_IRQ(x)   (IRQ_BOARD_START + (x)) +#define IT8152_LAST_IRQ	(IRQ_BOARD_START + 40)  /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */  #define IT8152_LD_IRQ_COUNT     9 @@ -104,8 +108,8 @@ struct pci_sys_data;  extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);  extern void it8152_init_irq(void); -extern int it8152_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin); +extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);  extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); -extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys); +extern struct pci_ops it8152_ops;  #endif /* __ASM_HARDWARE_IT8152_H */ diff --git a/arch/arm/include/asm/hardware/linkup-l1110.h b/arch/arm/include/asm/hardware/linkup-l1110.h deleted file mode 100644 index 7ec91168a57..00000000000 --- a/arch/arm/include/asm/hardware/linkup-l1110.h +++ /dev/null @@ -1,48 +0,0 @@ -/* -* -* Definitions for H3600 Handheld Computer -* -* Copyright 2001 Compaq Computer Corporation. -* -* Use consistent with the GNU GPL is permitted, -* provided that this copyright notice is -* preserved in its entirety in all copies and derived works. -* -* COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, -* AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS -* FITNESS FOR ANY PARTICULAR PURPOSE. -* -* Author: Jamey Hicks. -* -*/ - -/* LinkUp Systems PCCard/CompactFlash Interface for SA-1100 */ - -/* PC Card Status Register */ -#define LINKUP_PRS_S1	(1 << 0) /* voltage control bits S1-S4 */ -#define LINKUP_PRS_S2	(1 << 1) -#define LINKUP_PRS_S3	(1 << 2) -#define LINKUP_PRS_S4	(1 << 3) -#define LINKUP_PRS_BVD1	(1 << 4) -#define LINKUP_PRS_BVD2	(1 << 5) -#define LINKUP_PRS_VS1	(1 << 6) -#define LINKUP_PRS_VS2	(1 << 7) -#define LINKUP_PRS_RDY	(1 << 8) -#define LINKUP_PRS_CD1	(1 << 9) -#define LINKUP_PRS_CD2	(1 << 10) - -/* PC Card Command Register */ -#define LINKUP_PRC_S1	(1 << 0) -#define LINKUP_PRC_S2	(1 << 1) -#define LINKUP_PRC_S3	(1 << 2) -#define LINKUP_PRC_S4	(1 << 3) -#define LINKUP_PRC_RESET (1 << 4) -#define LINKUP_PRC_APOE	(1 << 5) /* Auto Power Off Enable: clears S1-S4 when either nCD goes high */ -#define LINKUP_PRC_CFE	(1 << 6) /* CompactFlash mode Enable: addresses A[10:0] only, A[25:11] high */ -#define LINKUP_PRC_SOE	(1 << 7) /* signal output driver enable */ -#define LINKUP_PRC_SSP	(1 << 8) /* sock select polarity: 0 for socket 0, 1 for socket 1 */ -#define LINKUP_PRC_MBZ	(1 << 15) /* must be zero */ - -struct linkup_l1110 { -	volatile short prc; -}; diff --git a/arch/arm/include/asm/hardware/pci_v3.h b/arch/arm/include/asm/hardware/pci_v3.h deleted file mode 100644 index 2811c7e2cfd..00000000000 --- a/arch/arm/include/asm/hardware/pci_v3.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/pci_v3.h - * - *  Internal header file PCI V3 chip - * - *  Copyright (C) ARM Limited - *  Copyright (C) 2000-2001 Deep Blue Solutions Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ -#ifndef ASM_ARM_HARDWARE_PCI_V3_H -#define ASM_ARM_HARDWARE_PCI_V3_H - -/* ------------------------------------------------------------------------------- - *  V3 Local Bus to PCI Bridge definitions - * ------------------------------------------------------------------------------- - *  Registers (these are taken from page 129 of the EPC User's Manual Rev 1.04 - *  All V3 register names are prefaced by V3_ to avoid clashing with any other - *  PCI definitions.  Their names match the user's manual. - *  - *  I'm assuming that I20 is disabled. - *  - */ -#define V3_PCI_VENDOR                   0x00000000 -#define V3_PCI_DEVICE                   0x00000002 -#define V3_PCI_CMD                      0x00000004 -#define V3_PCI_STAT                     0x00000006 -#define V3_PCI_CC_REV                   0x00000008 -#define V3_PCI_HDR_CFG                  0x0000000C -#define V3_PCI_IO_BASE                  0x00000010 -#define V3_PCI_BASE0                    0x00000014 -#define V3_PCI_BASE1                    0x00000018 -#define V3_PCI_SUB_VENDOR               0x0000002C -#define V3_PCI_SUB_ID                   0x0000002E -#define V3_PCI_ROM                      0x00000030 -#define V3_PCI_BPARAM                   0x0000003C -#define V3_PCI_MAP0                     0x00000040 -#define V3_PCI_MAP1                     0x00000044 -#define V3_PCI_INT_STAT                 0x00000048 -#define V3_PCI_INT_CFG                  0x0000004C  -#define V3_LB_BASE0                     0x00000054 -#define V3_LB_BASE1                     0x00000058 -#define V3_LB_MAP0                      0x0000005E -#define V3_LB_MAP1                      0x00000062 -#define V3_LB_BASE2                     0x00000064 -#define V3_LB_MAP2                      0x00000066 -#define V3_LB_SIZE                      0x00000068 -#define V3_LB_IO_BASE                   0x0000006E -#define V3_FIFO_CFG                     0x00000070 -#define V3_FIFO_PRIORITY                0x00000072 -#define V3_FIFO_STAT                    0x00000074 -#define V3_LB_ISTAT                     0x00000076 -#define V3_LB_IMASK                     0x00000077 -#define V3_SYSTEM                       0x00000078 -#define V3_LB_CFG                       0x0000007A -#define V3_PCI_CFG                      0x0000007C -#define V3_DMA_PCI_ADR0                 0x00000080 -#define V3_DMA_PCI_ADR1                 0x00000090 -#define V3_DMA_LOCAL_ADR0               0x00000084 -#define V3_DMA_LOCAL_ADR1               0x00000094 -#define V3_DMA_LENGTH0                  0x00000088 -#define V3_DMA_LENGTH1                  0x00000098 -#define V3_DMA_CSR0                     0x0000008B -#define V3_DMA_CSR1                     0x0000009B -#define V3_DMA_CTLB_ADR0                0x0000008C -#define V3_DMA_CTLB_ADR1                0x0000009C -#define V3_DMA_DELAY                    0x000000E0 -#define V3_MAIL_DATA                    0x000000C0 -#define V3_PCI_MAIL_IEWR                0x000000D0 -#define V3_PCI_MAIL_IERD                0x000000D2 -#define V3_LB_MAIL_IEWR                 0x000000D4 -#define V3_LB_MAIL_IERD                 0x000000D6 -#define V3_MAIL_WR_STAT                 0x000000D8 -#define V3_MAIL_RD_STAT                 0x000000DA -#define V3_QBA_MAP                      0x000000DC - -/*  PCI COMMAND REGISTER bits - */ -#define V3_COMMAND_M_FBB_EN    
         (1 << 9) -#define V3_COMMAND_M_SERR_EN            (1 << 8) -#define V3_COMMAND_M_PAR_EN             (1 << 6) -#define V3_COMMAND_M_MASTER_EN          (1 << 2) -#define V3_COMMAND_M_MEM_EN             (1 << 1) -#define V3_COMMAND_M_IO_EN              (1 << 0) - -/*  SYSTEM REGISTER bits - */ -#define V3_SYSTEM_M_RST_OUT             (1 << 15) -#define V3_SYSTEM_M_LOCK                (1 << 14) - -/*  PCI_CFG bits - */ -#define V3_PCI_CFG_M_I2O_EN		(1 << 15) -#define V3_PCI_CFG_M_IO_REG_DIS		(1 << 14) -#define V3_PCI_CFG_M_IO_DIS		(1 << 13) -#define V3_PCI_CFG_M_EN3V		(1 << 12) -#define V3_PCI_CFG_M_RETRY_EN           (1 << 10) -#define V3_PCI_CFG_M_AD_LOW1            (1 << 9) -#define V3_PCI_CFG_M_AD_LOW0            (1 << 8) - -/*  PCI_BASE register bits (PCI -> Local Bus) - */ -#define V3_PCI_BASE_M_ADR_BASE          0xFFF00000 -#define V3_PCI_BASE_M_ADR_BASEL         0x000FFF00 -#define V3_PCI_BASE_M_PREFETCH          (1 << 3) -#define V3_PCI_BASE_M_TYPE              (3 << 1) -#define V3_PCI_BASE_M_IO                (1 << 0) - -/*  PCI MAP register bits (PCI -> Local bus) - */ -#define V3_PCI_MAP_M_MAP_ADR            0xFFF00000 -#define V3_PCI_MAP_M_RD_POST_INH        (1 << 15) -#define V3_PCI_MAP_M_ROM_SIZE           (3 << 10) -#define V3_PCI_MAP_M_SWAP               (3 << 8) -#define V3_PCI_MAP_M_ADR_SIZE           0x000000F0 -#define V3_PCI_MAP_M_REG_EN             (1 << 1) -#define V3_PCI_MAP_M_ENABLE             (1 << 0) - -/* - *  LB_BASE0,1 register bits (Local bus -> PCI) - */ -#define V3_LB_BASE_ADR_BASE		0xfff00000 -#define V3_LB_BASE_SWAP			(3 << 8) -#define V3_LB_BASE_ADR_SIZE		(15 << 4) -#define V3_LB_BASE_PREFETCH		(1 << 3) -#define V3_LB_BASE_ENABLE		(1 << 0) - -#define V3_LB_BASE_ADR_SIZE_1MB		(0 << 4) -#define V3_LB_BASE_ADR_SIZE_2MB		(1 << 4) -#define V3_LB_BASE_ADR_SIZE_4MB		(2 << 4) -#define V3_LB_BASE_ADR_SIZE_8MB		(3 << 4) -#define V3_LB_BASE_ADR_SIZE_16MB	(4 << 4) -#define V3_LB_BASE_ADR_SIZE_32MB	(5 << 4) -#define V3_LB_BASE_ADR_SIZE_64MB	(6 << 4) -#define V3_LB_BASE_ADR_SIZE_128MB	(7 << 4) -#define V3_LB_BASE_ADR_SIZE_256MB	(8 << 4) -#define V3_LB_BASE_ADR_SIZE_512MB	(9 << 4) -#define V3_LB_BASE_ADR_SIZE_1GB		(10 << 4) -#define V3_LB_BASE_ADR_SIZE_2GB		(11 << 4) - -#define v3_addr_to_lb_base(a)	((a) & V3_LB_BASE_ADR_BASE) - -/* - *  LB_MAP0,1 register bits (Local bus -> PCI) - */ -#define V3_LB_MAP_MAP_ADR		0xfff0 -#define V3_LB_MAP_TYPE			(7 << 1) -#define V3_LB_MAP_AD_LOW_EN		(1 << 0) - -#define V3_LB_MAP_TYPE_IACK		(0 << 1) -#define V3_LB_MAP_TYPE_IO		(1 << 1) -#define V3_LB_MAP_TYPE_MEM		(3 << 1) -#define V3_LB_MAP_TYPE_CONFIG		(5 << 1) -#define V3_LB_MAP_TYPE_MEM_MULTIPLE	(6 << 1) - -#define v3_addr_to_lb_map(a)	(((a) >> 16) & V3_LB_MAP_MAP_ADR) - -/* - *  LB_BASE2 register bits (Local bus -> PCI IO) - */ -#define V3_LB_BASE2_ADR_BASE		0xff00 -#define V3_LB_BASE2_SWAP		(3 << 6) -#define V3_LB_BASE2_ENABLE		(1 << 0) - -#define v3_addr_to_lb_base2(a)	(((a) >> 16) & V3_LB_BASE2_ADR_BASE) - -/* - *  LB_MAP2 register bits (Local bus -> PCI IO) - */ -#define V3_LB_MAP2_MAP_ADR		0xff00 - -#define v3_addr_to_lb_map2(a)	(((a) >> 16) & V3_LB_MAP2_MAP_ADR) - -#endif diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h deleted file mode 100644 index f35b86e68dd..00000000000 --- a/arch/arm/include/asm/hardware/pl080.h +++ /dev/null @@ -1,140 +0,0 @@ -/* arch/arm/include/asm/hardware/pl080.h - * - * Copyright 2008 Openmoko, Inc. 
- * Copyright 2008 Simtec Electronics - *      http://armlinux.simtec.co.uk/ - *      Ben Dooks <ben@simtec.co.uk> - * - * ARM PrimeCell PL080 DMA controller - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -/* Note, there are some Samsung updates to this controller block which - * make it not entierly compatible with the PL080 specification from - * ARM. When in doubt, check the Samsung documentation first. - * - * The Samsung defines are PL080S, and add an extra controll register, - * the ability to move more than 2^11 counts of data and some extra - * OneNAND features. -*/ - -#define PL080_INT_STATUS			(0x00) -#define PL080_TC_STATUS				(0x04) -#define PL080_TC_CLEAR				(0x08) -#define PL080_ERR_STATUS			(0x0C) -#define PL080_ERR_CLEAR				(0x10) -#define PL080_RAW_TC_STATUS			(0x14) -#define PL080_RAW_ERR_STATUS			(0x18) -#define PL080_EN_CHAN				(0x1c) -#define PL080_SOFT_BREQ				(0x20) -#define PL080_SOFT_SREQ				(0x24) -#define PL080_SOFT_LBREQ			(0x28) -#define PL080_SOFT_LSREQ			(0x2C) - -#define PL080_CONFIG				(0x30) -#define PL080_CONFIG_M2_BE			(1 << 2) -#define PL080_CONFIG_M1_BE			(1 << 1) -#define PL080_CONFIG_ENABLE			(1 << 0) - -#define PL080_SYNC				(0x34) - -/* Per channel configuration registers */ - -#define PL080_Cx_STRIDE				(0x20) -#define PL080_Cx_BASE(x)			((0x100 + (x * 0x20))) -#define PL080_Cx_SRC_ADDR(x)			((0x100 + (x * 0x20))) -#define PL080_Cx_DST_ADDR(x)			((0x104 + (x * 0x20))) -#define PL080_Cx_LLI(x)				((0x108 + (x * 0x20))) -#define PL080_Cx_CONTROL(x)			((0x10C + (x * 0x20))) -#define PL080_Cx_CONFIG(x)			((0x110 + (x * 0x20))) -#define PL080S_Cx_CONTROL2(x)			((0x110 + (x * 0x20))) -#define PL080S_Cx_CONFIG(x)			((0x114 + (x * 0x20))) - -#define PL080_CH_SRC_ADDR			(0x00) -#define PL080_CH_DST_ADDR			(0x04) -#define PL080_CH_LLI				(0x08) -#define PL080_CH_CONTROL			(0x0C) -#define PL080_CH_CONFIG				(0x10) -#define PL080S_CH_CONTROL2			(0x10) -#define PL080S_CH_CONFIG			(0x14) - -#define PL080_LLI_ADDR_MASK			(0x3fffffff << 2) -#define PL080_LLI_ADDR_SHIFT			(2) -#define PL080_LLI_LM_AHB2			(1 << 0) - -#define PL080_CONTROL_TC_IRQ_EN			(1 << 31) -#define PL080_CONTROL_PROT_MASK			(0x7 << 28) -#define PL080_CONTROL_PROT_SHIFT		(28) -#define PL080_CONTROL_PROT_CACHE		(1 << 30) -#define PL080_CONTROL_PROT_BUFF			(1 << 29) -#define PL080_CONTROL_PROT_SYS			(1 << 28) -#define PL080_CONTROL_DST_INCR			(1 << 27) -#define PL080_CONTROL_SRC_INCR			(1 << 26) -#define PL080_CONTROL_DST_AHB2			(1 << 25) -#define PL080_CONTROL_SRC_AHB2			(1 << 24) -#define PL080_CONTROL_DWIDTH_MASK		(0x7 << 21) -#define PL080_CONTROL_DWIDTH_SHIFT		(21) -#define PL080_CONTROL_SWIDTH_MASK		(0x7 << 18) -#define PL080_CONTROL_SWIDTH_SHIFT		(18) -#define PL080_CONTROL_DB_SIZE_MASK		(0x7 << 15) -#define PL080_CONTROL_DB_SIZE_SHIFT		(15) -#define PL080_CONTROL_SB_SIZE_MASK		(0x7 << 12) -#define PL080_CONTROL_SB_SIZE_SHIFT		(12) -#define PL080_CONTROL_TRANSFER_SIZE_MASK	(0xfff << 0) -#define PL080_CONTROL_TRANSFER_SIZE_SHIFT	(0) - -#define PL080_BSIZE_1				(0x0) -#define PL080_BSIZE_4				(0x1) -#define PL080_BSIZE_8				(0x2) -#define PL080_BSIZE_16				(0x3) -#define PL080_BSIZE_32				(0x4) -#define PL080_BSIZE_64				(0x5) -#define PL080_BSIZE_128				(0x6) -#define PL080_BSIZE_256				(0x7) - -#define PL080_WIDTH_8BIT			(0x0) -#define PL080_WIDTH_16BIT			(0x1) -#define PL080_WIDTH_32BIT			(0x2) - -#define PL080_CONFIG_HALT			(1 << 18) 
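/*
 * Usage sketch, for illustration only: the CONTROL field definitions
 * above combine into the control0 word of a pl080_lli descriptor.
 * pl080_mk_control() is a hypothetical helper, not part of this patch;
 * it builds a 32-bit-wide, burst-of-16, incrementing transfer with a
 * terminal-count interrupt. The transfer size is counted in source-width
 * units and is capped at the 12-bit field here; per the note at the top
 * of this file, the Samsung PL080S variant carries larger counts in an
 * extra control register.
 */
static inline u32 pl080_mk_control(u32 xfer_size)
{
	return PL080_CONTROL_TC_IRQ_EN |	/* IRQ on terminal count */
	       PL080_CONTROL_DST_INCR | PL080_CONTROL_SRC_INCR |
	       (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT) |
	       (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
	       (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT) |
	       (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
	       (xfer_size & PL080_CONTROL_TRANSFER_SIZE_MASK);
}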
-#define PL080_CONFIG_ACTIVE			(1 << 17)  /* RO */ -#define PL080_CONFIG_LOCK			(1 << 16) -#define PL080_CONFIG_TC_IRQ_MASK		(1 << 15) -#define PL080_CONFIG_ERR_IRQ_MASK		(1 << 14) -#define PL080_CONFIG_FLOW_CONTROL_MASK		(0x7 << 11) -#define PL080_CONFIG_FLOW_CONTROL_SHIFT		(11) -#define PL080_CONFIG_DST_SEL_MASK		(0xf << 6) -#define PL080_CONFIG_DST_SEL_SHIFT		(6) -#define PL080_CONFIG_SRC_SEL_MASK		(0xf << 1) -#define PL080_CONFIG_SRC_SEL_SHIFT		(1) -#define PL080_CONFIG_ENABLE			(1 << 0) - -#define PL080_FLOW_MEM2MEM			(0x0) -#define PL080_FLOW_MEM2PER			(0x1) -#define PL080_FLOW_PER2MEM			(0x2) -#define PL080_FLOW_SRC2DST			(0x3) -#define PL080_FLOW_SRC2DST_DST			(0x4) -#define PL080_FLOW_MEM2PER_PER			(0x5) -#define PL080_FLOW_PER2MEM_PER			(0x6) -#define PL080_FLOW_SRC2DST_SRC			(0x7) - -/* DMA linked list chain structure */ - -struct pl080_lli { -	u32	src_addr; -	u32	dst_addr; -	u32	next_lli; -	u32	control0; -}; - -struct pl080s_lli { -	u32	src_addr; -	u32	dst_addr; -	u32	next_lli; -	u32	control0; -	u32	control1; -}; - diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h deleted file mode 100644 index 575fa8186ca..00000000000 --- a/arch/arm/include/asm/hardware/pl330.h +++ /dev/null @@ -1,217 +0,0 @@ -/* linux/include/asm/hardware/pl330.h - * - * Copyright (C) 2010 Samsung Electronics Co. Ltd. - *	Jaswinder Singh <jassi.brar@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#ifndef __PL330_CORE_H -#define __PL330_CORE_H - -#define PL330_MAX_CHAN		8 -#define PL330_MAX_IRQS		32 -#define PL330_MAX_PERI		32 - -enum pl330_srccachectrl { -	SCCTRL0 = 0, /* Noncacheable and nonbufferable */ -	SCCTRL1, /* Bufferable only */ -	SCCTRL2, /* Cacheable, but do not allocate */ -	SCCTRL3, /* Cacheable and bufferable, but do not allocate */ -	SINVALID1, -	SINVALID2, -	SCCTRL6, /* Cacheable write-through, allocate on reads only */ -	SCCTRL7, /* Cacheable write-back, allocate on reads only */ -}; - -enum pl330_dstcachectrl { -	DCCTRL0 = 0, /* Noncacheable and nonbufferable */ -	DCCTRL1, /* Bufferable only */ -	DCCTRL2, /* Cacheable, but do not allocate */ -	DCCTRL3, /* Cacheable and bufferable, but do not allocate */ -	DINVALID1 = 8, -	DINVALID2, -	DCCTRL6, /* Cacheable write-through, allocate on writes only */ -	DCCTRL7, /* Cacheable write-back, allocate on writes only */ -}; - -/* Populated by the PL330 core driver for DMA API driver's info */ -struct pl330_config { -	u32	periph_id; -	u32	pcell_id; -#define DMAC_MODE_NS	(1 << 0) -	unsigned int	mode; -	unsigned int	data_bus_width:10; /* In number of bits */ -	unsigned int	data_buf_dep:10; -	unsigned int	num_chan:4; -	unsigned int	num_peri:6; -	u32		peri_ns; -	unsigned int	num_events:6; -	u32		irq_ns; -}; - -/* Handle to the DMAC provided to the PL330 core */ -struct pl330_info { -	/* Owning device */ -	struct device *dev; -	/* Size of MicroCode buffers for each channel. */ -	unsigned mcbufsz; -	/* ioremap'ed address of PL330 registers. */ -	void __iomem	*base; -	/* Client can freely use it. */ -	void	*client_data; -	/* PL330 core data, Client must not touch it. */ -	void	*pl330_data; -	/* Populated by the PL330 core driver during pl330_add */ -	struct pl330_config	pcfg; -	/* -	 * If the DMAC has some reset mechanism, then the -	 * client may want to provide pointer to the method. -	 */ -	void (*dmac_reset)(struct pl330_info *pi); -}; - -enum pl330_byteswap { -	SWAP_NO = 0, -	SWAP_2, -	SWAP_4, -	SWAP_8, -	SWAP_16, -}; - -/** - * Request Configuration. - * The PL330 core does not modify this and uses the last - * working configuration if the request doesn't provide any. - * - * The Client may want to provide this info only for the - * first request and a request with new settings. - */ -struct pl330_reqcfg { -	/* Address Incrementing */ -	unsigned dst_inc:1; -	unsigned src_inc:1; - -	/* -	 * For now, the SRC & DST protection levels -	 * and burst size/length are assumed same. -	 */ -	bool nonsecure; -	bool privileged; -	bool insnaccess; -	unsigned brst_len:5; -	unsigned brst_size:3; /* in power of 2 */ - -	enum pl330_dstcachectrl dcctl; -	enum pl330_srccachectrl scctl; -	enum pl330_byteswap swap; -}; - -/* - * One cycle of DMAC operation. - * There may be more than one xfer in a request. - */ -struct pl330_xfer { -	u32 src_addr; -	u32 dst_addr; -	/* Size to xfer */ -	u32 bytes; -	/* -	 * Pointer to next xfer in the list. -	 * The last xfer in the req must point to NULL. -	 */ -	struct pl330_xfer *next; -}; - -/* The xfer callbacks are made with one of these arguments. */ -enum pl330_op_err { -	/* The all xfers in the request were success. */ -	PL330_ERR_NONE, -	/* If req aborted due to global error. */ -	PL330_ERR_ABORT, -	/* If req failed due to problem with Channel. */ -	PL330_ERR_FAIL, -}; - -enum pl330_reqtype { -	MEMTOMEM, -	MEMTODEV, -	DEVTOMEM, -	DEVTODEV, -}; - -/* A request defining Scatter-Gather List ending with NULL xfer. 
*/ -struct pl330_req { -	enum pl330_reqtype rqtype; -	/* Index of peripheral for the xfer. */ -	unsigned peri:5; -	/* Unique token for this xfer, set by the client. */ -	void *token; -	/* Callback to be called after xfer. */ -	void (*xfer_cb)(void *token, enum pl330_op_err err); -	/* If NULL, req will be done at last set parameters. */ -	struct pl330_reqcfg *cfg; -	/* Pointer to first xfer in the request. */ -	struct pl330_xfer *x; -}; - -/* - * To know the status of the channel and DMAC, the client - * provides a pointer to this structure. The PL330 core - * fills it with current information. - */ -struct pl330_chanstatus { -	/* -	 * If the DMAC engine halted due to some error, -	 * the client should remove-add DMAC. -	 */ -	bool dmac_halted; -	/* -	 * If channel is halted due to some error, -	 * the client should ABORT/FLUSH and START the channel. -	 */ -	bool faulting; -	/* Location of last load */ -	u32 src_addr; -	/* Location of last store */ -	u32 dst_addr; -	/* -	 * Pointer to the currently active req, NULL if channel is -	 * inactive, even though the requests may be present. -	 */ -	struct pl330_req *top_req; -	/* Pointer to req waiting second in the queue if any. */ -	struct pl330_req *wait_req; -}; - -enum pl330_chan_op { -	/* Start the channel */ -	PL330_OP_START, -	/* Abort the active xfer */ -	PL330_OP_ABORT, -	/* Stop xfer and flush queue */ -	PL330_OP_FLUSH, -}; - -extern int pl330_add(struct pl330_info *); -extern void pl330_del(struct pl330_info *pi); -extern int pl330_update(const struct pl330_info *pi); -extern void pl330_release_channel(void *ch_id); -extern void *pl330_request_channel(const struct pl330_info *pi); -extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus); -extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op); -extern int pl330_submit_req(void *ch_id, struct pl330_req *r); - -#endif	/* __PL330_CORE_H */ diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index 92ed254c175..7c2bbc7f0be 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -132,34 +132,10 @@  #define SKPCR_DCLKEN	(1<<7)  #define SKPCR_PWMCLKEN	(1<<8) -/* - * USB Host controller - */ +/* USB Host controller */  #define SA1111_USB		0x0400  /* - * Offsets from SA1111_USB_BASE - */ -#define SA1111_USB_STATUS	0x0118 -#define SA1111_USB_RESET	0x011c -#define SA1111_USB_IRQTEST	0x0120 - -#define USB_RESET_FORCEIFRESET	(1 << 0) -#define USB_RESET_FORCEHCRESET	(1 << 1) -#define USB_RESET_CLKGENRESET	(1 << 2) -#define USB_RESET_SIMSCALEDOWN	(1 << 3) -#define USB_RESET_USBINTTEST	(1 << 4) -#define USB_RESET_SLEEPSTBYEN	(1 << 5) -#define USB_RESET_PWRSENSELOW	(1 << 6) -#define USB_RESET_PWRCTRLLOW	(1 << 7) - -#define USB_STATUS_IRQHCIRMTWKUP  (1 <<  7) -#define USB_STATUS_IRQHCIBUFFACC  (1 <<  8) -#define USB_STATUS_NIRQHCIM       (1 <<  9) -#define USB_STATUS_NHCIMFCLR      (1 << 10) -#define USB_STATUS_USBPWRSENSE    (1 << 11) - -/*   * Serial Audio Controller   *   * Registers @@ -327,22 +303,6 @@   *    PC_SSR		GPIO Block C Sleep State   */ -#define _PA_DDR		_SA1111( 0x1000 ) -#define _PA_DRR		_SA1111( 0x1004 ) -#define _PA_DWR		_SA1111( 0x1004 ) -#define _PA_SDR		_SA1111( 0x1008 ) -#define _PA_SSR		_SA1111( 0x100c ) -#define _PB_DDR		_SA1111( 0x1010 ) -#define _PB_DRR		_SA1111( 0x1014 ) -#define _PB_DWR		_SA1111( 0x1014 ) -#define _PB_SDR		_SA1111( 0x1018 ) -#define _PB_SSR		_SA1111( 0x101c ) -#define _PC_DDR		_SA1111( 0x1020 ) -#define _PC_DRR		_SA1111( 0x1024 ) -#define _PC_DWR		
_SA1111( 0x1024 ) -#define _PC_SDR		_SA1111( 0x1028 ) -#define _PC_SSR		_SA1111( 0x102c ) -  #define SA1111_GPIO	0x1000  #define SA1111_GPIO_PADDR	(0x000) @@ -425,106 +385,30 @@  #define SA1111_WAKEPOL0		0x0034  #define SA1111_WAKEPOL1		0x0038 -/* - * PS/2 Trackpad and Mouse Interfaces - * - * Registers - *    PS2CR		Control Register - *    PS2STAT		Status Register - *    PS2DATA		Transmit/Receive Data register - *    PS2CLKDIV		Clock Division Register - *    PS2PRECNT		Clock Precount Register - *    PS2TEST1		Test register 1 - *    PS2TEST2		Test register 2 - *    PS2TEST3		Test register 3 - *    PS2TEST4		Test register 4 - */ - +/* PS/2 Trackpad and Mouse Interfaces */  #define SA1111_KBD		0x0a00  #define SA1111_MSE		0x0c00 -/* - * These are offsets from the above bases. - */ -#define SA1111_PS2CR		0x0000 -#define SA1111_PS2STAT		0x0004 -#define SA1111_PS2DATA		0x0008 -#define SA1111_PS2CLKDIV	0x000c -#define SA1111_PS2PRECNT	0x0010 - -#define PS2CR_ENA		0x08 -#define PS2CR_FKD		0x02 -#define PS2CR_FKC		0x01 - -#define PS2STAT_STP		0x0100 -#define PS2STAT_TXE		0x0080 -#define PS2STAT_TXB		0x0040 -#define PS2STAT_RXF		0x0020 -#define PS2STAT_RXB		0x0010 -#define PS2STAT_ENA		0x0008 -#define PS2STAT_RXP		0x0004 -#define PS2STAT_KBD		0x0002 -#define PS2STAT_KBC		0x0001 +/* PCMCIA Interface */ +#define SA1111_PCMCIA		0x1600 -/* - * PCMCIA Interface - * - * Registers - *    PCSR	Status Register - *    PCCR	Control Register - *    PCSSR	Sleep State Register - */ - -#define SA1111_PCMCIA	0x1600 - -/* - * These are offsets from the above base. - */ -#define SA1111_PCCR	0x0000 -#define SA1111_PCSSR	0x0004 -#define SA1111_PCSR	0x0008 - -#define PCSR_S0_READY	(1<<0) -#define PCSR_S1_READY	(1<<1) -#define PCSR_S0_DETECT	(1<<2) -#define PCSR_S1_DETECT	(1<<3) -#define PCSR_S0_VS1	(1<<4) -#define PCSR_S0_VS2	(1<<5) -#define PCSR_S1_VS1	(1<<6) -#define PCSR_S1_VS2	(1<<7) -#define PCSR_S0_WP	(1<<8) -#define PCSR_S1_WP	(1<<9) -#define PCSR_S0_BVD1	(1<<10) -#define PCSR_S0_BVD2	(1<<11) -#define PCSR_S1_BVD1	(1<<12) -#define PCSR_S1_BVD2	(1<<13) - -#define PCCR_S0_RST	(1<<0) -#define PCCR_S1_RST	(1<<1) -#define PCCR_S0_FLT	(1<<2) -#define PCCR_S1_FLT	(1<<3) -#define PCCR_S0_PWAITEN	(1<<4) -#define PCCR_S1_PWAITEN	(1<<5) -#define PCCR_S0_PSE	(1<<6) -#define PCCR_S1_PSE	(1<<7) - -#define PCSSR_S0_SLEEP	(1<<0) -#define PCSSR_S1_SLEEP	(1<<1)  extern struct bus_type sa1111_bus_type; -#define SA1111_DEVID_SBI	0 -#define SA1111_DEVID_SK		1 -#define SA1111_DEVID_USB	2 -#define SA1111_DEVID_SAC	3 -#define SA1111_DEVID_SSP	4 -#define SA1111_DEVID_PS2	5 -#define SA1111_DEVID_GPIO	6 -#define SA1111_DEVID_INT	7 -#define SA1111_DEVID_PCMCIA	8 +#define SA1111_DEVID_SBI	(1 << 0) +#define SA1111_DEVID_SK		(1 << 1) +#define SA1111_DEVID_USB	(1 << 2) +#define SA1111_DEVID_SAC	(1 << 3) +#define SA1111_DEVID_SSP	(1 << 4) +#define SA1111_DEVID_PS2	(3 << 5) +#define SA1111_DEVID_PS2_KBD	(1 << 5) +#define SA1111_DEVID_PS2_MSE	(1 << 6) +#define SA1111_DEVID_GPIO	(1 << 7) +#define SA1111_DEVID_INT	(1 << 8) +#define SA1111_DEVID_PCMCIA	(1 << 9)  struct sa1111_dev {  	struct device	dev; @@ -548,6 +432,7 @@ struct sa1111_driver {  	int (*remove)(struct sa1111_dev *);  	int (*suspend)(struct sa1111_dev *, pm_message_t);  	int (*resume)(struct sa1111_dev *); +	void (*shutdown)(struct sa1111_dev *);  };  #define SA1111_DRV(_d)	container_of((_d), struct sa1111_driver, drv) @@ -555,9 +440,10 @@ struct sa1111_driver {  #define SA1111_DRIVER_NAME(_sadev) ((_sadev)->dev.driver->name)  /* - * These frob the SKPCR register. 
+ * These frob the SKPCR register, and call platform specific + * enable/disable functions.   */ -void sa1111_enable_device(struct sa1111_dev *); +int sa1111_enable_device(struct sa1111_dev *);  void sa1111_disable_device(struct sa1111_dev *);  unsigned int sa1111_pll_clock(struct sa1111_dev *); @@ -580,6 +466,10 @@ void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned i  struct sa1111_platform_data {  	int	irq_base;	/* base for cascaded on-chip IRQs */ +	unsigned disable_devs; +	void	*data; +	int	(*enable)(void *, unsigned); +	void	(*disable)(void *, unsigned);  };  #endif  /* _ASM_ARCH_SA1111 */ diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h index ebb3ceaa8fa..58cdf5d8412 100644 --- a/arch/arm/include/asm/hardware/scoop.h +++ b/arch/arm/include/asm/hardware/scoop.h @@ -61,7 +61,6 @@ struct scoop_pcmcia_dev {  struct scoop_pcmcia_config {  	struct scoop_pcmcia_dev *devs;  	int num_devs; -	void (*pcmcia_init)(void);  	void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr);  }; diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h deleted file mode 100644 index a101f10bb5b..00000000000 --- a/arch/arm/include/asm/hardware/sp810.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * arch/arm/include/asm/hardware/sp810.h - * - * ARM PrimeXsys System Controller SP810 header file - * - * Copyright (C) 2009 ST Microelectronics - * Viresh Kumar<viresh.kumar@st.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __ASM_ARM_SP810_H -#define __ASM_ARM_SP810_H - -#include <linux/io.h> - -/* sysctl registers offset */ -#define SCCTRL			0x000 -#define SCSYSSTAT		0x004 -#define SCIMCTRL		0x008 -#define SCIMSTAT		0x00C -#define SCXTALCTRL		0x010 -#define SCPLLCTRL		0x014 -#define SCPLLFCTRL		0x018 -#define SCPERCTRL0		0x01C -#define SCPERCTRL1		0x020 -#define SCPEREN			0x024 -#define SCPERDIS		0x028 -#define SCPERCLKEN		0x02C -#define SCPERSTAT		0x030 -#define SCSYSID0		0xEE0 -#define SCSYSID1		0xEE4 -#define SCSYSID2		0xEE8 -#define SCSYSID3		0xEEC -#define SCITCR			0xF00 -#define SCITIR0			0xF04 -#define SCITIR1			0xF08 -#define SCITOR			0xF0C -#define SCCNTCTRL		0xF10 -#define SCCNTDATA		0xF14 -#define SCCNTSTEP		0xF18 -#define SCPERIPHID0		0xFE0 -#define SCPERIPHID1		0xFE4 -#define SCPERIPHID2		0xFE8 -#define SCPERIPHID3		0xFEC -#define SCPCELLID0		0xFF0 -#define SCPCELLID1		0xFF4 -#define SCPCELLID2		0xFF8 -#define SCPCELLID3		0xFFC - -static inline void sysctl_soft_reset(void __iomem *base) -{ -	/* writing any value to SCSYSSTAT reg will reset system */ -	writel(0, base + SCSYSSTAT); -} - -#endif	/* __ASM_ARM_SP810_H */ diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h new file mode 100644 index 00000000000..bb28af7c32d --- /dev/null +++ b/arch/arm/include/asm/hardware/timer-sp.h @@ -0,0 +1,23 @@ +struct clk; + +void __sp804_clocksource_and_sched_clock_init(void __iomem *, +					      const char *, struct clk *, int); +void __sp804_clockevents_init(void __iomem *, unsigned int, +			      struct clk *, const char *); + +static inline void sp804_clocksource_init(void __iomem *base, const char *name) +{ +	__sp804_clocksource_and_sched_clock_init(base, name, NULL, 0); +} + +static inline void sp804_clocksource_and_sched_clock_init(void __iomem *base, +							  const char *name) +{ +	
__sp804_clocksource_and_sched_clock_init(base, name, NULL, 1); +} + +static inline void sp804_clockevents_init(void __iomem *base, unsigned int irq, const char *name) +{ +	__sp804_clockevents_init(base, irq, NULL, name); + +} diff --git a/arch/arm/include/asm/hardware/uengine.h b/arch/arm/include/asm/hardware/uengine.h deleted file mode 100644 index b442d65c659..00000000000 --- a/arch/arm/include/asm/hardware/uengine.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Generic library functions for the microengines found on the Intel - * IXP2000 series of network processors. - * - * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> - * Dedicated to Marija Kulikova. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as - * published by the Free Software Foundation; either version 2.1 of the - * License, or (at your option) any later version. - */ - -#ifndef __IXP2000_UENGINE_H -#define __IXP2000_UENGINE_H - -extern u32 ixp2000_uengine_mask; - -struct ixp2000_uengine_code -{ -	u32	cpu_model_bitmask; -	u8	cpu_min_revision; -	u8	cpu_max_revision; - -	u32	uengine_parameters; - -	struct ixp2000_reg_value { -		int	reg; -		u32	value; -	} *initial_reg_values; - -	int	num_insns; -	u8	*insns; -}; - -u32 ixp2000_uengine_csr_read(int uengine, int offset); -void ixp2000_uengine_csr_write(int uengine, int offset, u32 value); -void ixp2000_uengine_reset(u32 uengine_mask); -void ixp2000_uengine_set_mode(int uengine, u32 mode); -void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns); -void ixp2000_uengine_init_context(int uengine, int context, int pc); -void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask); -void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask); -int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c); - -#define IXP2000_UENGINE_8_CONTEXTS		0x00000000 -#define IXP2000_UENGINE_4_CONTEXTS		0x80000000 -#define IXP2000_UENGINE_PRN_UPDATE_EVERY	0x40000000 -#define IXP2000_UENGINE_PRN_UPDATE_ON_ACCESS	0x00000000 -#define IXP2000_UENGINE_NN_FROM_SELF		0x00100000 -#define IXP2000_UENGINE_NN_FROM_PREVIOUS	0x00000000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_3	0x000c0000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_2	0x00080000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_1	0x00040000 -#define IXP2000_UENGINE_ASSERT_EMPTY_AT_0	0x00000000 -#define IXP2000_UENGINE_LM_ADDR1_GLOBAL		0x00020000 -#define IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT	0x00000000 -#define IXP2000_UENGINE_LM_ADDR0_GLOBAL		0x00010000 -#define IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT	0x00000000 - - -#endif diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h deleted file mode 100644 index 5d72550a809..00000000000 --- a/arch/arm/include/asm/hardware/vic.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - *  arch/arm/include/asm/hardware/vic.h - * - *  Copyright (c) ARM Limited 2003.  All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#ifndef __ASM_ARM_HARDWARE_VIC_H
-#define __ASM_ARM_HARDWARE_VIC_H
-
-#define VIC_IRQ_STATUS			0x00
-#define VIC_FIQ_STATUS			0x04
-#define VIC_RAW_STATUS			0x08
-#define VIC_INT_SELECT			0x0c	/* 1 = FIQ, 0 = IRQ */
-#define VIC_INT_ENABLE			0x10	/* 1 = enable, 0 = disable */
-#define VIC_INT_ENABLE_CLEAR		0x14
-#define VIC_INT_SOFT			0x18
-#define VIC_INT_SOFT_CLEAR		0x1c
-#define VIC_PROTECT			0x20
-#define VIC_PL190_VECT_ADDR		0x30	/* PL190 only */
-#define VIC_PL190_DEF_VECT_ADDR		0x34	/* PL190 only */
-
-#define VIC_VECT_ADDR0			0x100	/* 0 to 15 (0..31 PL192) */
-#define VIC_VECT_CNTL0			0x200	/* 0 to 15 (0..31 PL192) */
-#define VIC_ITCR			0x300	/* VIC test control register */
-
-#define VIC_VECT_CNTL_ENABLE		(1 << 5)
-
-#define VIC_PL192_VECT_ADDR		0xF00
-
-#ifndef __ASSEMBLY__
-void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 1fc684e70ab..535579511ed 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -18,15 +18,45 @@
 	} while (0)
 
 extern pte_t *pkmap_page_table;
-
-#define ARCH_NEEDS_KMAP_HIGH_GET
+extern pte_t *fixmap_page_table;
 
 extern void *kmap_high(struct page *page);
-extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context.  With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so as not to pay the price of establishing a second mapping if an existing
+ * one can be used.  However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
+#define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif
+
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */ +#ifdef CONFIG_ARM_ERRATA_798181 +#undef ARCH_NEEDS_KMAP_HIGH_GET +#endif + +#ifdef ARCH_NEEDS_KMAP_HIGH_GET +extern void *kmap_high_get(struct page *page); +#else +static inline void *kmap_high_get(struct page *page) +{ +	return NULL; +} +#endif  /*   * The following functions are already defined by <linux/highmem.h> @@ -35,7 +65,7 @@ extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);  #ifdef CONFIG_HIGHMEM  extern void *kmap(struct page *page);  extern void kunmap(struct page *page); -extern void *__kmap_atomic(struct page *page); +extern void *kmap_atomic(struct page *page);  extern void __kunmap_atomic(void *kvaddr);  extern void *kmap_atomic_pfn(unsigned long pfn);  extern struct page *kmap_atomic_to_page(const void *ptr); diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h new file mode 100644 index 00000000000..d4014fbe5ea --- /dev/null +++ b/arch/arm/include/asm/hugetlb-3level.h @@ -0,0 +1,71 @@ +/* + * arch/arm/include/asm/hugetlb-3level.h + * + * Copyright (C) 2012 ARM Ltd. + * + * Based on arch/x86/include/asm/hugetlb.h. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_ARM_HUGETLB_3LEVEL_H +#define _ASM_ARM_HUGETLB_3LEVEL_H + + +/* + * If our huge pte is non-zero then mark the valid bit. + * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero + * ptes. + * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes). + */ +static inline pte_t huge_ptep_get(pte_t *ptep) +{ +	pte_t retval = *ptep; +	if (pte_val(retval)) +		pte_val(retval) |= L_PTE_VALID; +	return retval; +} + +static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, +				   pte_t *ptep, pte_t pte) +{ +	set_pte_at(mm, addr, ptep, pte); +} + +static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, +					 unsigned long addr, pte_t *ptep) +{ +	ptep_clear_flush(vma, addr, ptep); +} + +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, +					   unsigned long addr, pte_t *ptep) +{ +	ptep_set_wrprotect(mm, addr, ptep); +} + +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, +					    unsigned long addr, pte_t *ptep) +{ +	return ptep_get_and_clear(mm, addr, ptep); +} + +static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, +					     unsigned long addr, pte_t *ptep, +					     pte_t pte, int dirty) +{ +	return ptep_set_access_flags(vma, addr, ptep, pte, dirty); +} + +#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */ diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h new file mode 100644 index 00000000000..1f1b1cd112f --- /dev/null +++ b/arch/arm/include/asm/hugetlb.h @@ -0,0 +1,84 @@ +/* + * arch/arm/include/asm/hugetlb.h + * + * Copyright (C) 2012 ARM Ltd. 
+ * + * Based on arch/x86/include/asm/hugetlb.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_ARM_HUGETLB_H +#define _ASM_ARM_HUGETLB_H + +#include <asm/page.h> +#include <asm-generic/hugetlb.h> + +#include <asm/hugetlb-3level.h> + +static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, +					  unsigned long addr, unsigned long end, +					  unsigned long floor, +					  unsigned long ceiling) +{ +	free_pgd_range(tlb, addr, end, floor, ceiling); +} + + +static inline int is_hugepage_only_range(struct mm_struct *mm, +					 unsigned long addr, unsigned long len) +{ +	return 0; +} + +static inline int prepare_hugepage_range(struct file *file, +					 unsigned long addr, unsigned long len) +{ +	struct hstate *h = hstate_file(file); +	if (len & ~huge_page_mask(h)) +		return -EINVAL; +	if (addr & ~huge_page_mask(h)) +		return -EINVAL; +	return 0; +} + +static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) +{ +} + +static inline int huge_pte_none(pte_t pte) +{ +	return pte_none(pte); +} + +static inline pte_t huge_pte_wrprotect(pte_t pte) +{ +	return pte_wrprotect(pte); +} + +static inline int arch_prepare_hugepage(struct page *page) +{ +	return 0; +} + +static inline void arch_release_hugepage(struct page *page) +{ +} + +static inline void arch_clear_hugepage_flags(struct page *page) +{ +	clear_bit(PG_dcache_clean, &page->flags); +} + +#endif /* _ASM_ARM_HUGETLB_H */ diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index 4d8ae9d67ab..8e427c7b442 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -20,8 +20,8 @@ struct arch_hw_breakpoint_ctrl {  struct arch_hw_breakpoint {  	u32	address;  	u32	trigger; -	struct perf_event *suspended_wp; -	struct arch_hw_breakpoint_ctrl ctrl; +	struct	arch_hw_breakpoint_ctrl step_ctrl; +	struct	arch_hw_breakpoint_ctrl ctrl;  };  static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) @@ -50,6 +50,8 @@ static inline void decode_ctrl_reg(u32 reg,  #define ARM_DEBUG_ARCH_V6_1	2  #define ARM_DEBUG_ARCH_V7_ECP14	3  #define ARM_DEBUG_ARCH_V7_MM	4 +#define ARM_DEBUG_ARCH_V7_1	5 +#define ARM_DEBUG_ARCH_V8	6  /* Breakpoint */  #define ARM_BREAKPOINT_EXECUTE	0 @@ -57,6 +59,7 @@ static inline void decode_ctrl_reg(u32 reg,  /* Watchpoints */  #define ARM_BREAKPOINT_LOAD	1  #define ARM_BREAKPOINT_STORE	2 +#define ARM_FSR_ACCESS_MASK	(1 << 11)  /* Privilege Levels */  #define ARM_BREAKPOINT_PRIV	1 @@ -83,6 +86,9 @@ static inline void decode_ctrl_reg(u32 reg,  #define ARM_DSCR_HDBGEN		(1 << 14)  #define ARM_DSCR_MDBGEN		(1 << 15) +/* OSLSR os lock model bits */ +#define ARM_OSLSR_OSLM0		(1 << 0) +  /* opcode2 numbers for the co-processor instructions. */  #define ARM_OP2_BVR		4  #define ARM_OP2_BCR		5 @@ -96,12 +102,12 @@ static inline void decode_ctrl_reg(u32 reg,  #define ARM_BASE_WCR		112  /* Accessor macros for the debug registers. 
*/ -#define ARM_DBG_READ(M, OP2, VAL) do {\ -	asm volatile("mrc p14, 0, %0, c0," #M ", " #OP2 : "=r" (VAL));\ +#define ARM_DBG_READ(N, M, OP2, VAL) do {\ +	asm volatile("mrc p14, 0, %0, " #N "," #M ", " #OP2 : "=r" (VAL));\  } while (0) -#define ARM_DBG_WRITE(M, OP2, VAL) do {\ -	asm volatile("mcr p14, 0, %0, c0," #M ", " #OP2 : : "r" (VAL));\ +#define ARM_DBG_WRITE(N, M, OP2, VAL) do {\ +	asm volatile("mcr p14, 0, %0, " #N "," #M ", " #OP2 : : "r" (VAL));\  } while (0)  struct notifier_block; diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h index 5586b7c8ef6..a71b417b185 100644 --- a/arch/arm/include/asm/hw_irq.h +++ b/arch/arm/include/asm/hw_irq.h @@ -10,14 +10,6 @@ static inline void ack_bad_irq(int irq)  	irq_err_count++;  } -/* - * Obsolete inline function for calling irq descriptor handlers. - */ -static inline void desc_handle_irq(unsigned int irq, struct irq_desc *desc) -{ -	desc->handle_irq(irq, desc); -} -  void set_irq_flags(unsigned int irq, unsigned int flags);  #define IRQF_VALID	(1 << 0) diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c1062c31710..6e183fd269f 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -1,33 +1,15 @@  #ifndef __ASMARM_HWCAP_H  #define __ASMARM_HWCAP_H -/* - * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP - */ -#define HWCAP_SWP	1 -#define HWCAP_HALF	2 -#define HWCAP_THUMB	4 -#define HWCAP_26BIT	8	/* Play it safe */ -#define HWCAP_FAST_MULT	16 -#define HWCAP_FPA	32 -#define HWCAP_VFP	64 -#define HWCAP_EDSP	128 -#define HWCAP_JAVA	256 -#define HWCAP_IWMMXT	512 -#define HWCAP_CRUNCH	1024 -#define HWCAP_THUMBEE	2048 -#define HWCAP_NEON	4096 -#define HWCAP_VFPv3	8192 -#define HWCAP_VFPv3D16	16384 -#define HWCAP_TLS	32768 +#include <uapi/asm/hwcap.h> -#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +#if !defined(__ASSEMBLY__)  /*   * This yields a mask that user programs can use to figure out what   * instruction set this cpu supports.   */  #define ELF_HWCAP	(elf_hwcap) -extern unsigned int elf_hwcap; +#define ELF_HWCAP2	(elf_hwcap2) +extern unsigned int elf_hwcap, elf_hwcap2;  #endif -  #endif diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h new file mode 100644 index 00000000000..b90d9e523d6 --- /dev/null +++ b/arch/arm/include/asm/hypervisor.h @@ -0,0 +1,6 @@ +#ifndef _ASM_ARM_HYPERVISOR_H +#define _ASM_ARM_HYPERVISOR_H + +#include <asm/xen/hypervisor.h> + +#endif diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h new file mode 100644 index 00000000000..bf863edb517 --- /dev/null +++ b/arch/arm/include/asm/idmap.h @@ -0,0 +1,14 @@ +#ifndef __ASM_IDMAP_H +#define __ASM_IDMAP_H + +#include <linux/compiler.h> +#include <asm/pgtable.h> + +/* Tag a function as requiring to be executed via an identity mapping. */ +#define __idmap __section(.idmap.text) noinline notrace + +extern pgd_t *idmap_pgd; + +void setup_mm_for_reboot(void); + +#endif	/* __ASM_IDMAP_H */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 815efa2d4e0..3d23418cbdd 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -24,9 +24,11 @@  #ifdef __KERNEL__  #include <linux/types.h> +#include <linux/blk_types.h>  #include <asm/byteorder.h>  #include <asm/memory.h> -#include <asm/system.h> +#include <asm-generic/pci_iomap.h> +#include <xen/xen.h>  /*   * ISA I/O bus memory addresses are 1:1 with the physical address. 
@@ -36,6 +38,12 @@  #define isa_bus_to_virt phys_to_virt  /* + * Atomic MMIO-wide IO modify + */ +extern void atomic_io_modify(void __iomem *reg, u32 mask, u32 set); +extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set); + +/*   * Generic IO read/write.  These perform native-endian accesses.  Note   * that some architectures will want to re-define __raw_{read,write}w.   */ @@ -47,13 +55,68 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);  extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);  extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); -#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a) = (v)) -#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)) -#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a) = (v)) +#if __LINUX_ARM_ARCH__ < 6 +/* + * Half-word accesses are problematic with RiscPC due to limitations of + * the bus. Rather than special-case the machine, just let the compiler + * generate the access for CPUs prior to ARMv6. + */ +#define __raw_readw(a)         (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) +#define __raw_writew(v,a)      ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))) +#else +/* + * When running under a hypervisor, we want to avoid I/O accesses with + * writeback addressing modes as these incur a significant performance + * overhead (the address generation must be emulated in software). + */ +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ +	asm volatile("strh %1, %0" +		     : "+Q" (*(volatile u16 __force *)addr) +		     : "r" (val)); +} + +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ +	u16 val; +	asm volatile("ldrh %1, %0" +		     : "+Q" (*(volatile u16 __force *)addr), +		       "=r" (val)); +	return val; +} +#endif + +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ +	asm volatile("strb %1, %0" +		     : "+Qo" (*(volatile u8 __force *)addr) +		     : "r" (val)); +} + +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ +	asm volatile("str %1, %0" +		     : "+Qo" (*(volatile u32 __force *)addr) +		     : "r" (val)); +} + +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ +	u8 val; +	asm volatile("ldrb %1, %0" +		     : "+Qo" (*(volatile u8 __force *)addr), +		       "=r" (val)); +	return val; +} -#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile unsigned char __force  *)(a)) -#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) -#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile unsigned int __force   *)(a)) +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ +	u32 val; +	asm volatile("ldr %1, %0" +		     : "+Qo" (*(volatile u32 __force *)addr), +		       "=r" (val)); +	return val; +}  /*   * Architecture ioremap implementation. 
@@ -75,12 +138,18 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
  */
 extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
 	size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 	void *);
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
+extern void __arm_iounmap(volatile void __iomem *addr);
+
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
+	unsigned int, void *);
+extern void (*arch_iounmap)(volatile void __iomem *);
 
 /*
  * Bad read/write accesses...
@@ -95,10 +164,61 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 	return (void __iomem *)addr;
 }
 
+#define IOMEM(x)	((void __force __iomem *)(x))
+
+/* IO barriers */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#include <asm/barrier.h>
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE	0xfee00000
+
+#if defined(CONFIG_PCI)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
+extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
+#ifdef CONFIG_NEED_MACH_IO_H
 #include <mach/io.h>
+#elif defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT	((resource_size_t)0xfffff)
+#define __io(a)		__typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
+#else
+#define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
+#endif
+
+/*
+ * This is the limit of PC card/PCI/ISA IO space, which is by default
+ * 64K if we have PC card, PCI or ISA support.  Otherwise, default to
+ * zero to prevent ISA/PCI drivers claiming IO space (and potentially
+ * oopsing.)
+ *
+ * Only set this larger if you really need inb() et al. to operate over
+ * a larger address space.  Note that SOC_COMMON ioremaps each socket's
+ * IO space area, and so inb() et al. must be defined to operate as per
+ * readb() et al. on such platforms.
+ */
+#ifndef IO_SPACE_LIMIT
+#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
+#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
+#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
+#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
+#else
+#define IO_SPACE_LIMIT ((resource_size_t)0)
+#endif
+#endif
 
 /*
  *  IO port access primitives
@@ -125,17 +245,17 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
  *
  * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
 #ifdef __io
-#define outb(v,p)		__raw_writeb(v,__io(p))
-#define outw(v,p)		__raw_writew((__force __u16) \
-					cpu_to_le16(v),__io(p))
-#define outl(v,p)		__raw_writel((__force __u32) \
-					cpu_to_le32(v),__io(p))
+#define outb(v,p)	({ __iowmb(); __raw_writeb(v,__io(p)); })
+#define outw(v,p)	({ __iowmb(); __raw_writew((__force __u16) \
+					cpu_to_le16(v),__io(p)); })
+#define outl(v,p)	({ __iowmb(); __raw_writel((__force __u32) \
+					cpu_to_le32(v),__io(p)); })
 
-#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __v; })
+#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
 #define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
-			__raw_readw(__io(p))); __v; })
+			__raw_readw(__io(p))); __iormb(); __v; })
 #define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
-			__raw_readl(__io(p))); __v; })
+			__raw_readl(__io(p))); __iormb(); __v; })
 
 #define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
 #define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
@@ -179,26 +299,16 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  * Again, these are defined to perform little endian accesses.  See the
  * IO port primitives for more information.
  */
-#ifdef __mem_pci
-#define readb_relaxed(c) ({ u8  __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
-					__raw_readw(__mem_pci(c))); __v; })
-#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
-					__raw_readl(__mem_pci(c))); __v; })
-
-#define writeb_relaxed(v,c)	((void)__raw_writeb(v,__mem_pci(c)))
-#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
-					cpu_to_le16(v),__mem_pci(c)))
-#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
-					cpu_to_le32(v),__mem_pci(c)))
+#ifndef readl
+#define readb_relaxed(c) ({ u8  __r = __raw_readb(c); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+					__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl(c)); __r; })
 
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define __iormb()		rmb()
-#define __iowmb()		wmb()
-#else
-#define __iormb()		do { } while (0)
-#define __iowmb()		do { } while (0)
-#endif
+#define writeb_relaxed(v,c)	__raw_writeb(v,c)
+#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
 
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -208,51 +318,32 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 
-#define readsb(p,d,l)		__raw_readsb(__mem_pci(p),d,l)
-#define readsw(p,d,l)		__raw_readsw(__mem_pci(p),d,l)
-#define readsl(p,d,l)		__raw_readsl(__mem_pci(p),d,l)
+#define readsb(p,d,l)		__raw_readsb(p,d,l)
+#define readsw(p,d,l)		__raw_readsw(p,d,l)
+#define readsl(p,d,l)		__raw_readsl(p,d,l)
 
-#define writesb(p,d,l)		__raw_writesb(__mem_pci(p),d,l)
-#define writesw(p,d,l)		__raw_writesw(__mem_pci(p),d,l)
-#define writesl(p,d,l)		__raw_writesl(__mem_pci(p),d,l)
+#define writesb(p,d,l)		__raw_writesb(p,d,l)
+#define writesw(p,d,l)		__raw_writesw(p,d,l)
+#define writesl(p,d,l)		__raw_writesl(p,d,l)
 
-#define memset_io(c,v,l)	_memset_io(__mem_pci(c),(v),(l))
-#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),__mem_pci(c),(l))
-#define memcpy_toio(c,a,l)	_memcpy_toio(__mem_pci(c),(a),(l))
+#define memset_io(c,v,l)	_memset_io(c,(v),(l))
+#define memcpy_fromio(a,c,l)	_memcpy_fromio((a),c,(l))
+#define memcpy_toio(c,a,l)	_memcpy_toio(c,(a),(l))
 
-#elif !defined(readb)
-
-#define readb(c)			(__readwrite_bug("readb"),0)
-#define readw(c)			(__readwrite_bug("readw"),0)
-#define readl(c)			(__readwrite_bug("readl"),0)
-#define writeb(v,c)			__readwrite_bug("writeb")
-#define writew(v,c)			__readwrite_bug("writew")
-#define writel(v,c)			__readwrite_bug("writel")
-
-#define check_signature(io,sig,len)	(0)
-
-#endif	/* __mem_pci */
+#endif	/* readl */
 
 /*
  * ioremap and friends.
 *
 * ioremap takes a PCI memory address, as specified in
- * Documentation/IO-mapping.txt.
+ * Documentation/io-mapping.txt.
 *
 */
-#ifndef __arch_ioremap
-#define ioremap(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE)
-#define ioremap_nocache(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE)
-#define ioremap_cached(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE_WC)
-#define iounmap(cookie)			__iounmap(cookie)
-#else
-#define ioremap(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE_WC)
-#define iounmap(cookie)			__arch_iounmap(cookie)
-#endif
+#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
+#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
+#define iounmap				__arm_iounmap
 
 /*
  * io{read,write}{8,16,32} macros
@@ -262,9 +353,15 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
 #define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)	({ __iowmb(); (void)__raw_writeb(v, p); })
-#define iowrite16(v,p)	({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p)	({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define ioread16be(p)	({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)	({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite8(v,p)	({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
 
 #define ioread8_rep(p,d,c)	__raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)	__raw_readsw(p,d,c)
@@ -280,7 +377,6 @@ extern void ioport_unmap(void __iomem *addr);
 
 struct pci_dev;
 
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 
 /*
@@ -290,9 +386,16 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
 #define
BIOVEC_MERGEABLE(vec1, vec2)	\  	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) +struct bio_vec; +extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, +				      const struct bio_vec *vec2); +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\ +	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\ +	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) +  #ifdef CONFIG_MMU  #define ARCH_HAS_VALID_PHYS_ADDR_RANGE -extern int valid_phys_addr_range(unsigned long addr, size_t size); +extern int valid_phys_addr_range(phys_addr_t addr, size_t size);  extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);  extern int devmem_is_allowed(unsigned long pfn);  #endif diff --git a/arch/arm/include/asm/ioctl.h b/arch/arm/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe..00000000000 --- a/arch/arm/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h deleted file mode 100644 index 9c962981612..00000000000 --- a/arch/arm/include/asm/ioctls.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_ARM_IOCTLS_H -#define __ASM_ARM_IOCTLS_H - -#define FIOQSIZE	0x545E - -#include <asm-generic/ioctls.h> - -#endif diff --git a/arch/arm/include/asm/ipcbuf.h b/arch/arm/include/asm/ipcbuf.h deleted file mode 100644 index 97683975f7d..00000000000 --- a/arch/arm/include/asm/ipcbuf.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __ASMARM_IPCBUF_H -#define __ASMARM_IPCBUF_H - -/* - * The ipc64_perm structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 32-bit mode_t and seq - * - 2 miscellaneous 32-bit values - */ - -struct ipc64_perm -{ -	__kernel_key_t		key; -	__kernel_uid32_t	uid; -	__kernel_gid32_t	gid; -	__kernel_uid32_t	cuid; -	__kernel_gid32_t	cgid; -	__kernel_mode_t		mode; -	unsigned short		__pad1; -	unsigned short		seq; -	unsigned short		__pad2; -	unsigned long		__unused1; -	unsigned long		__unused2; -}; - -#endif /* __ASMARM_IPCBUF_H */ diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 2721a5814cb..53c15dec7af 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -1,14 +1,18 @@  #ifndef __ASM_ARM_IRQ_H  #define __ASM_ARM_IRQ_H +#define NR_IRQS_LEGACY	16 + +#ifndef CONFIG_SPARSE_IRQ  #include <mach/irqs.h> +#else +#define NR_IRQS NR_IRQS_LEGACY +#endif  #ifndef irq_canonicalize  #define irq_canonicalize(i)	(i)  #endif -#define NR_IRQS_LEGACY	16 -  /*   * Use this value to indicate lack of interrupt   * capability @@ -23,8 +27,14 @@ struct pt_regs;  extern void migrate_irqs(void);  extern void asm_do_IRQ(unsigned int, struct pt_regs *); +void handle_IRQ(unsigned int, struct pt_regs *);  void init_IRQ(void); +#ifdef CONFIG_MULTI_IRQ_HANDLER +extern void (*handle_arch_irq)(struct pt_regs *); +extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); +#endif +  #endif  #endif diff --git a/arch/arm/include/asm/irq_regs.h b/arch/arm/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/arch/arm/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h index 1e6cca55c75..3b763d6652a 100644 --- a/arch/arm/include/asm/irqflags.h +++ b/arch/arm/include/asm/irqflags.h @@ -8,6 +8,16 @@  /*   * CPU interrupt mask handling.   
*/ +#ifdef CONFIG_CPU_V7M +#define IRQMASK_REG_NAME_R "primask" +#define IRQMASK_REG_NAME_W "primask" +#define IRQMASK_I_BIT	1 +#else +#define IRQMASK_REG_NAME_R "cpsr" +#define IRQMASK_REG_NAME_W "cpsr_c" +#define IRQMASK_I_BIT	PSR_I_BIT +#endif +  #if __LINUX_ARM_ARCH__ >= 6  static inline unsigned long arch_local_irq_save(void) @@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)  	unsigned long flags;  	asm volatile( -		"	mrs	%0, cpsr	@ arch_local_irq_save\n" +		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ arch_local_irq_save\n"  		"	cpsid	i"  		: "=r" (flags) : : "memory", "cc");  	return flags; @@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void)  {  	unsigned long flags;  	asm volatile( -		"	mrs	%0, cpsr	@ local_save_flags" +		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ local_save_flags"  		: "=r" (flags) : : "memory", "cc");  	return flags;  } @@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void)  static inline void arch_local_irq_restore(unsigned long flags)  {  	asm volatile( -		"	msr	cpsr_c, %0	@ local_irq_restore" +		"	msr	" IRQMASK_REG_NAME_W ", %0	@ local_irq_restore"  		:  		: "r" (flags)  		: "memory", "cc"); @@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags)  static inline int arch_irqs_disabled_flags(unsigned long flags)  { -	return flags & PSR_I_BIT; +	return flags & IRQMASK_I_BIT;  } -#endif -#endif +#endif /* ifdef __KERNEL__ */ +#endif /* ifndef __ASM_ARM_IRQFLAGS_H */ diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h new file mode 100644 index 00000000000..70f9b9bfb1f --- /dev/null +++ b/arch/arm/include/asm/jump_label.h @@ -0,0 +1,40 @@ +#ifndef _ASM_ARM_JUMP_LABEL_H +#define _ASM_ARM_JUMP_LABEL_H + +#ifdef __KERNEL__ + +#include <linux/types.h> + +#define JUMP_LABEL_NOP_SIZE 4 + +#ifdef CONFIG_THUMB2_KERNEL +#define JUMP_LABEL_NOP	"nop.w" +#else +#define JUMP_LABEL_NOP	"nop" +#endif + +static __always_inline bool arch_static_branch(struct static_key *key) +{ +	asm_volatile_goto("1:\n\t" +		 JUMP_LABEL_NOP "\n\t" +		 ".pushsection __jump_table,  \"aw\"\n\t" +		 ".word 1b, %l[l_yes], %c0\n\t" +		 ".popsection\n\t" +		 : :  "i" (key) :  : l_yes); + +	return false; +l_yes: +	return true; +} + +#endif /* __KERNEL__ */ + +typedef u32 jump_label_t; + +struct jump_entry { +	jump_label_t code; +	jump_label_t target; +	jump_label_t key; +}; + +#endif diff --git a/arch/arm/include/asm/kdebug.h b/arch/arm/include/asm/kdebug.h deleted file mode 100644 index 6ece1b03766..00000000000 --- a/arch/arm/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kdebug.h> diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h index 8ec9ef5c3c7..c2b9b4bdec0 100644 --- a/arch/arm/include/asm/kexec.h +++ b/arch/arm/include/asm/kexec.h @@ -33,13 +33,26 @@ static inline void crash_setup_regs(struct pt_regs *newregs,  	if (oldregs) {  		memcpy(newregs, oldregs, sizeof(*newregs));  	} else { -		__asm__ __volatile__ ("stmia %0, {r0 - r15}" -				      : : "r" (&newregs->ARM_r0)); -		__asm__ __volatile__ ("mrs %0, cpsr" -				      : "=r" (newregs->ARM_cpsr)); +		__asm__ __volatile__ ( +			"stmia	%[regs_base], {r0-r12}\n\t" +			"mov	%[_ARM_sp], sp\n\t" +			"str	lr, %[_ARM_lr]\n\t" +			"adr	%[_ARM_pc], 1f\n\t" +			"mrs	%[_ARM_cpsr], cpsr\n\t" +		"1:" +			: [_ARM_pc] "=r" (newregs->ARM_pc), +			  [_ARM_cpsr] "=r" (newregs->ARM_cpsr), +			  [_ARM_sp] "=r" (newregs->ARM_sp), +			  [_ARM_lr] "=o" (newregs->ARM_lr) +			: [regs_base] "r" (&newregs->ARM_r0) +			: 
"memory" +		);  	}  } +/* Function pointer to optional machine-specific reinitialization */ +extern void (*kexec_reinit)(void); +  #endif /* __ASSEMBLY__ */  #endif /* CONFIG_KEXEC */ diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 48066ce9ea3..0a9d5dd9329 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -11,6 +11,7 @@  #define __ARM_KGDB_H__  #include <linux/ptrace.h> +#include <asm/opcodes.h>  /*   * GDB assumes that we're a user process being debugged, so @@ -41,7 +42,7 @@  static inline void arch_kgdb_breakpoint(void)  { -	asm(".word 0xe7ffdeff"); +	asm(__inst_arm(0xe7ffdeff));  }  extern void kgdb_handle_bus_error(void); diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index e51b1e81df0..83eb2f77291 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -4,30 +4,6 @@  /*   * This is the "bare minimum".  AIO seems to require this.   */ -enum km_type { -	KM_BOUNCE_READ, -	KM_SKB_SUNRPC_DATA, -	KM_SKB_DATA_SOFTIRQ, -	KM_USER0, -	KM_USER1, -	KM_BIO_SRC_IRQ, -	KM_BIO_DST_IRQ, -	KM_PTE0, -	KM_PTE1, -	KM_IRQ0, -	KM_IRQ1, -	KM_SOFTIRQ0, -	KM_SOFTIRQ1, -	KM_L1_CACHE, -	KM_L2_CACHE, -	KM_KDB, -	KM_TYPE_NR -}; - -#ifdef CONFIG_DEBUG_HIGHMEM -#define KM_NMI		(-1) -#define KM_NMI_PTE	(-1) -#define KM_IRQ_PTE	(-1) -#endif +#define KM_TYPE_NR 16  #endif diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index bb8a19bd582..49fa0dfaad3 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -18,32 +18,20 @@  #include <linux/types.h>  #include <linux/ptrace.h> -#include <linux/percpu.h> +#include <linux/notifier.h>  #define __ARCH_WANT_KPROBES_INSN_SLOT  #define MAX_INSN_SIZE			2  #define MAX_STACK_SIZE			64	/* 32 would probably be OK */ -/* - * This undefined instruction must be unique and - * reserved solely for kprobes' use. - */ -#define KPROBE_BREAKPOINT_INSTRUCTION	0xe7f001f8 - -#define regs_return_value(regs)		((regs)->ARM_r0)  #define flush_insn_slot(p)		do { } while (0)  #define kretprobe_blacklist_size	0  typedef u32 kprobe_opcode_t; -  struct kprobe; -typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); +#include <asm/probes.h> -/* Architecture specific copy of original instruction. 
*/
-struct arch_specific_insn {
-	kprobe_opcode_t		*insn;
-	kprobe_insn_handler_t	*insn_handler;
-};
+#define	arch_specific_insn	arch_probes_insn
 
 struct prev_kprobe {
 	struct kprobe *kp;
@@ -59,20 +47,9 @@ struct kprobe_ctlblk {
 };
 
 void arch_remove_kprobe(struct kprobe *);
-void kretprobe_trampoline(void);
-
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
 
-enum kprobe_insn {
-	INSN_REJECTED,
-	INSN_GOOD,
-	INSN_GOOD_NO_SLOT
-};
-
-enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
-					struct arch_specific_insn *);
-void __init arm_kprobe_decode_init(void);
 
 #endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
new file mode 100644
index 00000000000..816db0bf2dd
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ARM_H__
+#define __ARM_KVM_ARM_H__
+
+#include <linux/types.h>
+
+/* Hyp Configuration Register (HCR) bits */
+#define HCR_TGE		(1 << 27)
+#define HCR_TVM		(1 << 26)
+#define HCR_TTLB	(1 << 25)
+#define HCR_TPU		(1 << 24)
+#define HCR_TPC		(1 << 23)
+#define HCR_TSW		(1 << 22)
+#define HCR_TAC		(1 << 21)
+#define HCR_TIDCP	(1 << 20)
+#define HCR_TSC		(1 << 19)
+#define HCR_TID3	(1 << 18)
+#define HCR_TID2	(1 << 17)
+#define HCR_TID1	(1 << 16)
+#define HCR_TID0	(1 << 15)
+#define HCR_TWE		(1 << 14)
+#define HCR_TWI		(1 << 13)
+#define HCR_DC		(1 << 12)
+#define HCR_BSU		(3 << 10)
+#define HCR_BSU_IS	(1 << 10)
+#define HCR_FB		(1 << 9)
+#define HCR_VA		(1 << 8)
+#define HCR_VI		(1 << 7)
+#define HCR_VF		(1 << 6)
+#define HCR_AMO		(1 << 5)
+#define HCR_IMO		(1 << 4)
+#define HCR_FMO		(1 << 3)
+#define HCR_PTW		(1 << 2)
+#define HCR_SWIO	(1 << 1)
+#define HCR_VM		1
+
+/*
+ * The bits we set in HCR:
+ * TAC:		Trap ACTLR
+ * TSC:		Trap SMC
+ * TVM:		Trap VM ops (until MMU and caches are on)
+ * TSW:		Trap cache operations by set/way
+ * TWI:		Trap WFI
+ * TWE:		Trap WFE
+ * TIDCP:	Trap L2CTLR/L2ECTLR
+ * BSU_IS:	Upgrade barriers to the inner shareable domain
+ * FB:		Force broadcast of all maintenance operations
+ * AMO:		Override CPSR.A and enable signaling with VA
+ * IMO:		Override CPSR.I and enable signaling with VI
+ * FMO:		Override CPSR.F and enable signaling with VF
+ * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ */
+#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
+			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
+			HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
+
+/* System Control Register (SCTLR) bits */
+#define SCTLR_TE	(1 << 30)
+#define SCTLR_EE	(1 << 25)
+#define SCTLR_V		(1 << 13)
+
+/* Hyp System Control Register
(HSCTLR) bits */ +#define HSCTLR_TE	(1 << 30) +#define HSCTLR_EE	(1 << 25) +#define HSCTLR_FI	(1 << 21) +#define HSCTLR_WXN	(1 << 19) +#define HSCTLR_I	(1 << 12) +#define HSCTLR_C	(1 << 2) +#define HSCTLR_A	(1 << 1) +#define HSCTLR_M	1 +#define HSCTLR_MASK	(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \ +			 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE) + +/* TTBCR and HTCR Registers bits */ +#define TTBCR_EAE	(1 << 31) +#define TTBCR_IMP	(1 << 30) +#define TTBCR_SH1	(3 << 28) +#define TTBCR_ORGN1	(3 << 26) +#define TTBCR_IRGN1	(3 << 24) +#define TTBCR_EPD1	(1 << 23) +#define TTBCR_A1	(1 << 22) +#define TTBCR_T1SZ	(7 << 16) +#define TTBCR_SH0	(3 << 12) +#define TTBCR_ORGN0	(3 << 10) +#define TTBCR_IRGN0	(3 << 8) +#define TTBCR_EPD0	(1 << 7) +#define TTBCR_T0SZ	(7 << 0) +#define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) + +/* Hyp System Trap Register */ +#define HSTR_T(x)	(1 << x) +#define HSTR_TTEE	(1 << 16) +#define HSTR_TJDBX	(1 << 17) + +/* Hyp Coprocessor Trap Register */ +#define HCPTR_TCP(x)	(1 << x) +#define HCPTR_TCP_MASK	(0x3fff) +#define HCPTR_TASE	(1 << 15) +#define HCPTR_TTA	(1 << 20) +#define HCPTR_TCPAC	(1 << 31) + +/* Hyp Debug Configuration Register bits */ +#define HDCR_TDRA	(1 << 11) +#define HDCR_TDOSA	(1 << 10) +#define HDCR_TDA	(1 << 9) +#define HDCR_TDE	(1 << 8) +#define HDCR_HPME	(1 << 7) +#define HDCR_TPM	(1 << 6) +#define HDCR_TPMCR	(1 << 5) +#define HDCR_HPMN_MASK	(0x1F) + +/* + * The architecture supports 40-bit IPA as input to the 2nd stage translations + * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address + * space. + */ +#define KVM_PHYS_SHIFT	(40) +#define KVM_PHYS_SIZE	(1ULL << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1ULL) +#define PTRS_PER_S2_PGD	(1ULL << (KVM_PHYS_SHIFT - 30)) +#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) + +/* Virtualization Translation Control Register (VTCR) bits */ +#define VTCR_SH0	(3 << 12) +#define VTCR_ORGN0	(3 << 10) +#define VTCR_IRGN0	(3 << 8) +#define VTCR_SL0	(3 << 6) +#define VTCR_S		(1 << 4) +#define VTCR_T0SZ	(0xf) +#define VTCR_MASK	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \ +			 VTCR_S | VTCR_T0SZ) +#define VTCR_HTCR_SH	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0) +#define VTCR_SL_L2	(0 << 6)	/* Starting-level: 2 */ +#define VTCR_SL_L1	(1 << 6)	/* Starting-level: 1 */ +#define KVM_VTCR_SL0	VTCR_SL_L1 +/* stage-2 input address range defined as 2^(32-T0SZ) */ +#define KVM_T0SZ	(32 - KVM_PHYS_SHIFT) +#define KVM_VTCR_T0SZ	(KVM_T0SZ & VTCR_T0SZ) +#define KVM_VTCR_S	((KVM_VTCR_T0SZ << 1) & VTCR_S) + +/* Virtualization Translation Table Base Register (VTTBR) bits */ +#if KVM_VTCR_SL0 == VTCR_SL_L2	/* see ARM DDI 0406C: B4-1720 */ +#define VTTBR_X		(14 - KVM_T0SZ) +#else +#define VTTBR_X		(5 - KVM_T0SZ) +#endif +#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) +#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT  (48LLU) +#define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT) + +/* Hyp Syndrome Register (HSR) bits */ +#define HSR_EC_SHIFT	(26) +#define HSR_EC		(0x3fU << HSR_EC_SHIFT) +#define HSR_IL		(1U << 25) +#define HSR_ISS		(HSR_IL - 1) +#define HSR_ISV_SHIFT	(24) +#define HSR_ISV		(1U << HSR_ISV_SHIFT) +#define HSR_SRT_SHIFT	(16) +#define HSR_SRT_MASK	(0xf << HSR_SRT_SHIFT) +#define HSR_FSC		(0x3f) +#define HSR_FSC_TYPE	(0x3c) +#define HSR_SSE		(1 << 21) +#define HSR_WNR		(1 << 6) +#define HSR_CV_SHIFT	(24) +#define HSR_CV		(1U << HSR_CV_SHIFT) +#define HSR_COND_SHIFT	(20) +#define HSR_COND	(0xfU 
<< HSR_COND_SHIFT)
+
+#define FSC_FAULT	(0x04)
+#define FSC_PERM	(0x0c)
+
+/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
+#define HPFAR_MASK	(~0xf)
+
+#define HSR_EC_UNKNOWN	(0x00)
+#define HSR_EC_WFI	(0x01)
+#define HSR_EC_CP15_32	(0x03)
+#define HSR_EC_CP15_64	(0x04)
+#define HSR_EC_CP14_MR	(0x05)
+#define HSR_EC_CP14_LS	(0x06)
+#define HSR_EC_CP_0_13	(0x07)
+#define HSR_EC_CP10_ID	(0x08)
+#define HSR_EC_JAZELLE	(0x09)
+#define HSR_EC_BXJ	(0x0A)
+#define HSR_EC_CP14_64	(0x0C)
+#define HSR_EC_SVC_HYP	(0x11)
+#define HSR_EC_HVC	(0x12)
+#define HSR_EC_SMC	(0x13)
+#define HSR_EC_IABT	(0x20)
+#define HSR_EC_IABT_HYP	(0x21)
+#define HSR_EC_DABT	(0x24)
+#define HSR_EC_DABT_HYP	(0x25)
+
+#define HSR_WFI_IS_WFE		(1U << 0)
+
+#define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
+
+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
+#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
new file mode 100644
index 00000000000..53b3c4a50d5
--- /dev/null
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_ASM_H__
+#define __ARM_KVM_ASM_H__
+
+/* 0 is reserved as an invalid value. */
+#define c0_MPIDR	1	/* MultiProcessor ID Register */
+#define c0_CSSELR	2	/* Cache Size Selection Register */
+#define c1_SCTLR	3	/* System Control Register */
+#define c1_ACTLR	4	/* Auxiliary Control Register */
+#define c1_CPACR	5	/* Coprocessor Access Control */
+#define c2_TTBR0	6	/* Translation Table Base Register 0 */
+#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
+#define c2_TTBR1	8	/* Translation Table Base Register 1 */
+#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */
+#define c2_TTBCR	10	/* Translation Table Base Control R.
*/
+#define c3_DACR		11	/* Domain Access Control Register */
+#define c5_DFSR		12	/* Data Fault Status Register */
+#define c5_IFSR		13	/* Instruction Fault Status Register */
+#define c5_ADFSR	14	/* Auxiliary Data Fault Status R */
+#define c5_AIFSR	15	/* Auxiliary Instruction Fault Status R */
+#define c6_DFAR		16	/* Data Fault Address Register */
+#define c6_IFAR		17	/* Instruction Fault Address Register */
+#define c7_PAR		18	/* Physical Address Register */
+#define c7_PAR_high	19	/* PAR top 32 bits */
+#define c9_L2CTLR	20	/* Cortex A15/A7 L2 Control Register */
+#define c10_PRRR	21	/* Primary Region Remap Register */
+#define c10_NMRR	22	/* Normal Memory Remap Register */
+#define c12_VBAR	23	/* Vector Base Address Register */
+#define c13_CID		24	/* Context ID Register */
+#define c13_TID_URW	25	/* Thread ID, User R/W */
+#define c13_TID_URO	26	/* Thread ID, User R/O */
+#define c13_TID_PRIV	27	/* Thread ID, Privileged */
+#define c14_CNTKCTL	28	/* Timer Control Register (PL1) */
+#define c10_AMAIR0	29	/* Auxiliary Memory Attribute Indirection Reg0 */
+#define c10_AMAIR1	30	/* Auxiliary Memory Attribute Indirection Reg1 */
+#define NR_CP15_REGS	31	/* Number of regs (incl. invalid) */
+
+#define ARM_EXCEPTION_RESET	  0
+#define ARM_EXCEPTION_UNDEFINED   1
+#define ARM_EXCEPTION_SOFTWARE    2
+#define ARM_EXCEPTION_PREF_ABORT  3
+#define ARM_EXCEPTION_DATA_ABORT  4
+#define ARM_EXCEPTION_IRQ	  5
+#define ARM_EXCEPTION_FIQ	  6
+#define ARM_EXCEPTION_HVC	  7
+
+#ifndef __ASSEMBLY__
+struct kvm;
+struct kvm_vcpu;
+
+extern char __kvm_hyp_init[];
+extern char __kvm_hyp_init_end[];
+
+extern char __kvm_hyp_exit[];
+extern char __kvm_hyp_exit_end[];
+
+extern char __kvm_hyp_vector[];
+
+extern char __kvm_hyp_code_start[];
+extern char __kvm_hyp_code_end[];
+
+extern void __kvm_flush_vm_context(void);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+#endif
+
+#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
new file mode 100644
index 00000000000..4917c2f7e45
--- /dev/null
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */ + +#ifndef __ARM_KVM_COPROC_H__ +#define __ARM_KVM_COPROC_H__ +#include <linux/kvm_host.h> + +void kvm_reset_coprocs(struct kvm_vcpu *vcpu); + +struct kvm_coproc_target_table { +	unsigned target; +	const struct coproc_reg *table; +	size_t num; +}; +void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); + +int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); + +unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices); +void kvm_coproc_table_init(void); + +struct kvm_one_reg; +int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); +#endif /* __ARM_KVM_COPROC_H__ */ diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h new file mode 100644 index 00000000000..0fa90c962ac --- /dev/null +++ b/arch/arm/include/asm/kvm_emulate.h @@ -0,0 +1,211 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+ */
+
+#ifndef __ARM_KVM_EMULATE_H__
+#define __ARM_KVM_EMULATE_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
+
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
+{
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
+}
+
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
+{
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
+}
+
+static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+{
+	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
+}
+
+static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
+{
+	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
+}
+
+static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
+{
+	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+	return cpsr_mode > USR_MODE;
+}
+
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
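The syndrome accessors above are the building blocks for MMIO emulation. As a minimal illustrative sketch (not part of the patch, and not the series' actual decode code, which lives in the KVM MMIO implementation), a data-abort decoder could combine them as below; decode_hsr_sketch is a hypothetical name, and the struct kvm_exit_mmio and struct kvm_decode types are the ones introduced by asm/kvm_mmio.h later in this diff:

/* Hypothetical sketch only -- assumes the types from asm/kvm_mmio.h. */
static int decode_hsr_sketch(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			     struct kvm_exit_mmio *mmio)
{
	int len;

	/* Without a valid instruction syndrome the abort cannot be decoded. */
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EFAULT;

	/* Stage-1 page-table walks and external aborts are not MMIO accesses. */
	if (kvm_vcpu_dabt_iss1tw(vcpu) || kvm_vcpu_dabt_isextabt(vcpu))
		return -EFAULT;

	len = kvm_vcpu_dabt_get_as(vcpu);	/* access size: 1, 2 or 4 bytes */
	if (len < 0)
		return len;

	mmio->phys_addr = fault_ipa;		/* as from kvm_vcpu_get_fault_ipa() */
	mmio->len	= len;
	mmio->is_write	= kvm_vcpu_dabt_iswrite(vcpu);

	/* Record which GPR to transfer and whether a load sign-extends. */
	vcpu->arch.mmio_decode.rt = kvm_vcpu_dabt_get_rd(vcpu);
	vcpu->arch.mmio_decode.sign_extend = kvm_vcpu_dabt_issext(vcpu);

	return 0;
}

A caller filling a struct kvm_exit_mmio this way would then hand it to kvm_prepare_mmio() (defined in asm/kvm_mmio.h below) to exit to userspace with KVM_EXIT_MMIO.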
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) +{ +	return vcpu->arch.cp15[c0_MPIDR]; +} + +static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) +{ +	*vcpu_cpsr(vcpu) |= PSR_E_BIT; +} + +static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) +{ +	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT); +} + +static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, +						    unsigned long data, +						    unsigned int len) +{ +	if (kvm_vcpu_is_be(vcpu)) { +		switch (len) { +		case 1: +			return data & 0xff; +		case 2: +			return be16_to_cpu(data & 0xffff); +		default: +			return be32_to_cpu(data); +		} +	} + +	return data;		/* Leave LE untouched */ +} + +static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, +						    unsigned long data, +						    unsigned int len) +{ +	if (kvm_vcpu_is_be(vcpu)) { +		switch (len) { +		case 1: +			return data & 0xff; +		case 2: +			return cpu_to_be16(data & 0xffff); +		default: +			return cpu_to_be32(data); +		} +	} + +	return data;		/* Leave LE untouched */ +} + +#endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h new file mode 100644 index 00000000000..193ceaf01bf --- /dev/null +++ b/arch/arm/include/asm/kvm_host.h @@ -0,0 +1,234 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_HOST_H__ +#define __ARM_KVM_HOST_H__ + +#include <asm/kvm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_mmio.h> +#include <asm/fpstate.h> +#include <kvm/arm_arch_timer.h> + +#if defined(CONFIG_KVM_ARM_MAX_VCPUS) +#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS +#else +#define KVM_MAX_VCPUS 0 +#endif + +#define KVM_USER_MEM_SLOTS 32 +#define KVM_PRIVATE_MEM_SLOTS 4 +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_HAVE_ONE_REG + +#define KVM_VCPU_MAX_FEATURES 2 + +#include <kvm/arm_vgic.h> + +struct kvm_vcpu; +u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); +int kvm_target_cpu(void); +int kvm_reset_vcpu(struct kvm_vcpu *vcpu); +void kvm_reset_coprocs(struct kvm_vcpu *vcpu); + +struct kvm_arch { +	/* VTTBR value associated with below pgd and vmid */ +	u64    vttbr; + +	/* Timer */ +	struct arch_timer_kvm	timer; + +	/* +	 * Anything that is not used directly from assembly code goes +	 * here. +	 */ + +	/* The VMID generation used for the virt. memory system */ +	u64    vmid_gen; +	u32    vmid; + +	/* Stage-2 page table */ +	pgd_t *pgd; + +	/* Interrupt controller */ +	struct vgic_dist	vgic; +}; + +#define KVM_NR_MEM_OBJS     40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. 
+ */ +struct kvm_mmu_memory_cache { +	int nobjs; +	void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_fault_info { +	u32 hsr;		/* Hyp Syndrome Register */ +	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */ +	u32 hpfar;		/* Hyp IPA Fault Address Register */ +	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */ +}; + +typedef struct vfp_hard_struct kvm_cpu_context_t; + +struct kvm_vcpu_arch { +	struct kvm_regs regs; + +	int target; /* Processor target */ +	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); + +	/* System control coprocessor (cp15) */ +	u32 cp15[NR_CP15_REGS]; + +	/* The CPU type we expose to the VM */ +	u32 midr; + +	/* HYP trapping configuration */ +	u32 hcr; + +	/* Interrupt related fields */ +	u32 irq_lines;		/* IRQ and FIQ levels */ + +	/* Exception Information */ +	struct kvm_vcpu_fault_info fault; + +	/* Floating point registers (VFP and Advanced SIMD/NEON) */ +	struct vfp_hard_struct vfp_guest; + +	/* Host FP context */ +	kvm_cpu_context_t *host_cpu_context; + +	/* VGIC state */ +	struct vgic_cpu vgic_cpu; +	struct arch_timer_cpu timer_cpu; + +	/* +	 * Anything that is not used directly from assembly code goes +	 * here. +	 */ +	/* dcache set/way operation pending */ +	int last_pcpu; +	cpumask_t require_dcache_flush; + +	/* Don't run the guest on this vcpu */ +	bool pause; + +	/* IO related fields */ +	struct kvm_decode mmio_decode; + +	/* Cache some mmu pages needed inside spinlock regions */ +	struct kvm_mmu_memory_cache mmu_page_cache; + +	/* Detect first run of a vcpu */ +	bool has_run_once; +}; + +struct kvm_vm_stat { +	u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { +	u32 halt_wakeup; +}; + +struct kvm_vcpu_init; +int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, +			const struct kvm_vcpu_init *init); +int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); +struct kvm_one_reg; +int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +u64 kvm_call_hyp(void *hypfn, ...); +void force_vm_exit(const cpumask_t *mask); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +struct kvm; +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +int kvm_unmap_hva_range(struct kvm *kvm, +			unsigned long start, unsigned long end); +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); + +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); + +/* We do not have shadow page tables, hence the empty hooks */ +static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ +	return 0; +} + +static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ +	return 0; +} + +struct kvm_vcpu *kvm_arm_get_running_vcpu(void); +struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); + +int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); +struct kvm_one_reg; +int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); + +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, +		int exception_index); + +static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, +				       phys_addr_t pgd_ptr, +				       unsigned long hyp_stack_ptr, +				       unsigned long 
vector_ptr)
+{
+	/*
+	 * Call initialization code, and switch to the full blown HYP
+	 * code. The init code doesn't need to preserve these
+	 * registers as r0-r3 are already callee saved according to
+	 * the AAPCS.
+	 * Note that we slightly misuse the prototype by casting the
+	 * stack pointer to a void *.
+	 *
+	 * We don't have enough registers to perform the full init in
+	 * one go.  Install the boot PGD first, and then install the
+	 * runtime PGD, stack pointer and vectors. The PGDs are always
+	 * passed as the third argument, in order to be passed into
+	 * r2-r3 to the init code (yes, this is compliant with the
+	 * PCS!).
+	 */
+
+	kvm_call_hyp(NULL, 0, boot_pgd_ptr);
+
+	kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
+}
+
+static inline int kvm_arch_dev_ioctl_check_extension(long ext)
+{
+	return 0;
+}
+
+int kvm_perf_init(void);
+int kvm_perf_teardown(void);
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
+int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+
+#endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h new file mode 100644 index 00000000000..adcc0d7d317 --- /dev/null +++ b/arch/arm/include/asm/kvm_mmio.h @@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_MMIO_H__
+#define __ARM_KVM_MMIO_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+
+struct kvm_decode {
+	unsigned long rt;
+	bool sign_extend;
+};
+
+/*
+ * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
+ * which is an anonymous type. Use our own type instead.
+ */
+struct kvm_exit_mmio {
+	phys_addr_t	phys_addr;
+	u8		data[8];
+	u32		len;
+	bool		is_write;
+};
+
+static inline void kvm_prepare_mmio(struct kvm_run *run,
+				    struct kvm_exit_mmio *mmio)
+{
+	run->mmio.phys_addr	= mmio->phys_addr;
+	run->mmio.len		= mmio->len;
+	run->mmio.is_write	= mmio->is_write;
+	memcpy(run->mmio.data, mmio->data, mmio->len);
+	run->exit_reason	= KVM_EXIT_MMIO;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		 phys_addr_t fault_ipa);
+
+#endif	/* __ARM_KVM_MMIO_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h new file mode 100644 index 00000000000..5c7aa3c1519 --- /dev/null +++ b/arch/arm/include/asm/kvm_mmu.h @@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_MMU_H__ +#define __ARM_KVM_MMU_H__ + +#include <asm/memory.h> +#include <asm/page.h> + +/* + * We directly use the kernel VA for the HYP, as we can directly share + * the mapping (HTTBR "covers" TTBR1). + */ +#define HYP_PAGE_OFFSET_MASK	UL(~0) +#define HYP_PAGE_OFFSET		PAGE_OFFSET +#define KERN_TO_HYP(kva)	(kva) + +/* + * Our virtual mapping for the boot-time MMU-enable code. Must be + * shared across all the page-tables. Conveniently, we use the vectors + * page, where no kernel data will ever be shared with HYP. + */ +#define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE) + +#ifndef __ASSEMBLY__ + +#include <asm/cacheflush.h> +#include <asm/pgalloc.h> + +int create_hyp_mappings(void *from, void *to); +int create_hyp_io_mappings(void *from, void *to, phys_addr_t); +void free_boot_hyp_pgd(void); +void free_hyp_pgds(void); + +int kvm_alloc_stage2_pgd(struct kvm *kvm); +void kvm_free_stage2_pgd(struct kvm *kvm); +int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, +			  phys_addr_t pa, unsigned long size); + +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); + +phys_addr_t kvm_mmu_get_httbr(void); +phys_addr_t kvm_mmu_get_boot_httbr(void); +phys_addr_t kvm_get_idmap_vector(void); +int kvm_mmu_init(void); +void kvm_clear_hyp_idmap(void); + +static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd) +{ +	*pmd = new_pmd; +	flush_pmd_entry(pmd); +} + +static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) +{ +	*pte = new_pte; +	/* +	 * flush_pmd_entry just takes a void pointer and cleans the necessary +	 * cache entries, so we can reuse the function for ptes. +	 */ +	flush_pmd_entry(pte); +} + +static inline bool kvm_is_write_fault(unsigned long hsr) +{ +	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; +	if (hsr_ec == HSR_EC_IABT) +		return false; +	else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) +		return false; +	else +		return true; +} + +static inline void kvm_clean_pgd(pgd_t *pgd) +{ +	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); +} + +static inline void kvm_clean_pmd_entry(pmd_t *pmd) +{ +	clean_pmd_entry(pmd); +} + +static inline void kvm_clean_pte(pte_t *pte) +{ +	clean_pte_table(pte); +} + +static inline void kvm_set_s2pte_writable(pte_t *pte) +{ +	pte_val(*pte) |= L_PTE_S2_RDWR; +} + +static inline void kvm_set_s2pmd_writable(pmd_t *pmd) +{ +	pmd_val(*pmd) |= L_PMD_S2_RDWR; +} + +/* Open coded p*d_addr_end that can deal with 64bit addresses */ +#define kvm_pgd_addr_end(addr, end)					\ +({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\ +	(__boundary - 1 < (end) - 1)? __boundary: (end);		\ +}) + +#define kvm_pud_addr_end(addr,end)		(end) + +#define kvm_pmd_addr_end(addr, end)					\ +({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\ +	(__boundary - 1 < (end) - 1)? 
__boundary: (end);		\
+})
+
+struct kvm;
+
+#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+}
+
+static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+					     unsigned long size)
+{
+	if (!vcpu_has_cache_enabled(vcpu))
+		kvm_flush_dcache_to_poc((void *)hva, size);
+
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and don't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		__cpuc_coherent_user_range(hva, hva + size);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
+#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
+
+void stage2_flush_vm(struct kvm *kvm);
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h new file mode 100644 index 00000000000..6bda945d31f --- /dev/null +++ b/arch/arm/include/asm/kvm_psci.h @@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_PSCI_H__
+#define __ARM_KVM_PSCI_H__
+
+#define KVM_ARM_PSCI_0_1	1
+#define KVM_ARM_PSCI_0_2	2
+
+int kvm_psci_version(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
+
+#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/include/asm/leds.h b/arch/arm/include/asm/leds.h deleted file mode 100644 index c545739f39b..00000000000 --- a/arch/arm/include/asm/leds.h +++ /dev/null @@ -1,50 +0,0 @@
-/*
- *  arch/arm/include/asm/leds.h
- *
- *  Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Event-driven interface for LEDs on machines
- *  Added led_start and led_stop- Alex Holden, 28th Dec 1998. 
- */ -#ifndef ASM_ARM_LEDS_H -#define ASM_ARM_LEDS_H - - -typedef enum { -	led_idle_start, -	led_idle_end, -	led_timer, -	led_start, -	led_stop, -	led_claim,		/* override idle & timer leds */ -	led_release,		/* restore idle & timer leds */ -	led_start_timer_mode, -	led_stop_timer_mode, -	led_green_on, -	led_green_off, -	led_amber_on, -	led_amber_off, -	led_red_on, -	led_red_off, -	led_blue_on, -	led_blue_off, -	/* -	 * I want this between led_timer and led_start, but -	 * someone has decided to export this to user space -	 */ -	led_halted -} led_event_t; - -/* Use this routine to handle LEDs */ - -#ifdef CONFIG_LEDS -extern void (*leds_event)(led_event_t); -#else -#define leds_event(e) -#endif - -#endif diff --git a/arch/arm/include/asm/local.h b/arch/arm/include/asm/local.h deleted file mode 100644 index c11c530f74d..00000000000 --- a/arch/arm/include/asm/local.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local.h> diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h deleted file mode 100644 index 36c93b5cc23..00000000000 --- a/arch/arm/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h deleted file mode 100644 index 50c7e7cfd67..00000000000 --- a/arch/arm/include/asm/localtimer.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - *  arch/arm/include/asm/localtimer.h - * - *  Copyright (C) 2004-2005 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_ARM_LOCALTIMER_H -#define __ASM_ARM_LOCALTIMER_H - -struct clock_event_device; - -/* - * Setup a per-cpu timer, whether it be a local timer or dummy broadcast - */ -void percpu_timer_setup(void); - -/* - * Called from assembly, this is the local timer IRQ handler - */ -asmlinkage void do_local_timer(struct pt_regs *); - - -#ifdef CONFIG_LOCAL_TIMERS - -#ifdef CONFIG_HAVE_ARM_TWD - -#include "smp_twd.h" - -#define local_timer_ack()	twd_timer_ack() -#define local_timer_stop()	twd_timer_stop() - -#else - -/* - * Platform provides this to acknowledge a local timer IRQ. - * Returns true if the local timer IRQ is to be processed. - */ -int local_timer_ack(void); - -/* - * Stop a local timer interrupt. - */ -void local_timer_stop(void); - -#endif - -/* - * Setup a local timer interrupt for a CPU. - */ -void local_timer_setup(struct clock_event_device *); - -#else - -static inline void local_timer_stop(void) -{ -} - -#endif - -#endif diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h deleted file mode 100644 index ef4c897772d..00000000000 --- a/arch/arm/include/asm/locks.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - *  arch/arm/include/asm/locks.h - * - *  Copyright (C) 2000 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - *  Interrupt safe locking assembler.  
- */ -#ifndef __ASM_PROC_LOCKS_H -#define __ASM_PROC_LOCKS_H - -#if __LINUX_ARM_ARCH__ >= 6 - -#define __down_op(ptr,fail)			\ -	({					\ -	__asm__ __volatile__(			\ -	"@ down_op\n"				\ -"1:	ldrex	lr, [%0]\n"			\ -"	sub	lr, lr, %1\n"			\ -"	strex	ip, lr, [%0]\n"			\ -"	teq	ip, #0\n"			\ -"	bne	1b\n"				\ -"	teq	lr, #0\n"			\ -"	movmi	ip, %0\n"			\ -"	blmi	" #fail				\ -	:					\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	}) - -#define __down_op_ret(ptr,fail)			\ -	({					\ -		unsigned int ret;		\ -	__asm__ __volatile__(			\ -	"@ down_op_ret\n"			\ -"1:	ldrex	lr, [%1]\n"			\ -"	sub	lr, lr, %2\n"			\ -"	strex	ip, lr, [%1]\n"			\ -"	teq	ip, #0\n"			\ -"	bne	1b\n"				\ -"	teq	lr, #0\n"			\ -"	movmi	ip, %1\n"			\ -"	movpl	ip, #0\n"			\ -"	blmi	" #fail "\n"			\ -"	mov	%0, ip"				\ -	: "=&r" (ret)				\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	ret;					\ -	}) - -#define __up_op(ptr,wake)			\ -	({					\ -	smp_mb();				\ -	__asm__ __volatile__(			\ -	"@ up_op\n"				\ -"1:	ldrex	lr, [%0]\n"			\ -"	add	lr, lr, %1\n"			\ -"	strex	ip, lr, [%0]\n"			\ -"	teq	ip, #0\n"			\ -"	bne	1b\n"				\ -"	cmp	lr, #0\n"			\ -"	movle	ip, %0\n"			\ -"	blle	" #wake				\ -	:					\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	}) - -/* - * The value 0x01000000 supports up to 128 processors and - * lots of processes.  BIAS must be chosen such that sub'ing - * BIAS once per CPU will result in the long remaining - * negative. - */ -#define RW_LOCK_BIAS      0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -#define __down_op_write(ptr,fail)		\ -	({					\ -	__asm__ __volatile__(			\ -	"@ down_op_write\n"			\ -"1:	ldrex	lr, [%0]\n"			\ -"	sub	lr, lr, %1\n"			\ -"	strex	ip, lr, [%0]\n"			\ -"	teq	ip, #0\n"			\ -"	bne	1b\n"				\ -"	teq	lr, #0\n"			\ -"	movne	ip, %0\n"			\ -"	blne	" #fail				\ -	:					\ -	: "r" (ptr), "I" (RW_LOCK_BIAS)		\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	}) - -#define __up_op_write(ptr,wake)			\ -	({					\ -	smp_mb();				\ -	__asm__ __volatile__(			\ -	"@ up_op_write\n"			\ -"1:	ldrex	lr, [%0]\n"			\ -"	adds	lr, lr, %1\n"			\ -"	strex	ip, lr, [%0]\n"			\ -"	teq	ip, #0\n"			\ -"	bne	1b\n"				\ -"	movcs	ip, %0\n"			\ -"	blcs	" #wake				\ -	:					\ -	: "r" (ptr), "I" (RW_LOCK_BIAS)		\ -	: "ip", "lr", "cc");			\ -	}) - -#define __down_op_read(ptr,fail)		\ -	__down_op(ptr, fail) - -#define __up_op_read(ptr,wake)			\ -	({					\ -	smp_mb();				\ -	__asm__ __volatile__(			\ -	"@ up_op_read\n"			\ -"1:	ldrex	lr, [%0]\n"			\ -"	add	lr, lr, %1\n"			\ -"	strex	ip, lr, [%0]\n"			\ -"	teq	ip, #0\n"			\ -"	bne	1b\n"				\ -"	teq	lr, #0\n"			\ -"	moveq	ip, %0\n"			\ -"	bleq	" #wake				\ -	:					\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	}) - -#else - -#define __down_op(ptr,fail)			\ -	({					\ -	__asm__ __volatile__(			\ -	"@ down_op\n"				\ -"	mrs	ip, cpsr\n"			\ -"	orr	lr, ip, #128\n"			\ -"	msr	cpsr_c, lr\n"			\ -"	ldr	lr, [%0]\n"			\ -"	subs	lr, lr, %1\n"			\ -"	str	lr, [%0]\n"			\ -"	msr	cpsr_c, ip\n"			\ -"	movmi	ip, %0\n"			\ -"	blmi	" #fail				\ -	:					\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	}) - -#define __down_op_ret(ptr,fail)			\ -	({					\ -		unsigned int ret;		\ -	__asm__ __volatile__(			\ -	"@ down_op_ret\n"			\ -"	mrs	ip, cpsr\n"			\ -"	orr	lr, ip, #128\n"			\ -"	msr	cpsr_c, lr\n"			\ -"	ldr	lr, [%1]\n"			\ -"	subs	lr, lr, %2\n"			\ -"	str	lr, [%1]\n"			\ -"	msr	cpsr_c, ip\n"			\ -"	movmi	ip, %1\n"			\ -"	movpl	ip, #0\n"			\ -"	blmi	" #fail "\n"			\ -"	mov	%0, ip"				\ -	: "=&r" 
(ret)				\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	ret;					\ -	}) - -#define __up_op(ptr,wake)			\ -	({					\ -	smp_mb();				\ -	__asm__ __volatile__(			\ -	"@ up_op\n"				\ -"	mrs	ip, cpsr\n"			\ -"	orr	lr, ip, #128\n"			\ -"	msr	cpsr_c, lr\n"			\ -"	ldr	lr, [%0]\n"			\ -"	adds	lr, lr, %1\n"			\ -"	str	lr, [%0]\n"			\ -"	msr	cpsr_c, ip\n"			\ -"	movle	ip, %0\n"			\ -"	blle	" #wake				\ -	:					\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	}) - -/* - * The value 0x01000000 supports up to 128 processors and - * lots of processes.  BIAS must be chosen such that sub'ing - * BIAS once per CPU will result in the long remaining - * negative. - */ -#define RW_LOCK_BIAS      0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -#define __down_op_write(ptr,fail)		\ -	({					\ -	__asm__ __volatile__(			\ -	"@ down_op_write\n"			\ -"	mrs	ip, cpsr\n"			\ -"	orr	lr, ip, #128\n"			\ -"	msr	cpsr_c, lr\n"			\ -"	ldr	lr, [%0]\n"			\ -"	subs	lr, lr, %1\n"			\ -"	str	lr, [%0]\n"			\ -"	msr	cpsr_c, ip\n"			\ -"	movne	ip, %0\n"			\ -"	blne	" #fail				\ -	:					\ -	: "r" (ptr), "I" (RW_LOCK_BIAS)		\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	}) - -#define __up_op_write(ptr,wake)			\ -	({					\ -	__asm__ __volatile__(			\ -	"@ up_op_write\n"			\ -"	mrs	ip, cpsr\n"			\ -"	orr	lr, ip, #128\n"			\ -"	msr	cpsr_c, lr\n"			\ -"	ldr	lr, [%0]\n"			\ -"	adds	lr, lr, %1\n"			\ -"	str	lr, [%0]\n"			\ -"	msr	cpsr_c, ip\n"			\ -"	movcs	ip, %0\n"			\ -"	blcs	" #wake				\ -	:					\ -	: "r" (ptr), "I" (RW_LOCK_BIAS)		\ -	: "ip", "lr", "cc");			\ -	smp_mb();				\ -	}) - -#define __down_op_read(ptr,fail)		\ -	__down_op(ptr, fail) - -#define __up_op_read(ptr,wake)			\ -	({					\ -	smp_mb();				\ -	__asm__ __volatile__(			\ -	"@ up_op_read\n"			\ -"	mrs	ip, cpsr\n"			\ -"	orr	lr, ip, #128\n"			\ -"	msr	cpsr_c, lr\n"			\ -"	ldr	lr, [%0]\n"			\ -"	adds	lr, lr, %1\n"			\ -"	str	lr, [%0]\n"			\ -"	msr	cpsr_c, ip\n"			\ -"	moveq	ip, %0\n"			\ -"	bleq	" #wake				\ -	:					\ -	: "r" (ptr), "I" (1)			\ -	: "ip", "lr", "cc");			\ -	}) - -#endif - -#endif diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index d97a964207f..0406cb3f1af 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -8,41 +8,76 @@   * published by the Free Software Foundation.   */ +#include <linux/types.h> +  #ifndef __ASSEMBLY__ +#include <linux/reboot.h>  struct tag; -struct meminfo; -struct sys_timer; +struct pt_regs; +struct smp_operations; +#ifdef CONFIG_SMP +#define smp_ops(ops) (&(ops)) +#define smp_init_ops(ops) (&(ops)) +#else +#define smp_ops(ops) (struct smp_operations *)NULL +#define smp_init_ops(ops) (bool (*)(void))NULL +#endif  struct machine_desc { -	/* -	 * Note! 
The first two elements are used -	 * by assembler code in head.S, head-common.S -	 */  	unsigned int		nr;		/* architecture number	*/  	const char		*name;		/* architecture name	*/ -	unsigned long		boot_params;	/* tagged list		*/ +	unsigned long		atag_offset;	/* tagged list (relative) */ +	const char *const 	*dt_compat;	/* array of device tree +						 * 'compatible' strings	*/  	unsigned int		nr_irqs;	/* number of IRQs */ +#ifdef CONFIG_ZONE_DMA +	phys_addr_t		dma_zone_size;	/* size of DMA-able area */ +#endif +  	unsigned int		video_start;	/* start of video RAM	*/  	unsigned int		video_end;	/* end of video RAM	*/ -	unsigned int		reserve_lp0 :1;	/* never has lp0	*/ -	unsigned int		reserve_lp1 :1;	/* never has lp1	*/ -	unsigned int		reserve_lp2 :1;	/* never has lp2	*/ -	unsigned int		soft_reboot :1;	/* soft reboot		*/ -	void			(*fixup)(struct machine_desc *, -					 struct tag *, char **, -					 struct meminfo *); +	unsigned char		reserve_lp0 :1;	/* never has lp0	*/ +	unsigned char		reserve_lp1 :1;	/* never has lp1	*/ +	unsigned char		reserve_lp2 :1;	/* never has lp2	*/ +	enum reboot_mode	reboot_mode;	/* default restart mode	*/ +	unsigned		l2c_aux_val;	/* L2 cache aux value	*/ +	unsigned		l2c_aux_mask;	/* L2 cache aux mask	*/ +	void			(*l2c_write_sec)(unsigned long, unsigned); +	struct smp_operations	*smp;		/* SMP operations	*/ +	bool			(*smp_init)(void); +	void			(*fixup)(struct tag *, char **); +	void			(*dt_fixup)(void); +	void			(*init_meminfo)(void);  	void			(*reserve)(void);/* reserve mem blocks	*/  	void			(*map_io)(void);/* IO mapping function	*/ +	void			(*init_early)(void);  	void			(*init_irq)(void); -	struct sys_timer	*timer;		/* system tick timer	*/ +	void			(*init_time)(void);  	void			(*init_machine)(void); +	void			(*init_late)(void); +#ifdef CONFIG_MULTI_IRQ_HANDLER +	void			(*handle_irq)(struct pt_regs *); +#endif +	void			(*restart)(enum reboot_mode, const char *);  };  /* + * Current machine - only accessible during boot. + */ +extern const struct machine_desc *machine_desc; + +/* + * Machine type table - also only accessible during boot + */ +extern const struct machine_desc __arch_info_begin[], __arch_info_end[]; +#define for_each_machine_desc(p)			\ +	for (p = __arch_info_begin; p < __arch_info_end; p++) + +/*   * Set of macros to define architecture features.  This is built into   * a table by the linker.   */ @@ -56,4 +91,11 @@ static const struct machine_desc __mach_desc_##_type	\  #define MACHINE_END				\  }; +#define DT_MACHINE_START(_name, _namestr)		\ +static const struct machine_desc __mach_desc_##_name	\ + __used							\ + __attribute__((__section__(".arch.info.init"))) = {	\ +	.nr		= ~0,				\ +	.name		= _namestr, +  #endif diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index ce3eee9fe26..2092ee1e130 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -17,10 +17,8 @@ struct seq_file;  /*   * This is internal.  Do not use it.   
*/ -extern unsigned int arch_nr_irqs; -extern void (*init_arch_irq)(void); -extern void init_FIQ(void); -extern int show_fiq_list(struct seq_file *, void *); +extern void init_FIQ(int); +extern int show_fiq_list(struct seq_file *, int);  /*   * This is for easy migration, but should be changed in the source diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index d2fedb5aeb1..f98c7f32c9c 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -9,6 +9,9 @@   *   *  Page table mapping constructs and function prototypes   */ +#ifndef __ASM_MACH_MAP_H +#define __ASM_MACH_MAP_H +  #include <asm/io.h>  struct map_desc { @@ -19,19 +22,33 @@ struct map_desc {  };  /* types 0-3 are defined in asm/io.h */ -#define MT_UNCACHED		4 -#define MT_CACHECLEAN		5 -#define MT_MINICLEAN		6 -#define MT_LOW_VECTORS		7 -#define MT_HIGH_VECTORS		8 -#define MT_MEMORY		9 -#define MT_ROM			10 -#define MT_MEMORY_NONCACHED	11 -#define MT_MEMORY_DTCM		12 -#define MT_MEMORY_ITCM		13 +enum { +	MT_UNCACHED = 4, +	MT_CACHECLEAN, +	MT_MINICLEAN, +	MT_LOW_VECTORS, +	MT_HIGH_VECTORS, +	MT_MEMORY_RWX, +	MT_MEMORY_RW, +	MT_ROM, +	MT_MEMORY_RWX_NONCACHED, +	MT_MEMORY_RW_DTCM, +	MT_MEMORY_RWX_ITCM, +	MT_MEMORY_RW_SO, +	MT_MEMORY_DMA_READY, +};  #ifdef CONFIG_MMU  extern void iotable_init(struct map_desc *, int); +extern void vm_reserve_area_early(unsigned long addr, unsigned long size, +				  void *caller); + +#ifdef CONFIG_DEBUG_LL +extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr); +extern void debug_ll_io_init(void); +#else +static inline void debug_ll_io_init(void) {} +#endif  struct mem_type;  extern const struct mem_type *get_mem_type(unsigned int type); @@ -42,4 +59,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,  			const struct mem_type *mtype);  #else  #define iotable_init(map,num)	do { } while (0) +#define vm_reserve_area_early(a,s,c)	do { } while (0) +#endif +  #endif diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 16330bd0657..7fc42784bec 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -11,21 +11,33 @@  #ifndef __ASM_MACH_PCI_H  #define __ASM_MACH_PCI_H +#include <linux/ioport.h> +  struct pci_sys_data; +struct pci_ops;  struct pci_bus; +struct device;  struct hw_pci {  #ifdef CONFIG_PCI_DOMAINS  	int		domain;  #endif -	struct list_head buses; +	struct pci_ops	*ops;  	int		nr_controllers; +	void		**private_data;  	int		(*setup)(int nr, struct pci_sys_data *);  	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);  	void		(*preinit)(void);  	void		(*postinit)(void);  	u8		(*swizzle)(struct pci_dev *dev, u8 *pin); -	int		(*map_irq)(struct pci_dev *dev, u8 slot, u8 pin); +	int		(*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); +	resource_size_t (*align_resource)(struct pci_dev *dev, +					  const struct resource *res, +					  resource_size_t start, +					  resource_size_t size, +					  resource_size_t align); +	void		(*add_bus)(struct pci_bus *bus); +	void		(*remove_bus)(struct pci_bus *bus);  };  /* @@ -40,45 +52,58 @@ struct pci_sys_data {  	u64		mem_offset;	/* bus->cpu memory mapping offset	*/  	unsigned long	io_offset;	/* bus->cpu IO mapping offset		*/  	struct pci_bus	*bus;		/* PCI bus				*/ -	struct resource *resource[3];	/* Primary PCI bus resources		*/ +	struct list_head resources;	/* root bus resources (apertures)       */ +	struct resource io_res; +	char		io_res_name[12];  					/* Bridge swizzling			*/  	u8		(*swizzle)(struct 
pci_dev *, u8 *);
 					/* IRQ mapping				*/
-	int		(*map_irq)(struct pci_dev *, u8, u8);
-	struct hw_pci	*hw;
+	int		(*map_irq)(const struct pci_dev *, u8, u8);
+					/* Resource alignment requirements	*/
+	resource_size_t (*align_resource)(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align);
+	void		(*add_bus)(struct pci_bus *bus);
+	void		(*remove_bus)(struct pci_bus *bus);
 	void		*private_data;	/* platform controller private data	*/
 };
 
 /*
- * This is the standard PCI-PCI bridge swizzling algorithm.
+ * Call this with your hw_pci struct to initialise the PCI system.
  */
-#define pci_std_swizzle pci_common_swizzle
+void pci_common_init_dev(struct device *, struct hw_pci *);
 
 /*
- * Call this with your hw_pci struct to initialise the PCI system.
+ * Compatibility wrapper for older platforms that do not care about
+ * passing the parent device.
  */
-void pci_common_init(struct hw_pci *);
+static inline void pci_common_init(struct hw_pci *hw)
+{
+	pci_common_init_dev(NULL, hw);
+}
+
+/*
+ * Setup early fixed I/O mapping.
+ */
+#if defined(CONFIG_PCI)
+extern void pci_map_io_early(unsigned long pfn);
+#else
+static inline void pci_map_io_early(unsigned long pfn) {}
+#endif
 
 /*
  * PCI controllers
  */
+extern struct pci_ops iop3xx_ops;
 extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
 extern void iop3xx_pci_preinit(void);
 extern void iop3xx_pci_preinit_cond(void);
 
+extern struct pci_ops dc21285_ops;
 extern int dc21285_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
 extern void dc21285_preinit(void);
 extern void dc21285_postinit(void);
 
-extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
-extern void via82c505_init(void *sysdata);
-
-extern int pci_v3_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
-extern void pci_v3_preinit(void);
-extern void pci_v3_postinit(void);
-
 #endif /* __ASM_MACH_PCI_H */ diff --git a/arch/arm/include/asm/mach/serial_at91.h b/arch/arm/include/asm/mach/serial_at91.h deleted file mode 100644 index ea6d063923b..00000000000 --- a/arch/arm/include/asm/mach/serial_at91.h +++ /dev/null @@ -1,33 +0,0 @@
-/*
- *  arch/arm/include/asm/mach/serial_at91.h
- *
- *  Based on serial_sa1100.h  by Nicolas Pitre
- *
- *  Copyright (C) 2002 ATMEL Rousset
- *
- *  Low level machine dependent UART functions.
- */
-
-struct uart_port;
-
-/*
- * This is a temporary structure for registering these
- * functions; it is intended to be discarded after boot. 
- */ -struct atmel_port_fns { -	void	(*set_mctrl)(struct uart_port *, u_int); -	u_int	(*get_mctrl)(struct uart_port *); -	void	(*enable_ms)(struct uart_port *); -	void	(*pm)(struct uart_port *, u_int, u_int); -	int	(*set_wake)(struct uart_port *, u_int); -	int	(*open)(struct uart_port *); -	void	(*close)(struct uart_port *); -}; - -#if defined(CONFIG_SERIAL_ATMEL) -void atmel_register_uart_fns(struct atmel_port_fns *fns); -#else -#define atmel_register_uart_fns(fns) do { } while (0) -#endif - - diff --git a/arch/arm/include/asm/mach/serial_sa1100.h b/arch/arm/include/asm/mach/serial_sa1100.h deleted file mode 100644 index d09064bf95a..00000000000 --- a/arch/arm/include/asm/mach/serial_sa1100.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - *  arch/arm/include/asm/mach/serial_sa1100.h - * - *  Author: Nicolas Pitre - * - * Moved and changed lots, Russell King - * - * Low level machine dependent UART functions. - */ - -struct uart_port; -struct uart_info; - -/* - * This is a temporary structure for registering these - * functions; it is intended to be discarded after boot. - */ -struct sa1100_port_fns { -	void	(*set_mctrl)(struct uart_port *, u_int); -	u_int	(*get_mctrl)(struct uart_port *); -	void	(*pm)(struct uart_port *, u_int, u_int); -	int	(*set_wake)(struct uart_port *, u_int); -}; - -#ifdef CONFIG_SERIAL_SA1100 -void sa1100_register_uart_fns(struct sa1100_port_fns *fns); -void sa1100_register_uart(int idx, int port); -#else -#define sa1100_register_uart_fns(fns) do { } while (0) -#define sa1100_register_uart(idx,port) do { } while (0) -#endif diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index 35d408f6dcc..90c12e1e695 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -10,40 +10,11 @@  #ifndef __ASM_ARM_MACH_TIME_H  #define __ASM_ARM_MACH_TIME_H -#include <linux/sysdev.h> - -/* - * This is our kernel timer structure. - * - * - init - *   Initialise the kernels jiffy timer source, claim interrupt - *   using setup_irq.  This is called early on during initialisation - *   while interrupts are still disabled on the local CPU. - * - suspend - *   Suspend the kernel jiffy timer source, if necessary.  This - *   is called with interrupts disabled, after all normal devices - *   have been suspended.  If no action is required, set this to - *   NULL. - * - resume - *   Resume the kernel jiffy timer source, if necessary.  This - *   is called with interrupts disabled before any normal devices - *   are resumed.  If no action is required, set this to NULL. - * - offset - *   Return the timer offset in microseconds since the last timer - *   interrupt.  Note: this must take account of any unprocessed - *   timer interrupt which may be pending. 
- */ -struct sys_timer { -	struct sys_device	dev; -	void			(*init)(void); -	void			(*suspend)(void); -	void			(*resume)(void); -#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET -	unsigned long		(*offset)(void); -#endif -}; - -extern struct sys_timer *system_timer;  extern void timer_tick(void); +struct timespec; +typedef void (*clock_access_fn)(struct timespec *); +extern int register_persistent_clock(clock_access_fn read_boot, +				     clock_access_fn read_persistent); +  #endif diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h deleted file mode 100644 index 833306ee9e7..00000000000 --- a/arch/arm/include/asm/mach/udc_pxa2xx.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * arch/arm/include/asm/mach/udc_pxa2xx.h - * - * This supports machine-specific differences in how the PXA2xx - * USB Device Controller (UDC) is wired. - * - * It is set in linux/arch/arm/mach-pxa/<machine>.c or in - * linux/arch/mach-ixp4xx/<machine>.c and used in - * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c - */ - -struct pxa2xx_udc_mach_info { -        int  (*udc_is_connected)(void);		/* do we see host? */ -        void (*udc_command)(int cmd); -#define	PXA2XX_UDC_CMD_CONNECT		0	/* let host see us */ -#define	PXA2XX_UDC_CMD_DISCONNECT	1	/* so host won't see us */ - -	/* Boards following the design guidelines in the developer's manual, -	 * with on-chip GPIOs not Lubbock's weird hardware, can have a sane -	 * VBUS IRQ and omit the methods above.  Store the GPIO number -	 * here.  Note that sometimes the signals go through inverters... -	 */ -	bool	gpio_vbus_inverted; -	int	gpio_vbus;			/* high == vbus present */ -	bool	gpio_pullup_inverted; -	int	gpio_pullup;			/* high == pullup activated */ -}; - diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h index 6b884d2b0b6..e8567bb99df 100644 --- a/arch/arm/include/asm/mc146818rtc.h +++ b/arch/arm/include/asm/mc146818rtc.h @@ -5,7 +5,9 @@  #define _ASM_MC146818RTC_H  #include <linux/io.h> -#include <mach/irqs.h> +#include <linux/kernel.h> + +#define RTC_IRQ BUILD_BUG_ON(1)  #ifndef RTC_PORT  #define RTC_PORT(x)	(0x70 + (x)) diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h new file mode 100644 index 00000000000..94060adba17 --- /dev/null +++ b/arch/arm/include/asm/mcpm.h @@ -0,0 +1,259 @@ +/* + * arch/arm/include/asm/mcpm.h + * + * Created by:  Nicolas Pitre, April 2012 + * Copyright:   (C) 2012-2013  Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef MCPM_H +#define MCPM_H + +/* + * Maximum number of possible clusters / CPUs per cluster. + * + * This should be sufficient for quite a while, while keeping the + * (assembly) code simpler.  When this starts to grow then we'll have + * to consider dynamic allocation. + */ +#define MAX_CPUS_PER_CLUSTER	4 +#define MAX_NR_CLUSTERS		2 + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/cacheflush.h> + +/* + * Platform specific code should use this symbol to set up secondary + * entry location for processors to use when released from reset. + */ +extern void mcpm_entry_point(void); + +/* + * This is used to indicate where the given CPU from given cluster should + * branch once it is ready to re-enter the kernel using ptr, or NULL if it + * should be gated.  A gated CPU is held in a WFE loop until its vector + * becomes non NULL. 
+ */
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
+
+/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated.  The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+			 unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_is_available - returns whether MCPM is initialized and available
+ *
+ * This returns true or false accordingly.
+ */
+bool mcpm_is_available(void);
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset.  If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context.  However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
+ * specified cpu.  Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
+ *	make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ *	- zero if the CPU is in a safely parked state
+ *	- nonzero otherwise (e.g., timeout)
+ */
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU into a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *			to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended.  
The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work.  This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+	int (*power_up)(unsigned int cpu, unsigned int cluster);
+	void (*power_down)(void);
+	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
+	void (*suspend)(u64);
+	void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
+/* Synchronisation structures for coordinating safe cluster setup/teardown: */
+
+/*
+ * When modifying this structure, make sure you update the MCPM_SYNC_ defines
+ * to match.
+ */
+struct mcpm_sync_struct {
+	/* individual CPU states */
+	struct {
+		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
+	} cpus[MAX_CPUS_PER_CLUSTER];
+
+	/* cluster state */
+	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
+
+	/* inbound-side state */
+	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
+};
+
+struct sync_struct {
+	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
+};
+
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
+bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
+int __mcpm_cluster_state(unsigned int cluster);
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level));
+
+void __init mcpm_smp_set_ops(void);
+
+#else
+
+/*
+ * asm-offsets.h causes trouble when included in .c files, and cacheflush.h
+ * cannot be included in asm files.  Let's work around the conflict like this.
+ */
+#include <asm/asm-offsets.h>
+#define __CACHE_WRITEBACK_GRANULE CACHE_WRITEBACK_GRANULE
+
+#endif /* ! __ASSEMBLY__ */
+
+/* Definitions for mcpm_sync_struct */
+#define CPU_DOWN		0x11
+#define CPU_COMING_UP		0x12
+#define CPU_UP			0x13
+#define CPU_GOING_DOWN		0x14
+
+#define CLUSTER_DOWN		0x21
+#define CLUSTER_UP		0x22
+#define CLUSTER_GOING_DOWN	0x23
+
+#define INBOUND_NOT_COMING_UP	0x31
+#define INBOUND_COMING_UP	0x32
+
+/*
+ * Offsets for the mcpm_sync_struct members, for use in asm. 
+ * We don't want to make them global to the kernel via asm-offsets.c. + */ +#define MCPM_SYNC_CLUSTER_CPUS	0 +#define MCPM_SYNC_CPU_SIZE	__CACHE_WRITEBACK_GRANULE +#define MCPM_SYNC_CLUSTER_CLUSTER \ +	(MCPM_SYNC_CLUSTER_CPUS + MCPM_SYNC_CPU_SIZE * MAX_CPUS_PER_CLUSTER) +#define MCPM_SYNC_CLUSTER_INBOUND \ +	(MCPM_SYNC_CLUSTER_CLUSTER + __CACHE_WRITEBACK_GRANULE) +#define MCPM_SYNC_CLUSTER_SIZE \ +	(MCPM_SYNC_CLUSTER_INBOUND + __CACHE_WRITEBACK_GRANULE) + +#endif diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h index b8da2e415e4..bf47a6c110a 100644 --- a/arch/arm/include/asm/memblock.h +++ b/arch/arm/include/asm/memblock.h @@ -1,9 +1,9 @@  #ifndef _ASM_ARM_MEMBLOCK_H  #define _ASM_ARM_MEMBLOCK_H -struct meminfo;  struct machine_desc; -extern void arm_memblock_init(struct meminfo *, struct machine_desc *); +void arm_memblock_init(const struct machine_desc *); +phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);  #endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 23c2e8e5c0f..2b751464d6f 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -15,8 +15,14 @@  #include <linux/compiler.h>  #include <linux/const.h> +#include <linux/types.h> +#include <linux/sizes.h> + +#include <asm/cache.h> + +#ifdef CONFIG_NEED_MACH_MEMORY_H  #include <mach/memory.h> -#include <asm/sizes.h> +#endif  /*   * Allow for constants defined here to be used from assembly code @@ -24,31 +30,32 @@   */  #define UL(x) _AC(x, UL) +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET) +  #ifdef CONFIG_MMU  /* - * PAGE_OFFSET - the virtual address of the start of the kernel image   * TASK_SIZE - the maximum size of a user space task.   * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area   */ -#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET) -#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3) +#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) +#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)  /*   * The maximum size of a 26-bit user space task.   */ -#define TASK_SIZE_26		UL(0x04000000) +#define TASK_SIZE_26		(UL(1) << 26)  /*   * The module space lives between the addresses given by TASK_SIZE   * and PAGE_OFFSET - it must be within 32MB of the kernel text.   */  #ifndef CONFIG_THUMB2_KERNEL -#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024) +#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)  #else  /* smaller range for Thumb-2 symbols relocation (2^24)*/ -#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024) +#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)  #endif  #if TASK_SIZE > MODULES_VADDR @@ -76,17 +83,6 @@   */  #define IOREMAP_MAX_ORDER	24 -/* - * Size of DMA-consistent memory region.  Must be multiple of 2M, - * between 2MB and 14MB inclusive. - */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE 	SZ_2M -#endif - -#define CONSISTENT_END		(0xffe00000UL) -#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE) -  #else /* CONFIG_MMU */  /* @@ -103,23 +99,17 @@  #define TASK_UNMAPPED_BASE	UL(0x00000000)  #endif -#ifndef PHYS_OFFSET -#define PHYS_OFFSET 		UL(CONFIG_DRAM_BASE) -#endif -  #ifndef END_MEM  #define END_MEM     		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)  #endif -#ifndef PAGE_OFFSET -#define PAGE_OFFSET		(PHYS_OFFSET) -#endif -  /*   * The module can be at any place in ram in nommu mode.   
 */
 #define MODULES_END		(END_MEM)
-#define MODULES_VADDR		(PHYS_OFFSET)
+#define MODULES_VADDR		PAGE_OFFSET
+
+#define XIP_VIRT_ADDR(physaddr)  (physaddr)
 
 #endif /* !CONFIG_MMU */ @@ -133,20 +123,10 @@
 #endif
 
 /*
- * Physical vs virtual RAM address space conversion.  These are
- * private definitions which should NOT be used outside memory.h
- * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-#ifndef __virt_to_phys
-#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-
-/*
  * Convert a physical address to a Page Frame Number and back
  */
-#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
-#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
+#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
+#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 /*
  * Convert a page to/from a physical address @@ -154,33 +134,143 @@
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
 
-#ifndef __ASSEMBLY__
-
 /*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA.  The default here places no restriction on DMA
- * allocations.  This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
+ * Minimum guaranteed alignment in pgd_alloc().  The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension.  This
+ * does not fully accommodate the 40-bit addressing capability of ARM LPAE, but
+ * gives us about 38 bits or so.
  */
-#ifndef ISA_DMA_THRESHOLD
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT		L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT		0
 #endif
+#define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
 
-#ifndef arch_adjust_zones
-#define arch_adjust_zones(size,holes) do { } while (0)
-#elif !defined(CONFIG_ZONE_DMA)
-#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h.  Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
+ * Physical vs virtual RAM address space conversion.  These are
+ * private definitions which should NOT be used outside memory.h
+ * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ *
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
- *
- * This is the PFN of the first RAM page in the kernel
- * direct-mapped view.  We assume this is the first page
- * of RAM in the mem_map as well.
  */
-#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
+#if defined(__virt_to_phys)
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+
+#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)
+
+/*
+ * Constants used to force the right instruction encodings and shifts
+ * so that all we need to do is modify the 8-bit constant field. 
+ */
+#define __PV_BITS_31_24	0x81000000
+#define __PV_BITS_7_0	0x81
+
+extern unsigned long __pv_phys_pfn_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
+
+#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
+
+#define __pv_stub(from,to,instr,type)			\
+	__asm__("@ __pv_stub\n"				\
+	"1:	" instr "	%0, %1, %2\n"		\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (to)					\
+	: "r" (from), "I" (type))
+
+#define __pv_stub_mov_hi(t)				\
+	__asm__ volatile("@ __pv_stub_mov\n"		\
+	"1:	mov	%R0, %1\n"			\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (t)					\
+	: "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y)			\
+	__asm__ volatile("@ __pv_add_carry_stub\n"	\
+	"1:	adds	%Q0, %1, %2\n"			\
+	"	adc	%R0, %R0, #0\n"			\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "+r" (y)					\
+	: "r" (x), "I" (__PV_BITS_31_24)		\
+	: "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+	phys_addr_t t;
+
+	if (sizeof(phys_addr_t) == 4) {
+		__pv_stub(x, t, "add", __PV_BITS_31_24);
+	} else {
+		__pv_stub_mov_hi(t);
+		__pv_add_carry_stub(x, t);
+	}
+	return t;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+	unsigned long t;
+
+	/*
+	 * The 'unsigned long' cast discards the upper word when
+	 * phys_addr_t is 64 bit, and makes sure that the inline
+	 * assembler expression receives a 32 bit argument in the
+	 * place where an 'r' 32 bit operand is expected.
+	 */
+	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
+	return t;
+}
+
+#else
+
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+	return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
+#define virt_to_pfn(kaddr) \
+	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
+	 PHYS_PFN_OFFSET)
+
+#endif
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory. @@ -188,23 +278,40 @@
  * translation for translating DMA addresses.  Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(void *x)
+static inline phys_addr_t virt_to_phys(const volatile void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }
 
-static inline void *phys_to_virt(unsigned long x)
+static inline void *phys_to_virt(phys_addr_t x)
 {
-	return (void *)(__phys_to_virt((unsigned long)(x)));
+	return (void *)__phys_to_virt(x);
 }
 
 /*
  * Drivers should NOT use these either.
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
-#define __va(x)			((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
+extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+
+/*
+ * These are for systems that have a hardware interconnect supported alias of
+ * physical memory for idmap purposes.  Most cases should leave these
+ * untouched. 
+ */ +static inline phys_addr_t __virt_to_idmap(unsigned long x) +{ +	if (arch_virt_to_idmap) +		return arch_virt_to_idmap(x); +	else +		return __virt_to_phys(x); +} + +#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x)) +  /*   * Virtual <-> DMA view memory address translations   * Again, these are *only* valid on the kernel direct mapped RAM @@ -218,6 +325,7 @@ static inline void *phys_to_virt(unsigned long x)  #define __bus_to_pfn(x)	__phys_to_pfn(x)  #endif +#ifdef CONFIG_VIRT_TO_BUS  static inline __deprecated unsigned long virt_to_bus(void *x)  {  	return __virt_to_bus((unsigned long)x); @@ -227,16 +335,11 @@ static inline __deprecated void *bus_to_virt(unsigned long x)  {  	return (void *)__bus_to_virt(x);  } +#endif  /*   * Conversion between a struct page and a physical address.   * - * Note: when converting an unknown physical address to a - * struct page, the resulting pointer must be validated - * using VALID_PAGE().  It must return an invalid struct page - * for any physical address not corresponding to a system - * RAM address. - *   *  page_to_pfn(page)	convert a struct page * to a PFN number   *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *   * @@ -245,16 +348,9 @@ static inline __deprecated void *bus_to_virt(unsigned long x)   */  #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET -#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) - -/* - * Optional coherency support.  Currently used only by selected - * Intel XSC3-based systems. - */ -#ifndef arch_is_coherent -#define arch_is_coherent()		0 -#endif +#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr)) +#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \ +					&& pfn_valid(virt_to_pfn(kaddr)))  #endif diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h deleted file mode 100644 index 41f99c573b9..00000000000 --- a/arch/arm/include/asm/mman.h +++ /dev/null @@ -1,4 +0,0 @@ -#include <asm-generic/mman.h> - -#define arch_mmap_check(addr, len, flags) \ -	(((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0) diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 68870c77667..64fd15159b7 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -5,14 +5,18 @@  typedef struct {  #ifdef CONFIG_CPU_HAS_ASID -	unsigned int id; -	spinlock_t id_lock; +	atomic64_t	id; +#else +	int		switch_pending;  #endif -	unsigned int kvm_seq; +	unsigned int	vmalloc_seq; +	unsigned long	sigpage;  } mm_context_t;  #ifdef CONFIG_CPU_HAS_ASID -#define ASID(mm)	((mm)->context.id & 255) +#define ASID_BITS	8 +#define ASID_MASK	((~0ULL) << ASID_BITS) +#define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))  #else  #define ASID(mm)	(0)  #endif @@ -25,7 +29,7 @@ typedef struct {   *  modified for 2.6 by Hyok S. 
Choi <hyok.choi@samsung.com>   */  typedef struct { -	unsigned long		end_brk; +	unsigned long	end_brk;  } mm_context_t;  #endif diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 71605d9f8e4..9b32f76bb0d 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -18,71 +18,79 @@  #include <asm/cacheflush.h>  #include <asm/cachetype.h>  #include <asm/proc-fns.h> +#include <asm/smp_plat.h> +#include <asm-generic/mm_hooks.h> -void __check_kvm_seq(struct mm_struct *mm); +void __check_vmalloc_seq(struct mm_struct *mm);  #ifdef CONFIG_CPU_HAS_ASID -/* - * On ARMv6, we have the following structure in the Context ID: - * - * 31                         7          0 - * +-------------------------+-----------+ - * |      process ID         |   ASID    | - * +-------------------------+-----------+ - * |              context ID             | - * +-------------------------------------+ - * - * The ASID is used to tag entries in the CPU caches and TLBs. - * The context ID is used by debuggers and trace logic, and - * should be unique within all running processes. - */ -#define ASID_BITS		8 -#define ASID_MASK		((~0) << ASID_BITS) -#define ASID_FIRST_VERSION	(1 << ASID_BITS) +void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); +#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; }) -extern unsigned int cpu_last_asid; -#ifdef CONFIG_SMP -DECLARE_PER_CPU(struct mm_struct *, current_mm); -#endif - -void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void __new_context(struct mm_struct *mm); - -static inline void check_context(struct mm_struct *mm) +#ifdef CONFIG_ARM_ERRATA_798181 +void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, +			     cpumask_t *mask); +#else  /* !CONFIG_ARM_ERRATA_798181 */ +static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, +					   cpumask_t *mask)  { -	/* -	 * This code is executed with interrupts enabled. Therefore, -	 * mm->context.id cannot be updated to the latest ASID version -	 * on a different CPU (and condition below not triggered) -	 * without first getting an IPI to reset the context. The -	 * alternative is to take a read_lock on mm->context.id_lock -	 * (after changing its type to rwlock_t). -	 */ -	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) -		__new_context(mm); - -	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) -		__check_kvm_seq(mm);  } +#endif /* CONFIG_ARM_ERRATA_798181 */ -#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0) +#else	/* !CONFIG_CPU_HAS_ASID */ -#else +#ifdef CONFIG_MMU -static inline void check_context(struct mm_struct *mm) +static inline void check_and_switch_context(struct mm_struct *mm, +					    struct task_struct *tsk)  { -#ifdef CONFIG_MMU -	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq)) -		__check_kvm_seq(mm); -#endif +	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) +		__check_vmalloc_seq(mm); + +	if (irqs_disabled()) +		/* +		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid +		 * high interrupt latencies, defer the call and continue +		 * running with the old mm. Since we only support UP systems +		 * on non-ASID CPUs, the old mm will remain valid until the +		 * finish_arch_post_lock_switch() call. 
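+		 *
+		 * The deferred switch thus completes in two steps:
+		 *
+		 *	switch_mm()                      (IRQs off)
+		 *	  check_and_switch_context()     -> switch_pending = 1
+		 *	...
+		 *	finish_arch_post_lock_switch()   (IRQs on again)
+		 *	  cpu_switch_mm(mm->pgd, mm)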
+		 */ +		mm->context.switch_pending = 1; +	else +		cpu_switch_mm(mm->pgd, mm);  } +#define finish_arch_post_lock_switch \ +	finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) +{ +	struct mm_struct *mm = current->mm; + +	if (mm && mm->context.switch_pending) { +		/* +		 * Preemption must be disabled during cpu_switch_mm() as we +		 * have some stateful cache flush implementations. Check +		 * switch_pending again in case we were preempted and the +		 * switch to this mm was already done. +		 */ +		preempt_disable(); +		if (mm->context.switch_pending) { +			mm->context.switch_pending = 0; +			cpu_switch_mm(mm->pgd, mm); +		} +		preempt_enable_no_resched(); +	} +} + +#endif	/* CONFIG_MMU */ +  #define init_new_context(tsk,mm)	0 -#endif +#endif	/* CONFIG_CPU_HAS_ASID */  #define destroy_context(mm)		do { } while(0) +#define activate_mm(prev,next)		switch_mm(prev, next, NULL)  /*   * This is called when "tsk" is about to enter lazy TLB mode. @@ -111,19 +119,18 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,  #ifdef CONFIG_MMU  	unsigned int cpu = smp_processor_id(); -#ifdef CONFIG_SMP -	/* check for possible thread migration */ -	if (!cpumask_empty(mm_cpumask(next)) && +	/* +	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation, +	 * so check for possible thread migration and invalidate the I-cache +	 * if we're new to this CPU. +	 */ +	if (cache_ops_need_broadcast() && +	    !cpumask_empty(mm_cpumask(next)) &&  	    !cpumask_test_cpu(cpu, mm_cpumask(next)))  		__flush_icache_all(); -#endif +  	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { -#ifdef CONFIG_SMP -		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); -		*crt_mm = next; -#endif -		check_context(next); -		cpu_switch_mm(next->pgd, next); +		check_and_switch_context(next, tsk);  		if (cache_is_vivt())  			cpumask_clear_cpu(cpu, mm_cpumask(prev));  	} @@ -131,34 +138,5 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,  }  #define deactivate_mm(tsk,mm)	do { } while (0) -#define activate_mm(prev,next)	switch_mm(prev, next, NULL) - -/* - * We are inserting a "fake" vma for the user-accessible vector page so - * gdb and friends can get to it through ptrace and /proc/<pid>/mem. - * But we also want to remove it before the generic code gets to see it - * during process exit or the unmapping of it would  cause total havoc. 
- * (the macro is used as remove_vma() is static to mm/mmap.c) - */ -#define arch_exit_mmap(mm) \ -do { \ -	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ -	if (high_vma) { \ -		BUG_ON(high_vma->vm_next);  /* it should be last */ \ -		if (high_vma->vm_prev) \ -			high_vma->vm_prev->vm_next = NULL; \ -		else \ -			mm->mmap = NULL; \ -		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ -		mm->mmap_cache = NULL; \ -		mm->map_count--; \ -		remove_vma(high_vma); \ -	} \ -} while (0) - -static inline void arch_dup_mmap(struct mm_struct *oldmm, -				 struct mm_struct *mm) -{ -}  #endif diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index cbb0bc295d2..ed690c49ef9 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -1,37 +1,49 @@  #ifndef _ASM_ARM_MODULE_H  #define _ASM_ARM_MODULE_H -#define Elf_Shdr	Elf32_Shdr -#define Elf_Sym		Elf32_Sym -#define Elf_Ehdr	Elf32_Ehdr +#include <asm-generic/module.h>  struct unwind_table;  #ifdef CONFIG_ARM_UNWIND -struct arm_unwind_mapping { -	Elf_Shdr *unw_sec; -	Elf_Shdr *sec_text; -	struct unwind_table *unwind; -};  enum {  	ARM_SEC_INIT,  	ARM_SEC_DEVINIT,  	ARM_SEC_CORE,  	ARM_SEC_EXIT,  	ARM_SEC_DEVEXIT, +	ARM_SEC_HOT, +	ARM_SEC_UNLIKELY,  	ARM_SEC_MAX,  }; +  struct mod_arch_specific { -	struct arm_unwind_mapping map[ARM_SEC_MAX]; -}; -#else -struct mod_arch_specific { +	struct unwind_table *unwind[ARM_SEC_MAX];  };  #endif  /* - * Include the ARM architecture version. + * Add the ARM architecture version to the version magic string   */ -#define MODULE_ARCH_VERMAGIC	"ARMv" __stringify(__LINUX_ARM_ARCH__) " " +#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " " + +/* Add __virt_to_phys patching state as well */ +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +#define MODULE_ARCH_VERMAGIC_P2V "p2v8 " +#else +#define MODULE_ARCH_VERMAGIC_P2V "" +#endif + +/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */ +#ifdef CONFIG_THUMB2_KERNEL +#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 " +#else +#define MODULE_ARCH_VERMAGIC_ARMTHUMB "" +#endif + +#define MODULE_ARCH_VERMAGIC \ +	MODULE_ARCH_VERMAGIC_ARMVSN \ +	MODULE_ARCH_VERMAGIC_ARMTHUMB \ +	MODULE_ARCH_VERMAGIC_P2V  #endif /* _ASM_ARM_MODULE_H */ diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h new file mode 100644 index 00000000000..c3247cc2fe0 --- /dev/null +++ b/arch/arm/include/asm/mpu.h @@ -0,0 +1,76 @@ +#ifndef __ARM_MPU_H +#define __ARM_MPU_H + +#ifdef CONFIG_ARM_MPU + +/* MPUIR layout */ +#define MPUIR_nU		1 +#define MPUIR_DREGION		8 +#define MPUIR_IREGION		16 +#define MPUIR_DREGION_SZMASK	(0xFF << MPUIR_DREGION) +#define MPUIR_IREGION_SZMASK	(0xFF << MPUIR_IREGION) + +/* ID_MMFR0 data relevant to MPU */ +#define MMFR0_PMSA		(0xF << 4) +#define MMFR0_PMSAv7		(3 << 4) + +/* MPU D/I Size Register fields */ +#define MPU_RSR_SZ		1 +#define MPU_RSR_EN		0 + +/* The D/I RSR value for an enabled region spanning the whole of memory */ +#define MPU_RSR_ALL_MEM		63 + +/* Individual bits in the DR/IR ACR */ +#define MPU_ACR_XN		(1 << 12) +#define MPU_ACR_SHARED		(1 << 2) + +/* C, B and TEX[2:0] bits only have semantic meanings when grouped */ +#define MPU_RGN_CACHEABLE	0xB +#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#define MPU_RGN_STRONGLY_ORDERED 0 + +/* Main region should only be shared for SMP */ +#ifdef CONFIG_SMP +#define MPU_RGN_NORMAL		(MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#else +#define MPU_RGN_NORMAL		MPU_RGN_CACHEABLE +#endif + +/* Access permission bits of 
ACR (only define those that we use)*/ +#define MPU_AP_PL1RW_PL0RW	(0x3 << 8) +#define MPU_AP_PL1RW_PL0R0	(0x2 << 8) +#define MPU_AP_PL1RW_PL0NA	(0x1 << 8) + +/* For minimal static MPU region configurations */ +#define MPU_PROBE_REGION	0 +#define MPU_BG_REGION		1 +#define MPU_RAM_REGION		2 +#define MPU_VECTORS_REGION	3 + +/* Maximum number of regions Linux is interested in */ +#define MPU_MAX_REGIONS		16 + +#define MPU_DATA_SIDE		0 +#define MPU_INSTR_SIDE		1 + +#ifndef __ASSEMBLY__ + +struct mpu_rgn { +	/* Assume same attributes for d/i-side  */ +	u32 drbar; +	u32 drsr; +	u32 dracr; +}; + +struct mpu_rgn_info { +	u32 mpuir; +	struct mpu_rgn rgns[MPU_MAX_REGIONS]; +}; +extern struct mpu_rgn_info mpu_rgn_info; + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_ARM_MPU */ + +#endif diff --git a/arch/arm/include/asm/msgbuf.h b/arch/arm/include/asm/msgbuf.h deleted file mode 100644 index 33b35b946ea..00000000000 --- a/arch/arm/include/asm/msgbuf.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _ASMARM_MSGBUF_H -#define _ASMARM_MSGBUF_H - -/*  - * The msqid64_ds structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { -	struct ipc64_perm msg_perm; -	__kernel_time_t msg_stime;	/* last msgsnd time */ -	unsigned long	__unused1; -	__kernel_time_t msg_rtime;	/* last msgrcv time */ -	unsigned long	__unused2; -	__kernel_time_t msg_ctime;	/* last change time */ -	unsigned long	__unused3; -	unsigned long  msg_cbytes;	/* current number of bytes on queue */ -	unsigned long  msg_qnum;	/* number of messages in queue */ -	unsigned long  msg_qbytes;	/* max number of bytes on queue */ -	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */ -	__kernel_pid_t msg_lrpid;	/* last receive pid */ -	unsigned long  __unused4; -	unsigned long  __unused5; -}; - -#endif /* _ASMARM_MSGBUF_H */ diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h index 93226cf23ae..87c044910fe 100644 --- a/arch/arm/include/asm/mutex.h +++ b/arch/arm/include/asm/mutex.h @@ -7,121 +7,15 @@   */  #ifndef _ASM_MUTEX_H  #define _ASM_MUTEX_H - -#if __LINUX_ARM_ARCH__ < 6 -/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */ -# include <asm-generic/mutex-xchg.h> -#else - -/* - * Attempting to lock a mutex on ARMv6+ can be done with a bastardized - * atomic decrement (it is not a reliable atomic decrement but it satisfies - * the defined semantics for our purpose, while being smaller and faster - * than a real atomic decrement or atomic swap.  The idea is to attempt - * decrementing the lock value only once.  If once decremented it isn't zero, - * or if its store-back fails due to a dispute on the exclusive store, we - * simply bail out immediately through the slow path where the lock will be - * reattempted until it succeeds. 
- */ -static inline void -__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ -	int __ex_flag, __res; - -	__asm__ ( - -		"ldrex	%0, [%2]	\n\t" -		"sub	%0, %0, #1	\n\t" -		"strex	%1, %0, [%2]	" - -		: "=&r" (__res), "=&r" (__ex_flag) -		: "r" (&(count)->counter) -		: "cc","memory" ); - -	__res |= __ex_flag; -	if (unlikely(__res != 0)) -		fail_fn(count); -} - -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ -	int __ex_flag, __res; - -	__asm__ ( - -		"ldrex	%0, [%2]	\n\t" -		"sub	%0, %0, #1	\n\t" -		"strex	%1, %0, [%2]	" - -		: "=&r" (__res), "=&r" (__ex_flag) -		: "r" (&(count)->counter) -		: "cc","memory" ); - -	__res |= __ex_flag; -	if (unlikely(__res != 0)) -		__res = fail_fn(count); -	return __res; -} - -/* - * Same trick is used for the unlock fast path. However the original value, - * rather than the result, is used to test for success in order to have - * better generated assembly. - */ -static inline void -__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) -{ -	int __ex_flag, __res, __orig; - -	__asm__ ( - -		"ldrex	%0, [%3]	\n\t" -		"add	%1, %0, #1	\n\t" -		"strex	%2, %1, [%3]	" - -		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) -		: "r" (&(count)->counter) -		: "cc","memory" ); - -	__orig |= __ex_flag; -	if (unlikely(__orig != 0)) -		fail_fn(count); -} -  /* - * If the unlock was done on a contended lock, or if the unlock simply fails - * then the mutex remains locked. + * On pre-ARMv6 hardware this results in a swp-based implementation, + * which is the most efficient. For ARMv6+, we have exclusive memory + * accessors and use atomic_dec to avoid the extra xchg operations + * on the locking slowpaths.   */ -#define __mutex_slowpath_needs_to_unlock()	1 - -/* - * For __mutex_fastpath_trylock we use another construct which could be - * described as a "single value cmpxchg". - * - * This provides the needed trylock semantics like cmpxchg would, but it is - * lighter and less generic than a true cmpxchg implementation. - */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ -	int __ex_flag, __res, __orig; - -	__asm__ ( - -		"1: ldrex	%0, [%3]	\n\t" -		"subs		%1, %0, #1	\n\t" -		"strexeq	%2, %1, [%3]	\n\t" -		"movlt		%0, #0		\n\t" -		"cmpeq		%2, #0		\n\t" -		"bgt		1b		" - -		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) -		: "r" (&count->counter) -		: "cc", "memory" ); - -	return __orig; -} - -#endif +#if __LINUX_ARM_ARCH__ < 6 +#include <asm-generic/mutex-xchg.h> +#else +#include <asm-generic/mutex-dec.h>  #endif +#endif	/* _ASM_MUTEX_H */ diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h new file mode 100644 index 00000000000..8f730fe7009 --- /dev/null +++ b/arch/arm/include/asm/neon.h @@ -0,0 +1,36 @@ +/* + * linux/arch/arm/include/asm/neon.h + * + * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/hwcap.h> + +#define cpu_has_neon()		(!!(elf_hwcap & HWCAP_NEON)) + +#ifdef __ARM_NEON__ + +/* + * If you are affected by the BUILD_BUG below, it probably means that you are + * using NEON code /and/ calling the kernel_neon_begin() function from the same + * compilation unit. 
To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ *     -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+	BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h
new file mode 100644
index 00000000000..bc3a9174417
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-sec.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef __ASM_ARM_OPCODES_SEC_H
+#define __ASM_ARM_OPCODES_SEC_H
+
+#include <asm/opcodes.h>
+
+#define __SMC(imm4) __inst_arm_thumb32(					\
+	0xE1600070 | (((imm4) & 0xF) << 0),				\
+	0xF7F08000 | (((imm4) & 0xF) << 16)				\
+)
+
+#endif /* __ASM_ARM_OPCODES_SEC_H */
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
new file mode 100644
index 00000000000..efcfdf92d9d
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -0,0 +1,39 @@
+/*
+ * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
+ * Copyright (C) 2012  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __ASM_ARM_OPCODES_VIRT_H
+#define __ASM_ARM_OPCODES_VIRT_H
+
+#include <asm/opcodes.h>
+
+#define __HVC(imm16) __inst_arm_thumb32(				\
+	0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F),	\
+	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
+)
+
+#define __ERET	__inst_arm_thumb32(					\
+	0xE160006E,							\
+	0xF3DE8F00							\
+)
+
+#define __MSR_ELR_HYP(regnum)	__inst_arm_thumb32(			\
+	0xE12EF300 | regnum,						\
+	0xF3808E30 | (regnum << 16)					\
+)
+
+#endif /* !
__ASM_ARM_OPCODES_VIRT_H */ diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h new file mode 100644 index 00000000000..e796c598513 --- /dev/null +++ b/arch/arm/include/asm/opcodes.h @@ -0,0 +1,231 @@ +/* + *  arch/arm/include/asm/opcodes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARM_OPCODES_H +#define __ASM_ARM_OPCODES_H + +#ifndef __ASSEMBLY__ +#include <linux/linkage.h> +extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); +#endif + +#define ARM_OPCODE_CONDTEST_FAIL   0 +#define ARM_OPCODE_CONDTEST_PASS   1 +#define ARM_OPCODE_CONDTEST_UNCOND 2 + + +/* + * Assembler opcode byteswap helpers. + * These are only intended for use by this header: don't use them directly, + * because they will be suboptimal in most cases. + */ +#define ___asm_opcode_swab32(x) (	\ +	  (((x) << 24) & 0xFF000000)	\ +	| (((x) <<  8) & 0x00FF0000)	\ +	| (((x) >>  8) & 0x0000FF00)	\ +	| (((x) >> 24) & 0x000000FF)	\ +) +#define ___asm_opcode_swab16(x) (	\ +	  (((x) << 8) & 0xFF00)		\ +	| (((x) >> 8) & 0x00FF)		\ +) +#define ___asm_opcode_swahb32(x) (	\ +	  (((x) << 8) & 0xFF00FF00)	\ +	| (((x) >> 8) & 0x00FF00FF)	\ +) +#define ___asm_opcode_swahw32(x) (	\ +	  (((x) << 16) & 0xFFFF0000)	\ +	| (((x) >> 16) & 0x0000FFFF)	\ +) +#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF) +#define ___asm_opcode_identity16(x) ((x) & 0xFFFF) + + +/* + * Opcode byteswap helpers + * + * These macros help with converting instructions between a canonical integer + * format and in-memory representation, in an endianness-agnostic manner. + * + * __mem_to_opcode_*() convert from in-memory representation to canonical form. + * __opcode_to_mem_*() convert from canonical form to in-memory representation. + * + * + * Canonical instruction representation: + * + *	ARM:		0xKKLLMMNN + *	Thumb 16-bit:	0x0000KKLL, where KK < 0xE8 + *	Thumb 32-bit:	0xKKLLMMNN, where KK >= 0xE8 + * + * There is no way to distinguish an ARM instruction in canonical representation + * from a Thumb instruction (just as these cannot be distinguished in memory). + * Where this distinction is important, it needs to be tracked separately. + * + * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not + * represent any valid Thumb-2 instruction.  For this range, + * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. + * + * The ___asm variants are intended only for use by this header, in situations + * involving inline assembler.  For .S files, the normal __opcode_*() macros + * should do the right thing. + */ +#ifdef __ASSEMBLY__ + +#define ___opcode_swab32(x) ___asm_opcode_swab32(x) +#define ___opcode_swab16(x) ___asm_opcode_swab16(x) +#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x) +#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x) +#define ___opcode_identity32(x) ___asm_opcode_identity32(x) +#define ___opcode_identity16(x) ___asm_opcode_identity16(x) + +#else /* ! __ASSEMBLY__ */ + +#include <linux/types.h> +#include <linux/swab.h> + +#define ___opcode_swab32(x) swab32(x) +#define ___opcode_swab16(x) swab16(x) +#define ___opcode_swahb32(x) swahb32(x) +#define ___opcode_swahw32(x) swahw32(x) +#define ___opcode_identity32(x) ((u32)(x)) +#define ___opcode_identity16(x) ((u16)(x)) + +#endif /* ! 
__ASSEMBLY__ */ + + +#ifdef CONFIG_CPU_ENDIAN_BE8 + +#define __opcode_to_mem_arm(x) ___opcode_swab32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x) +#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x) + +#else /* ! CONFIG_CPU_ENDIAN_BE8 */ + +#define __opcode_to_mem_arm(x) ___opcode_identity32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x) +#ifndef CONFIG_CPU_ENDIAN_BE32 +/* + * On BE32 systems, using 32-bit accesses to store Thumb instructions will not + * work in all cases, due to alignment constraints.  For now, a correct + * version is not provided for BE32. + */ +#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x) +#endif + +#endif /* ! CONFIG_CPU_ENDIAN_BE8 */ + +#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) +#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) +#ifndef CONFIG_CPU_ENDIAN_BE32 +#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) +#endif + +/* Operations specific to Thumb opcodes */ + +/* Instruction size checks: */ +#define __opcode_is_thumb32(x) (		\ +	   ((x) & 0xF8000000) == 0xE8000000	\ +	|| ((x) & 0xF0000000) == 0xF0000000	\ +) +#define __opcode_is_thumb16(x) (					\ +	   ((x) & 0xFFFF0000) == 0					\ +	&& !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000)	\ +) + +/* Operations to construct or split 32-bit Thumb instructions: */ +#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16)) +#define __opcode_thumb32_second(x) (___opcode_identity16(x)) +#define __opcode_thumb32_compose(first, second) (			\ +	  (___opcode_identity32(___opcode_identity16(first)) << 16)	\ +	| ___opcode_identity32(___opcode_identity16(second))		\ +) +#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16)) +#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x)) +#define ___asm_opcode_thumb32_compose(first, second) (			    \ +	  (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \ +	| ___asm_opcode_identity32(___asm_opcode_identity16(second))	    \ +) + +/* + * Opcode injection helpers + * + * In rare cases it is necessary to assemble an opcode which the + * assembler does not support directly, or which would normally be + * rejected because of the CFLAGS or AFLAGS used to build the affected + * file. + * + * Before using these macros, consider carefully whether it is feasible + * instead to change the build flags for your file, or whether it really + * makes sense to support old assembler versions when building that + * particular kernel feature. + * + * The macros defined here should only be used where there is no viable + * alternative. 
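+ *
+ * As a concrete illustration (taken from the __SMC() encoding in
+ * opcodes-sec.h), an SMC #0 can be emitted on both ARM and Thumb-2
+ * kernels with the __inst_arm_thumb32() macro described below:
+ *
+ *	__inst_arm_thumb32(0xE1600070, 0xF7F08000)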
+ * + * + * __inst_arm(x): emit the specified ARM opcode + * __inst_thumb16(x): emit the specified 16-bit Thumb opcode + * __inst_thumb32(x): emit the specified 32-bit Thumb opcode + * + * __inst_arm_thumb16(arm, thumb): emit either the specified arm or + *	16-bit Thumb opcode, depending on whether an ARM or Thumb-2 + *	kernel is being built + * + * __inst_arm_thumb32(arm, thumb): emit either the specified arm or + *	32-bit Thumb opcode, depending on whether an ARM or Thumb-2 + *	kernel is being built + * + * + * Note that using these macros directly is poor practice.  Instead, you + * should use them to define human-readable wrapper macros to encode the + * instructions that you care about.  In code which might run on ARMv7 or + * above, you can usually use the __inst_arm_thumb{16,32} macros to + * specify the ARM and Thumb alternatives at the same time.  This ensures + * that the correct opcode gets emitted depending on the instruction set + * used for the kernel build. + * + * Look at opcodes-virt.h for an example of how to use these macros. + */ +#include <linux/stringify.h> + +#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x)) +#define __inst_thumb32(x) ___inst_thumb32(				\ +	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)),	\ +	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x))	\ +) +#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x)) + +#ifdef CONFIG_THUMB2_KERNEL +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \ +	__inst_thumb16(thumb_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \ +	__inst_thumb32(thumb_opcode) +#else +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#endif + +/* Helpers for the helpers.  Don't use these directly. 
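+ * (For reference: under __ASSEMBLY__ they expand to bare .long/.short
+ * directives, while in C they stringify the opcode into ".long ..." or
+ * ".short ..." fragments suitable for pasting into inline asm.)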
 */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+	".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
+
+#endif /* __ASM_ARM_OPCODES_H */
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index fc190092527..891a56b35bc 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -21,71 +21,118 @@
 #ifndef __ASM_OUTERCACHE_H
 #define __ASM_OUTERCACHE_H
 
+#include <linux/types.h>
+
 struct outer_cache_fns {
 	void (*inv_range)(unsigned long, unsigned long);
 	void (*clean_range)(unsigned long, unsigned long);
 	void (*flush_range)(unsigned long, unsigned long);
 	void (*flush_all)(void);
-	void (*inv_all)(void);
 	void (*disable)(void);
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
-};
+	void (*resume)(void);
 
-#ifdef CONFIG_OUTER_CACHE
+	/* This is an ARM L2C thing */
+	void (*write_sec)(unsigned long, unsigned);
+};
 
 extern struct outer_cache_fns outer_cache;
 
-static inline void outer_inv_range(unsigned long start, unsigned long end)
+#ifdef CONFIG_OUTER_CACHE
+/**
+ * outer_inv_range - invalidate range of outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
+static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.inv_range)
 		outer_cache.inv_range(start, end);
 }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
+
+/**
+ * outer_clean_range - clean dirty outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
+static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.clean_range)
 		outer_cache.clean_range(start, end);
 }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
+
+/**
+ * outer_flush_range - clean and invalidate outer cache lines
+ * @start: starting physical address, inclusive
+ * @end: end physical address, exclusive
+ */
+static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 {
 	if (outer_cache.flush_range)
 		outer_cache.flush_range(start, end);
 }
 
+/**
+ * outer_flush_all - clean and invalidate all cache lines in the outer cache
+ *
+ * Note: depending on implementation, this may not be atomic - it must
+ * only be called with interrupts disabled and no other active outer
+ * cache masters.
+ *
+ * It is intended that this function is only used by implementations
+ * needing to override the outer_cache.disable() method due to security.
+ * (Some implementations perform this as a clean followed by an invalidate.)
+ */
 static inline void outer_flush_all(void)
 {
 	if (outer_cache.flush_all)
 		outer_cache.flush_all();
 }
 
-static inline void outer_inv_all(void)
-{
-	if (outer_cache.inv_all)
-		outer_cache.inv_all();
-}
+/**
+ * outer_disable - clean, invalidate and disable the outer cache
+ *
+ * Disable the outer cache, ensuring that any data contained in the outer
+ * cache is pushed out to lower levels of system memory.  The note and
+ * conditions above concerning outer_flush_all() apply here.
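+ *
+ * An illustrative (hypothetical) caller would be a platform suspend
+ * path:
+ *
+ *	outer_disable();	-- flush and disable before power-down
+ *	... enter and leave the low power state ...
+ *	outer_resume();		-- re-enable with the saved configuration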
+ */ +extern void outer_disable(void); -static inline void outer_disable(void) +/** + * outer_resume - restore the cache configuration and re-enable outer cache + * + * Restore any configuration that the cache had when previously enabled, + * and re-enable the outer cache. + */ +static inline void outer_resume(void)  { -	if (outer_cache.disable) -		outer_cache.disable(); +	if (outer_cache.resume) +		outer_cache.resume();  }  #else -static inline void outer_inv_range(unsigned long start, unsigned long end) +static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)  { } -static inline void outer_clean_range(unsigned long start, unsigned long end) +static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)  { } -static inline void outer_flush_range(unsigned long start, unsigned long end) +static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)  { }  static inline void outer_flush_all(void) { } -static inline void outer_inv_all(void) { }  static inline void outer_disable(void) { } +static inline void outer_resume(void) { }  #endif  #ifdef CONFIG_OUTER_CACHE_SYNC +/** + * outer_sync - perform a sync point for outer cache + * + * Ensure that all outer cache operations are complete and any store + * buffers are drained. + */  static inline void outer_sync(void)  {  	if (outer_cache.sync) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index a485ac3c869..4355f0ec44d 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -13,13 +13,13 @@  /* PAGE_SHIFT determines the page size */  #define PAGE_SHIFT		12  #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT) -#define PAGE_MASK		(~(PAGE_SIZE-1)) +#define PAGE_MASK		(~((1 << PAGE_SHIFT) - 1))  #ifndef __ASSEMBLY__  #ifndef CONFIG_MMU -#include "page-nommu.h" +#include <asm/page-nommu.h>  #else @@ -34,7 +34,6 @@   *	processor(s) we're building for.   *   *	We have the following to choose from: - *	  v3		- ARMv3   *	  v4wt		- ARMv4 with writethrough cache, without minicache   *	  v4wb		- ARMv4 with writeback cache, without minicache   *	  v4_mc		- ARMv4 with minicache @@ -44,14 +43,6 @@  #undef _USER  #undef MULTI_USER -#ifdef CONFIG_CPU_COPY_V3 -# ifdef _USER -#  define MULTI_USER 1 -# else -#  define _USER v3 -# endif -#endif -  #ifdef CONFIG_CPU_COPY_V4WT  # ifdef _USER  #  define MULTI_USER 1 @@ -151,51 +142,21 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,  #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)  extern void copy_page(void *to, const void *from); -#undef STRICT_MM_TYPECHECKS - -#ifdef STRICT_MM_TYPECHECKS -/* - * These are used to make use of C type-checking.. - */ -typedef struct { unsigned long pte; } pte_t; -typedef struct { unsigned long pmd; } pmd_t; -typedef struct { unsigned long pgd[2]; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; - -#define pte_val(x)      ((x).pte) -#define pmd_val(x)      ((x).pmd) -#define pgd_val(x)	((x).pgd[0]) -#define pgprot_val(x)   ((x).pgprot) - -#define __pte(x)        ((pte_t) { (x) } ) -#define __pmd(x)        ((pmd_t) { (x) } ) -#define __pgprot(x)     ((pgprot_t) { (x) } ) +#ifdef CONFIG_KUSER_HELPERS +#define __HAVE_ARCH_GATE_AREA 1 +#endif +#ifdef CONFIG_ARM_LPAE +#include <asm/pgtable-3level-types.h>  #else -/* - * .. 
while these make it easier on the compiler - */ -typedef unsigned long pte_t; -typedef unsigned long pmd_t; -typedef unsigned long pgd_t[2]; -typedef unsigned long pgprot_t; - -#define pte_val(x)      (x) -#define pmd_val(x)      (x) -#define pgd_val(x)	((x)[0]) -#define pgprot_val(x)   (x) - -#define __pte(x)        (x) -#define __pmd(x)        (x) -#define __pgprot(x)     (x) - -#endif /* STRICT_MM_TYPECHECKS */ +#include <asm/pgtable-2level-types.h> +#endif  #endif /* CONFIG_MMU */  typedef struct page *pgtable_t; -#ifndef CONFIG_SPARSEMEM +#ifdef CONFIG_HAVE_ARCH_PFN_VALID  extern int pfn_valid(unsigned long);  #endif diff --git a/arch/arm/include/asm/param.h b/arch/arm/include/asm/param.h deleted file mode 100644 index 8b24bf94c06..00000000000 --- a/arch/arm/include/asm/param.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - *  arch/arm/include/asm/param.h - * - *  Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __ASM_PARAM_H -#define __ASM_PARAM_H - -#ifdef __KERNEL__ -# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */ -# define USER_HZ	100		/* User interfaces are in "ticks" */ -# define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */ -#else -# define HZ		100 -#endif - -#define EXEC_PAGESIZE	4096 - -#ifndef NOGROUP -#define NOGROUP         (-1) -#endif - -/* max length of hostname */ -#define MAXHOSTNAMELEN  64 - -#endif - diff --git a/arch/arm/include/asm/parport.h b/arch/arm/include/asm/parport.h deleted file mode 100644 index 26e94b09035..00000000000 --- a/arch/arm/include/asm/parport.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - *  arch/arm/include/asm/parport.h: ARM-specific parport initialisation - * - *  Copyright (C) 1999, 2000  Tim Waugh <tim@cyberelk.demon.co.uk> - * - * This file should only be included by drivers/parport/parport_pc.c. 
- */ - -#ifndef __ASMARM_PARPORT_H -#define __ASMARM_PARPORT_H - -static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) -{ -	return parport_pc_find_isa_ports (autoirq, autodma); -} - -#endif /* !(_ASMARM_PARPORT_H) */ diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 92e2a833693..7e95d8535e2 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -3,9 +3,19 @@  #ifdef __KERNEL__  #include <asm-generic/pci-dma-compat.h> +#include <asm-generic/pci-bridge.h>  #include <asm/mach/pci.h> /* for pci_sys_data */ -#include <mach/hardware.h> /* for PCIBIOS_MIN_* */ + +extern unsigned long pcibios_min_io; +#define PCIBIOS_MIN_IO pcibios_min_io +extern unsigned long pcibios_min_mem; +#define PCIBIOS_MIN_MEM pcibios_min_mem + +static inline int pcibios_assign_all_busses(void) +{ +	return pci_has_flag(PCI_REASSIGN_ALL_RSRC); +}  #ifdef CONFIG_PCI_DOMAINS  static inline int pci_domain_nr(struct pci_bus *bus) @@ -21,23 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus)  }  #endif /* CONFIG_PCI_DOMAINS */ -#ifdef CONFIG_PCI_HOST_ITE8152 -/* ITE bridge requires setting latency timer to avoid early bus access -   termination by PIC bus mater devices -*/ -extern void pcibios_set_master(struct pci_dev *dev); -#else -static inline void pcibios_set_master(struct pci_dev *dev) -{ -	/* No special bus mastering setup handling */ -} -#endif - -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ -	/* We don't do dynamic PCI IRQ allocation */ -} -  /*   * The PCI address space does equal the physical memory address space.   * The networking and block device layers use this boolean for bounce @@ -59,20 +52,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,  extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,                                 enum pci_mmap_state mmap_state, int write_combine); -extern void -pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, -			 struct resource *res); - -extern void -pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, -			struct pci_bus_region *region); - -/* - * Dummy implementation; always return 0. - */  static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)  { -	return 0; +	return channel ? 15 : 14;  }  #endif /* __KERNEL__ */ diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index b4e32d8ec07..209e6504922 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -1,6 +1,52 @@ -#ifndef __ARM_PERCPU -#define __ARM_PERCPU +/* + * Copyright 2012 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program.  If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef _ASM_ARM_PERCPU_H_ +#define _ASM_ARM_PERCPU_H_ + +/* + * Same as asm-generic/percpu.h, except that we store the per cpu offset + * in the TPIDRPRW. 
TPIDRPRW only exists on V6K and V7 + */ +#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) +static inline void set_my_cpu_offset(unsigned long off) +{ +	/* Set TPIDRPRW */ +	asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory"); +} + +static inline unsigned long __my_cpu_offset(void) +{ +	unsigned long off; +	register unsigned long *sp asm ("sp"); + +	/* +	 * Read TPIDRPRW. +	 * We want to allow caching the value, so avoid using volatile and +	 * instead use a fake stack read to hazard against barrier(). +	 */ +	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); + +	return off; +} +#define __my_cpu_offset __my_cpu_offset() +#else +#define set_my_cpu_offset(x)	do {} while(0) + +#endif /* CONFIG_SMP */  #include <asm-generic/percpu.h> -#endif +#endif /* _ASM_ARM_PERCPU_H_ */ diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index c4aa4e8c6af..755877527cf 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -12,25 +12,20 @@  #ifndef __ARM_PERF_EVENT_H__  #define __ARM_PERF_EVENT_H__ -/* ARM performance counters start from 1 (in the cp15 accesses) so use the - * same indexes here for consistency. */ -#define PERF_EVENT_INDEX_OFFSET 1 - -/* ARM perf PMU IDs for use by internal perf clients. */ -enum arm_perf_pmu_ids { -	ARM_PERF_PMU_ID_XSCALE1	= 0, -	ARM_PERF_PMU_ID_XSCALE2, -	ARM_PERF_PMU_ID_V6, -	ARM_PERF_PMU_ID_V6MP, -	ARM_PERF_PMU_ID_CA8, -	ARM_PERF_PMU_ID_CA9, -	ARM_NUM_PMU_IDS, -}; +/* + * The ARMv7 CPU PMU supports up to 32 event counters. + */ +#define ARMPMU_MAX_HWEVENTS		32 -extern enum arm_perf_pmu_ids -armpmu_get_pmu_id(void); +#define HW_OP_UNSUPPORTED		0xFFFF +#define C(_x)				PERF_COUNT_HW_CACHE_##_x +#define CACHE_OP_UNSUPPORTED		0xFFFF -extern int -armpmu_get_max_events(void); +#ifdef CONFIG_HW_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs)	perf_misc_flags(regs) +#endif  #endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index b12cc98bbe0..78a77936168 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -10,6 +10,8 @@  #ifndef _ASMARM_PGALLOC_H  #define _ASMARM_PGALLOC_H +#include <linux/pagemap.h> +  #include <asm/domain.h>  #include <asm/pgtable-hwdef.h>  #include <asm/processor.h> @@ -23,21 +25,45 @@  #define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))  #define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) +#ifdef CONFIG_ARM_LPAE + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ +	return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ +	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); +	free_page((unsigned long)pmd); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ +	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); +} + +#else	/* !CONFIG_ARM_LPAE */ +  /*   * Since we have only two-level page tables, these are trivial   */  #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })  #define pmd_free(mm, pmd)		do { } while (0) -#define pgd_populate(mm,pmd,pte)	BUG() +#define pud_populate(mm,pmd,pte)	BUG() -extern pgd_t *get_pgd_slow(struct mm_struct *mm); -extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); +#endif	/* CONFIG_ARM_LPAE */ -#define 
pgd_alloc(mm)			get_pgd_slow(mm) -#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd) +extern pgd_t *pgd_alloc(struct mm_struct *mm); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);  #define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) +static inline void clean_pte_table(pte_t *pte) +{ +	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE); +} +  /*   * Allocate one PTE table.   * @@ -45,14 +71,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);   * into one table thus:   *   *  +------------+ - *  |  h/w pt 0  | - *  +------------+ - *  |  h/w pt 1  | - *  +------------+   *  | Linux pt 0 |   *  +------------+   *  | Linux pt 1 |   *  +------------+ + *  |  h/w pt 0  | + *  +------------+ + *  |  h/w pt 1  | + *  +------------+   */  static inline pte_t *  pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) @@ -60,10 +86,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)  	pte_t *pte;  	pte = (pte_t *)__get_free_page(PGALLOC_GFP); -	if (pte) { -		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); -		pte += PTRS_PER_PTE; -	} +	if (pte) +		clean_pte_table(pte);  	return pte;  } @@ -78,14 +102,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)  #else  	pte = alloc_pages(PGALLOC_GFP, 0);  #endif -	if (pte) { -		if (!PageHighMem(pte)) { -			void *page = page_address(pte); -			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); -		} -		pgtable_page_ctor(pte); +	if (!pte) +		return NULL; +	if (!PageHighMem(pte)) +		clean_pte_table(page_address(pte)); +	if (!pgtable_page_ctor(pte)) { +		__free_page(pte); +		return NULL;  	} -  	return pte;  } @@ -94,10 +118,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)   */  static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)  { -	if (pte) { -		pte -= PTRS_PER_PTE; +	if (pte)  		free_page((unsigned long)pte); -	}  }  static inline void pte_free(struct mm_struct *mm, pgtable_t pte) @@ -106,10 +128,14 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)  	__free_page(pte);  } -static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) +static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, +				  pmdval_t prot)  { +	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;  	pmdp[0] = __pmd(pmdval); +#ifndef CONFIG_ARM_LPAE  	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); +#endif  	flush_pmd_entry(pmdp);  } @@ -122,20 +148,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)  static inline void  pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)  { -	unsigned long pte_ptr = (unsigned long)ptep; -  	/* -	 * The pmd must be loaded with the physical -	 * address of the PTE table +	 * The pmd must be loaded with the physical address of the PTE table  	 */ -	pte_ptr -= PTRS_PER_PTE * sizeof(void *); -	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); +	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);  }  static inline void  pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)  { -	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); +	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);  }  #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h new file mode 100644 index 00000000000..5cfba15cb40 --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1,93 @@ +/* + *  arch/arm/include/asm/pgtable-2level-hwdef.h + * + *  
Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H +#define _ASM_PGTABLE_2LEVEL_HWDEF_H + +/* + * Hardware page table definitions. + * + * + Level 1 descriptor (PMD) + *   - common + */ +#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0) +#define PMD_TYPE_TABLE		(_AT(pmdval_t, 1) << 0) +#define PMD_TYPE_SECT		(_AT(pmdval_t, 2) << 0) +#define PMD_BIT4		(_AT(pmdval_t, 1) << 4) +#define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5) +#define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */ +/* + *   - section + */ +#define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2) +#define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3) +#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 4)		/* v6 */ +#define PMD_SECT_AP_WRITE	(_AT(pmdval_t, 1) << 10) +#define PMD_SECT_AP_READ	(_AT(pmdval_t, 1) << 11) +#define PMD_SECT_TEX(x)		(_AT(pmdval_t, (x)) << 12)	/* v5 */ +#define PMD_SECT_APX		(_AT(pmdval_t, 1) << 15)	/* v6 */ +#define PMD_SECT_S		(_AT(pmdval_t, 1) << 16)	/* v6 */ +#define PMD_SECT_nG		(_AT(pmdval_t, 1) << 17)	/* v6 */ +#define PMD_SECT_SUPER		(_AT(pmdval_t, 1) << 18)	/* v6 */ +#define PMD_SECT_AF		(_AT(pmdval_t, 0)) + +#define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0)) +#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE) +#define PMD_SECT_WT		(PMD_SECT_CACHEABLE) +#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) +#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_NONSHARED_DEV	(PMD_SECT_TEX(2)) + +/* + *   - coarse table (not used) + */ + +/* + * + Level 2 descriptor (PTE) + *   - common + */ +#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0) +#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0) +#define PTE_TYPE_LARGE		(_AT(pteval_t, 1) << 0) +#define PTE_TYPE_SMALL		(_AT(pteval_t, 2) << 0) +#define PTE_TYPE_EXT		(_AT(pteval_t, 3) << 0)		/* v5 */ +#define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2) +#define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3) + +/* + *   - extended small page/tiny page + */ +#define PTE_EXT_XN		(_AT(pteval_t, 1) << 0)		/* v6 */ +#define PTE_EXT_AP_MASK		(_AT(pteval_t, 3) << 4) +#define PTE_EXT_AP0		(_AT(pteval_t, 1) << 4) +#define PTE_EXT_AP1		(_AT(pteval_t, 2) << 4) +#define PTE_EXT_AP_UNO_SRO	(_AT(pteval_t, 0) << 4) +#define PTE_EXT_AP_UNO_SRW	(PTE_EXT_AP0) +#define PTE_EXT_AP_URO_SRW	(PTE_EXT_AP1) +#define PTE_EXT_AP_URW_SRW	(PTE_EXT_AP1|PTE_EXT_AP0) +#define PTE_EXT_TEX(x)		(_AT(pteval_t, (x)) << 6)	/* v5 */ +#define PTE_EXT_APX		(_AT(pteval_t, 1) << 9)		/* v6 */ +#define PTE_EXT_COHERENT	(_AT(pteval_t, 1) << 9)		/* XScale3 */ +#define PTE_EXT_SHARED		(_AT(pteval_t, 1) << 10)	/* v6 */ +#define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* v6 */ + +/* + *   - small page + */ +#define PTE_SMALL_AP_MASK	(_AT(pteval_t, 0xff) << 4) +#define PTE_SMALL_AP_UNO_SRO	(_AT(pteval_t, 0x00) << 4) +#define PTE_SMALL_AP_UNO_SRW	(_AT(pteval_t, 0x55) << 4) +#define PTE_SMALL_AP_URO_SRW	(_AT(pteval_t, 0xaa) << 4) +#define PTE_SMALL_AP_URW_SRW	(_AT(pteval_t, 0xff) << 4) + +#define PHYS_MASK		(~0UL) + +#endif diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h new file mode 100644 index 00000000000..66cb5b0e89c --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level-types.h @@ -0,0 +1,67 @@ +/* + * 
arch/arm/include/asm/pgtable-2level-types.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H +#define _ASM_PGTABLE_2LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u32 pteval_t; +typedef u32 pmdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pmdval_t pgd[2]; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x)      ((x).pte) +#define pmd_val(x)      ((x).pmd) +#define pgd_val(x)	((x).pgd[0]) +#define pgprot_val(x)   ((x).pgprot) + +#define __pte(x)        ((pte_t) { (x) } ) +#define __pmd(x)        ((pmd_t) { (x) } ) +#define __pgprot(x)     ((pgprot_t) { (x) } ) + +#else +/* + * .. while these make it easier on the compiler + */ +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pmdval_t pgd_t[2]; +typedef pteval_t pgprot_t; + +#define pte_val(x)      (x) +#define pmd_val(x)      (x) +#define pgd_val(x)	((x)[0]) +#define pgprot_val(x)   (x) + +#define __pte(x)        (x) +#define __pmd(x)        (x) +#define __pgprot(x)     (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#endif	/* _ASM_PGTABLE_2LEVEL_TYPES_H */ diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h new file mode 100644 index 00000000000..219ac88a954 --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level.h @@ -0,0 +1,195 @@ +/* + *  arch/arm/include/asm/pgtable-2level.h + * + *  Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_H +#define _ASM_PGTABLE_2LEVEL_H + +/* + * Hardware-wise, we have a two level page table structure, where the first + * level has 4096 entries, and the second level has 256 entries.  Each entry + * is one 32-bit word.  Most of the bits in the second level entry are used + * by hardware, and there aren't any "accessed" and "dirty" bits. + * + * Linux on the other hand has a three level page table structure, which can + * be wrapped to fit a two level page table structure easily - using the PGD + * and PTE only.  However, Linux also expects one "PTE" table per page, and + * at least a "dirty" bit. + * + * Therefore, we tweak the implementation slightly - we tell Linux that we + * have 2048 entries in the first level, each of which is 8 bytes (iow, two + * hardware pointers to the second level.)  The second level contains two + * hardware PTE tables arranged contiguously, preceded by Linux versions + * which contain the state information Linux needs.  We, therefore, end up + * with 512 entries in the "PTE" level. 
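+ *
+ * (To check the arithmetic: 2048 first-level entries of 2MB each cover
+ * the full 4GB address space, and 512 "PTE" entries of 4KB pages
+ * likewise cover one 2MB first-level entry.)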
+ *
+ * This leads to the page tables having the following layout:
+ *
+ *    pgd             pte
+ * |        |
+ * +--------+
+ * |        |       +------------+ +0
+ * +- - - - +       | Linux pt 0 |
+ * |        |       +------------+ +1024
+ * +--------+ +0    | Linux pt 1 |
+ * |        |-----> +------------+ +2048
+ * +- - - - + +4    |  h/w pt 0  |
+ * |        |-----> +------------+ +3072
+ * +--------+ +8    |  h/w pt 1  |
+ * |        |       +------------+ +4096
+ *
+ * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
+ * PTE_xxx for definitions of bits appearing in the "h/w pt".
+ *
+ * PMD_xxx definitions refer to bits in the first level page table.
+ *
+ * The "dirty" bit is emulated by only granting hardware write permission
+ * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
+ * means that a write to a clean page will cause a permission fault, and
+ * the Linux MM layer will mark the page dirty via handle_pte_fault().
+ * For the hardware to notice the permission change, the TLB entry must
+ * be flushed, and ptep_set_access_flags() does that for us.
+ *
+ * The "accessed" or "young" bit is emulated by a similar method; we only
+ * allow accesses to the page if the "young" bit is set.  Accesses to the
+ * page will cause a fault, and handle_pte_fault() will set the young bit
+ * for us as long as the page is marked present in the corresponding Linux
+ * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
+ * up to date.
+ *
+ * However, when the "young" bit is cleared, we deny access to the page
+ * by clearing the hardware PTE.  Currently Linux does not flush the TLB
+ * for us in this case, which means the TLB will retain the translation
+ * until either the TLB entry is evicted under pressure, or a context
+ * switch which changes the user space mapping occurs.
+ */
+#define PTRS_PER_PTE		512
+#define PTRS_PER_PMD		1
+#define PTRS_PER_PGD		2048
+
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PMD_SHIFT		21
+#define PGDIR_SHIFT		21
+
+#define PMD_SIZE		(1UL << PMD_SHIFT)
+#define PMD_MASK		(~(PMD_SIZE-1))
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT		20
+#define SECTION_SIZE		(1UL << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+
+/*
+ * ARMv6 supersection address mask and size definitions.
+ */
+#define SUPERSECTION_SHIFT	24
+#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions.
+ *
+ * We keep two sets of PTEs - the hardware and the linux version.
+ * This allows greater flexibility in the way we map the Linux bits
+ * onto the hardware tables, and allows us to have YOUNG and DIRTY
+ * bits.
+ *
+ * The PTE table pointer refers to the hardware entries; the "Linux"
+ * entries are stored 2048 bytes below.
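+ *
+ * (Equivalently: the hardware pmd entries point at
+ * 'pte + PTE_HWTABLE_OFF' while Linux itself walks 'pte'; this is
+ * what __pmd_populate() in pgalloc.h arranges.)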
+ */ +#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */ +#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0) +#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1) +#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */ +#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6) +#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7) +#define L_PTE_USER		(_AT(pteval_t, 1) << 8) +#define L_PTE_XN		(_AT(pteval_t, 1) << 9) +#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */ +#define L_PTE_NONE		(_AT(pteval_t, 1) << 11) + +/* + * These are the memory types, defined to be compatible with + * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB + */ +#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */ +#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */ +#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */ +#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */ +#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */ +#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */ +#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */ +#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */ +#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */ +#define L_PTE_MT_VECTORS	(_AT(pteval_t, 0x0f) << 2)	/* 1111 */ +#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2) + +#ifndef __ASSEMBLY__ + +/* + * The "pud_xxx()" functions here are trivial when the pmd is folded into + * the pud: the pud entry is never bad, always exists, and can't be set or + * cleared. + */ +#define pud_none(pud)		(0) +#define pud_bad(pud)		(0) +#define pud_present(pud)	(1) +#define pud_clear(pudp)		do { } while (0) +#define set_pud(pud,pudp)	do { } while (0) + +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) +{ +	return (pmd_t *)pud; +} + +#define pmd_large(pmd)		(pmd_val(pmd) & 2) +#define pmd_bad(pmd)		(pmd_val(pmd) & 2) + +#define copy_pmd(pmdpd,pmdps)		\ +	do {				\ +		pmdpd[0] = pmdps[0];	\ +		pmdpd[1] = pmdps[1];	\ +		flush_pmd_entry(pmdpd);	\ +	} while (0) + +#define pmd_clear(pmdp)			\ +	do {				\ +		pmdp[0] = __pmd(0);	\ +		pmdp[1] = __pmd(0);	\ +		clean_pmd_entry(pmdp);	\ +	} while (0) + +/* we don't need complex calculations here as the pmd is folded into the pgd */ +#define pmd_addr_end(addr,end) (end) + +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) + +/* + * We don't have huge page support for short descriptors, for the moment + * define empty stubs for use by pin_page_for_write. + */ +#define pmd_hugewillfault(pmd)	(0) +#define pmd_thp_or_huge(pmd)	(0) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h new file mode 100644 index 00000000000..626989fec4d --- /dev/null +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -0,0 +1,106 @@ +/* + * arch/arm/include/asm/pgtable-3level-hwdef.h + * + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
+#define _ASM_PGTABLE_3LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1/2 descriptor
+ *   - common
+ */
+#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)
+#define PMD_BIT4		(_AT(pmdval_t, 0))
+#define PMD_DOMAIN(x)		(_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT	(61)
+#define PMD_APTABLE		(_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT)
+#define PMD_PXNTABLE		(_AT(pgdval_t, 1) << 59)
+
+/*
+ *   - section
+ */
+#define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
+#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
+#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
+#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
+#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_AP_WRITE	(_AT(pmdval_t, 0))
+#define PMD_SECT_AP_READ	(_AT(pmdval_t, 0))
+#define PMD_SECT_AP1		(_AT(pmdval_t, 1) << 6)
+#define PMD_SECT_TEX(x)		(_AT(pmdval_t, 0))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0) << 2)	/* strongly ordered */
+#define PMD_SECT_BUFFERED	(_AT(pmdval_t, 1) << 2)	/* normal non-cacheable */
+#define PMD_SECT_WT		(_AT(pmdval_t, 2) << 2)	/* normal inner write-through */
+#define PMD_SECT_WB		(_AT(pmdval_t, 3) << 2)	/* normal inner write-back */
+#define PMD_SECT_WBWA		(_AT(pmdval_t, 7) << 2)	/* normal inner write-alloc */
+
+/*
+ * + Level 3 descriptor (PTE)
+ */
+#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
+#define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
+#define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
+#define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+#define PTE_EXT_AF		(_AT(pteval_t, 1) << 10)	/* Access Flag */
+#define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_EXT_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+
+/*
+ * 40-bit physical address supported.
+ */
+#define PHYS_MASK_SHIFT		(40)
+#define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)
+
+/*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ *   0x80000000: T0SZ = 0, T1SZ = 1
+ *   0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
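+ *
+ * As a worked example of the encoding produced by TTBR1_SIZE below:
+ * with PAGE_OFFSET = 0x80000000, (0x80000000 >> 30) - 1 = 1, so
+ * TTBR1_SIZE = 1 << 16, i.e. T1SZ = 1 in TTBCR[18:16]; with
+ * PAGE_OFFSET = 0xc0000000, (0xc0000000 >> 30) - 1 = 2, giving
+ * T1SZ = 2, matching the table above.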
+ */ +#if defined CONFIG_VMSPLIT_2G +#define TTBR1_OFFSET	16			/* skip two L1 entries */ +#elif defined CONFIG_VMSPLIT_3G +#define TTBR1_OFFSET	(4096 * (1 + 3))	/* only L2, skip pgd + 3*pmd */ +#else +#define TTBR1_OFFSET	0 +#endif + +#define TTBR1_SIZE	(((PAGE_OFFSET >> 30) - 1) << 16) + +#endif diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h new file mode 100644 index 00000000000..921aa30259c --- /dev/null +++ b/arch/arm/include/asm/pgtable-3level-types.h @@ -0,0 +1,70 @@ +/* + * arch/arm/include/asm/pgtable-3level-types.h + * + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H +#define _ASM_PGTABLE_3LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u64 pteval_t; +typedef u64 pmdval_t; +typedef u64 pgdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS + +/* + * These are used to make use of C type-checking.. + */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x)      ((x).pte) +#define pmd_val(x)      ((x).pmd) +#define pgd_val(x)	((x).pgd) +#define pgprot_val(x)   ((x).pgprot) + +#define __pte(x)        ((pte_t) { (x) } ) +#define __pmd(x)        ((pmd_t) { (x) } ) +#define __pgd(x)	((pgd_t) { (x) } ) +#define __pgprot(x)     ((pgprot_t) { (x) } ) + +#else	/* !STRICT_MM_TYPECHECKS */ + +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pgdval_t pgd_t; +typedef pteval_t pgprot_t; + +#define pte_val(x)	(x) +#define pmd_val(x)	(x) +#define pgd_val(x)	(x) +#define pgprot_val(x)	(x) + +#define __pte(x)	(x) +#define __pmd(x)	(x) +#define __pgd(x)	(x) +#define __pgprot(x)	(x) + +#endif	/* STRICT_MM_TYPECHECKS */ + +#endif	/* _ASM_PGTABLE_3LEVEL_TYPES_H */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h new file mode 100644 index 00000000000..85c60adc8b6 --- /dev/null +++ b/arch/arm/include/asm/pgtable-3level.h @@ -0,0 +1,270 @@ +/* + * arch/arm/include/asm/pgtable-3level.h + * + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_3LEVEL_H +#define _ASM_PGTABLE_3LEVEL_H + +/* + * With LPAE, there are 3 levels of page tables. Each level has 512 entries of + * 8 bytes each, occupying a 4K page. The first level table covers a range of + * 512GB, each entry representing 1GB. Since we are limited to 4GB input + * address range, only 4 entries in the PGD are used. + * + * There are enough spare bits in a page table entry for the kernel specific + * state. + */ +#define PTRS_PER_PTE		512 +#define PTRS_PER_PMD		512 +#define PTRS_PER_PGD		4 + +#define PTE_HWTABLE_PTRS	(0) +#define PTE_HWTABLE_OFF		(0) +#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64)) + +/* + * PGDIR_SHIFT determines the size a top-level page table entry can map. + */ +#define PGDIR_SHIFT		30 + +/* + * PMD_SHIFT determines the size a middle-level page table entry can map. + */ +#define PMD_SHIFT		21 + +#define PMD_SIZE		(1UL << PMD_SHIFT) +#define PMD_MASK		(~((1 << PMD_SHIFT) - 1)) +#define PGDIR_SIZE		(1UL << PGDIR_SHIFT) +#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1)) + +/* + * section address mask and size definitions. + */ +#define SECTION_SHIFT		21 +#define SECTION_SIZE		(1UL << SECTION_SHIFT) +#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1)) + +#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE) + +/* + * Hugetlb definitions. + */ +#define HPAGE_SHIFT		PMD_SHIFT +#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK		(~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT) + +/* + * "Linux" PTE definitions for LPAE. + * + * These bits overlap with the hardware bits but the naming is preserved for + * consistency with the classic page table format. + */ +#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */ +#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */ +#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */ +#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */ +#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */ +#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */ +#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */ +#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */ +#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */ +#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */ +#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */ + +#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0) +#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55) +#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56) +#define PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57) + +/* + * To be used in assembly code with the upper page attributes. + */ +#define L_PTE_XN_HIGH		(1 << (54 - 32)) +#define L_PTE_DIRTY_HIGH	(1 << (55 - 32)) + +/* + * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). 
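+ *
+ * A sketch of the lookup (the helper is hypothetical; what is assumed
+ * is the architectural layout in which MAIR0 carries the 8-bit
+ * attributes 0-3 and MAIR1 carries attributes 4-7):
+ *
+ *	static inline u32 mair_attr(pteval_t pte, u32 mair0, u32 mair1)
+ *	{
+ *		u32 idx  = (pte >> 2) & 7;
+ *		u32 bank = (idx < 4) ? mair0 : mair1;
+ *		return (bank >> ((idx & 3) * 8)) & 0xff;
+ *	}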
+ */ +#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */ +#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */ +#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */ +#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */ +#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */ +#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */ +#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */ +#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */ +#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */ +#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2) + +/* + * Software PGD flags. + */ +#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */ + +/* + * 2nd stage PTE definitions for LPAE. + */ +#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */ +#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ +#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ +#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */ +#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2) + +#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1]   */ +#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */ + +#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */ + +/* + * Hyp-mode PL2 PTE definitions for LPAE. + */ +#define L_PTE_HYP		L_PTE_USER + +#ifndef __ASSEMBLY__ + +#define pud_none(pud)		(!pud_val(pud)) +#define pud_bad(pud)		(!(pud_val(pud) & 2)) +#define pud_present(pud)	(pud_val(pud)) +#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \ +						 PMD_TYPE_TABLE) +#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \ +						 PMD_TYPE_SECT) +#define pmd_large(pmd)		pmd_sect(pmd) + +#define pud_clear(pudp)			\ +	do {				\ +		*pudp = __pud(0);	\ +		clean_pmd_entry(pudp);	\ +	} while (0) + +#define set_pud(pudp, pud)		\ +	do {				\ +		*pudp = pud;		\ +		flush_pmd_entry(pudp);	\ +	} while (0) + +static inline pmd_t *pud_page_vaddr(pud_t pud) +{ +	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); +} + +/* Find an entry in the second-level page table.. */ +#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) +{ +	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); +} + +#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2)) + +#define copy_pmd(pmdpd,pmdps)		\ +	do {				\ +		*pmdpd = *pmdps;	\ +		flush_pmd_entry(pmdpd);	\ +	} while (0) + +#define pmd_clear(pmdp)			\ +	do {				\ +		*pmdp = __pmd(0);	\ +		clean_pmd_entry(pmdp);	\ +	} while (0) + +/* + * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes + * that are written to a page table but not for ptes created with mk_pte. + * + * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to + * hugetlb_cow, where it is compared with an entry in a page table. + * This comparison test fails erroneously leading ultimately to a memory leak. + * + * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is + * present before running the comparison. + */ +#define __HAVE_ARCH_PTE_SAME +#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\ +					: pte_val(pte_a))				\ +				== (pte_present(pte_b) ? 
pte_val(pte_b) & ~PTE_EXT_NG	\
+					: pte_val(pte_b)))
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+
+#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+
+#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+
+/* represent a not-present pmd by zero; this is used by pmdp_invalidate */
+#define pmd_mknotpresent(pmd)	(__pmd(0))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+				PMD_SECT_VALID | PMD_SECT_NONE;
+	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+	return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	BUG_ON(addr >= TASK_SIZE);
+
+	/* create a faulting entry if PROT_NONE protected */
+	if (pmd_val(pmd) & PMD_SECT_NONE)
+		pmd_val(pmd) &= ~PMD_SECT_VALID;
+
+	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+	flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return 1;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_3LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index fd1521d5cb9..8426229ba29 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -10,81 +10,10 @@
 #ifndef _ASMARM_PGTABLE_HWDEF_H
 #define _ASMARM_PGTABLE_HWDEF_H
-/*
- * Hardware page table definitions.
- * - * + Level 1 descriptor (PMD) - *   - common - */ -#define PMD_TYPE_MASK		(3 << 0) -#define PMD_TYPE_FAULT		(0 << 0) -#define PMD_TYPE_TABLE		(1 << 0) -#define PMD_TYPE_SECT		(2 << 0) -#define PMD_BIT4		(1 << 4) -#define PMD_DOMAIN(x)		((x) << 5) -#define PMD_PROTECTION		(1 << 9)	/* v5 */ -/* - *   - section - */ -#define PMD_SECT_BUFFERABLE	(1 << 2) -#define PMD_SECT_CACHEABLE	(1 << 3) -#define PMD_SECT_XN		(1 << 4)	/* v6 */ -#define PMD_SECT_AP_WRITE	(1 << 10) -#define PMD_SECT_AP_READ	(1 << 11) -#define PMD_SECT_TEX(x)		((x) << 12)	/* v5 */ -#define PMD_SECT_APX		(1 << 15)	/* v6 */ -#define PMD_SECT_S		(1 << 16)	/* v6 */ -#define PMD_SECT_nG		(1 << 17)	/* v6 */ -#define PMD_SECT_SUPER		(1 << 18)	/* v6 */ - -#define PMD_SECT_UNCACHED	(0) -#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE) -#define PMD_SECT_WT		(PMD_SECT_CACHEABLE) -#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) -#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_NONSHARED_DEV	(PMD_SECT_TEX(2)) - -/* - *   - coarse table (not used) - */ - -/* - * + Level 2 descriptor (PTE) - *   - common - */ -#define PTE_TYPE_MASK		(3 << 0) -#define PTE_TYPE_FAULT		(0 << 0) -#define PTE_TYPE_LARGE		(1 << 0) -#define PTE_TYPE_SMALL		(2 << 0) -#define PTE_TYPE_EXT		(3 << 0)	/* v5 */ -#define PTE_BUFFERABLE		(1 << 2) -#define PTE_CACHEABLE		(1 << 3) - -/* - *   - extended small page/tiny page - */ -#define PTE_EXT_XN		(1 << 0)	/* v6 */ -#define PTE_EXT_AP_MASK		(3 << 4) -#define PTE_EXT_AP0		(1 << 4) -#define PTE_EXT_AP1		(2 << 4) -#define PTE_EXT_AP_UNO_SRO	(0 << 4) -#define PTE_EXT_AP_UNO_SRW	(PTE_EXT_AP0) -#define PTE_EXT_AP_URO_SRW	(PTE_EXT_AP1) -#define PTE_EXT_AP_URW_SRW	(PTE_EXT_AP1|PTE_EXT_AP0) -#define PTE_EXT_TEX(x)		((x) << 6)	/* v5 */ -#define PTE_EXT_APX		(1 << 9)	/* v6 */ -#define PTE_EXT_COHERENT	(1 << 9)	/* XScale3 */ -#define PTE_EXT_SHARED		(1 << 10)	/* v6 */ -#define PTE_EXT_NG		(1 << 11)	/* v6 */ - -/* - *   - small page - */ -#define PTE_SMALL_AP_MASK	(0xff << 4) -#define PTE_SMALL_AP_UNO_SRO	(0x00 << 4) -#define PTE_SMALL_AP_UNO_SRW	(0x55 << 4) -#define PTE_SMALL_AP_URO_SRW	(0xaa << 4) -#define PTE_SMALL_AP_URW_SRW	(0xff << 4) +#ifdef CONFIG_ARM_LPAE +#include <asm/pgtable-3level-hwdef.h> +#else +#include <asm/pgtable-2level-hwdef.h> +#endif  #endif diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index ffc0e85775b..0642228ff78 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -79,9 +79,6 @@ extern unsigned int kobjsize(const void *objp);   * No page table caches to initialise.   */  #define pgtable_cache_init()	do { } while (0) -#define io_remap_page_range	remap_page_range -#define io_remap_pfn_range	remap_pfn_range -  /*   * All 32bit addresses are effectively valid for vmalloc... 
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index b155414192d..5478e5d6ad8 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -10,19 +10,29 @@  #ifndef _ASMARM_PGTABLE_H  #define _ASMARM_PGTABLE_H -#include <asm-generic/4level-fixup.h> +#include <linux/const.h>  #include <asm/proc-fns.h>  #ifndef CONFIG_MMU -#include "pgtable-nommu.h" +#include <asm-generic/4level-fixup.h> +#include <asm/pgtable-nommu.h>  #else +#include <asm-generic/pgtable-nopud.h>  #include <asm/memory.h> -#include <mach/vmalloc.h>  #include <asm/pgtable-hwdef.h> + +#include <asm/tlbflush.h> + +#ifdef CONFIG_ARM_LPAE +#include <asm/pgtable-3level.h> +#else +#include <asm/pgtable-2level.h> +#endif +  /*   * Just any arbitrary offset to the start of the vmalloc VM area: the   * current 8MB value just means that there will be a 8MB "hole" after the @@ -30,163 +40,37 @@   * any out-of-bounds memory accesses will hopefully be caught.   * The vmalloc() routines leaves a hole of 4kB between each vmalloced   * area for the same reason. ;) - * - * Note that platforms may override VMALLOC_START, but they must provide - * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space, - * which may not overlap IO space.   */ -#ifndef VMALLOC_START  #define VMALLOC_OFFSET		(8*1024*1024)  #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#endif - -/* - * Hardware-wise, we have a two level page table structure, where the first - * level has 4096 entries, and the second level has 256 entries.  Each entry - * is one 32-bit word.  Most of the bits in the second level entry are used - * by hardware, and there aren't any "accessed" and "dirty" bits. - * - * Linux on the other hand has a three level page table structure, which can - * be wrapped to fit a two level page table structure easily - using the PGD - * and PTE only.  However, Linux also expects one "PTE" table per page, and - * at least a "dirty" bit. - * - * Therefore, we tweak the implementation slightly - we tell Linux that we - * have 2048 entries in the first level, each of which is 8 bytes (iow, two - * hardware pointers to the second level.)  The second level contains two - * hardware PTE tables arranged contiguously, followed by Linux versions - * which contain the state information Linux needs.  We, therefore, end up - * with 512 entries in the "PTE" level. - * - * This leads to the page tables having the following layout: - * - *    pgd             pte - * |        | - * +--------+ +0 - * |        |-----> +------------+ +0 - * +- - - - + +4    |  h/w pt 0  | - * |        |-----> +------------+ +1024 - * +--------+ +8    |  h/w pt 1  | - * |        |       +------------+ +2048 - * +- - - - +       | Linux pt 0 | - * |        |       +------------+ +3072 - * +--------+       | Linux pt 1 | - * |        |       +------------+ +4096 - * - * See L_PTE_xxx below for definitions of bits in the "Linux pt", and - * PTE_xxx for definitions of bits appearing in the "h/w pt". - * - * PMD_xxx definitions refer to bits in the first level page table. - * - * The "dirty" bit is emulated by only granting hardware write permission - * iff the page is marked "writable" and "dirty" in the Linux PTE.  This - * means that a write to a clean page will cause a permission fault, and - * the Linux MM layer will mark the page dirty via handle_pte_fault(). 
- * For the hardware to notice the permission change, the TLB entry must - * be flushed, and ptep_set_access_flags() does that for us. - * - * The "accessed" or "young" bit is emulated by a similar method; we only - * allow accesses to the page if the "young" bit is set.  Accesses to the - * page will cause a fault, and handle_pte_fault() will set the young bit - * for us as long as the page is marked present in the corresponding Linux - * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is - * up to date. - * - * However, when the "young" bit is cleared, we deny access to the page - * by clearing the hardware PTE.  Currently Linux does not flush the TLB - * for us in this case, which means the TLB will retain the transation - * until either the TLB entry is evicted under pressure, or a context - * switch which changes the user space mapping occurs. - */ -#define PTRS_PER_PTE		512 -#define PTRS_PER_PMD		1 -#define PTRS_PER_PGD		2048 - -/* - * PMD_SHIFT determines the size of the area a second-level page table can map - * PGDIR_SHIFT determines what a third-level page table entry can map - */ -#define PMD_SHIFT		21 -#define PGDIR_SHIFT		21 +#define VMALLOC_END		0xff000000UL  #define LIBRARY_TEXT_START	0x0c000000  #ifndef __ASSEMBLY__ -extern void __pte_error(const char *file, int line, unsigned long val); -extern void __pmd_error(const char *file, int line, unsigned long val); -extern void __pgd_error(const char *file, int line, unsigned long val); - -#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte)) -#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd)) -#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd)) -#endif /* !__ASSEMBLY__ */ +extern void __pte_error(const char *file, int line, pte_t); +extern void __pmd_error(const char *file, int line, pmd_t); +extern void __pgd_error(const char *file, int line, pgd_t); -#define PMD_SIZE		(1UL << PMD_SHIFT) -#define PMD_MASK		(~(PMD_SIZE-1)) -#define PGDIR_SIZE		(1UL << PGDIR_SHIFT) -#define PGDIR_MASK		(~(PGDIR_SIZE-1)) +#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte) +#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd) +#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)  /*   * This is the lowest virtual address we can permit any user space   * mapping to be mapped at.  This is particularly important for   * non-high vector CPUs.   */ -#define FIRST_USER_ADDRESS	PAGE_SIZE - -#define FIRST_USER_PGD_NR	1 -#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR) - -/* - * section address mask and size definitions. - */ -#define SECTION_SHIFT		20 -#define SECTION_SIZE		(1UL << SECTION_SHIFT) -#define SECTION_MASK		(~(SECTION_SIZE-1)) +#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)  /* - * ARMv6 supersection address mask and size definitions. + * Use TASK_SIZE as the ceiling argument for free_pgtables() and + * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd + * page shared between user and kernel).   */ -#define SUPERSECTION_SHIFT	24 -#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT) -#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1)) - -/* - * "Linux" PTE definitions. - * - * We keep two sets of PTEs - the hardware and the linux version. - * This allows greater flexibility in the way we map the Linux bits - * onto the hardware tables, and allows us to have YOUNG and DIRTY - * bits. - * - * The PTE table pointer refers to the hardware entries; the "Linux" - * entries are stored 1024 bytes below. 
- */ -#define L_PTE_PRESENT		(1 << 0) -#define L_PTE_YOUNG		(1 << 1) -#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */ -#define L_PTE_DIRTY		(1 << 6) -#define L_PTE_WRITE		(1 << 7) -#define L_PTE_USER		(1 << 8) -#define L_PTE_EXEC		(1 << 9) -#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */ - -/* - * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB - */ -#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */ -#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */ -#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */ -#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */ -#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */ -#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */ -#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */ -#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */ -#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */ -#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */ -#define L_PTE_MT_MASK		(0x0f << 2) - -#ifndef __ASSEMBLY__ +#ifdef CONFIG_ARM_LPAE +#define USER_PGTABLES_CEILING	TASK_SIZE +#endif  /*   * The pgprot_* and protection_map entries will be fixed up in runtime @@ -198,26 +82,57 @@ extern void __pgd_error(const char *file, int line, unsigned long val);  extern pgprot_t		pgprot_user;  extern pgprot_t		pgprot_kernel; +extern pgprot_t		pgprot_hyp_device; +extern pgprot_t		pgprot_s2; +extern pgprot_t		pgprot_s2_device;  #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE		pgprot_user -#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) -#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) -#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) -#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER) -#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) -#define PAGE_KERNEL		pgprot_kernel -#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC) - -#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT) -#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) -#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) -#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) -#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER) -#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) +#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE) +#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) +#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER) +#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) +#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN) +#define PAGE_KERNEL_EXEC	pgprot_kernel +#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP) +#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP) +#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) +#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR) + +#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) +#define __PAGE_SHARED		
__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) +#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER) +#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) +#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) +#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) + +#define __pgprot_modify(prot,mask,bits)		\ +	__pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define pgprot_noncached(prot) \ +	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + +#define pgprot_writecombine(prot) \ +	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) + +#define pgprot_stronglyordered(prot) \ +	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define pgprot_dmacoherent(prot) \ +	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, +				     unsigned long size, pgprot_t vma_prot); +#else +#define pgprot_dmacoherent(prot) \ +	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) +#endif  #endif /* __ASSEMBLY__ */ @@ -255,26 +170,62 @@ extern pgprot_t		pgprot_kernel;  extern struct page *empty_zero_page;  #define ZERO_PAGE(vaddr)	(empty_zero_page) -#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT) -#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) -#define pte_none(pte)		(!pte_val(pte)) -#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0) -#define pte_page(pte)		(pfn_to_page(pte_pfn(pte))) -#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr)) +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; -#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr)) -#define pte_unmap(pte)			__pte_unmap(pte) +/* to find an entry in a page-table-directory */ +#define pgd_index(addr)		((addr) >> PGDIR_SHIFT) + +#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr)) + +/* to find an entry in a kernel page-table-directory */ +#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr) + +#define pmd_none(pmd)		(!pmd_val(pmd)) +#define pmd_present(pmd)	(pmd_val(pmd)) + +static inline pte_t *pmd_page_vaddr(pmd_t pmd) +{ +	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); +} + +#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))  #ifndef CONFIG_HIGHPTE -#define __pte_map(dir)		pmd_page_vaddr(*(dir)) +#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))  #define __pte_unmap(pte)	do { } while (0)  #else -#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE) -#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE)) +#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd))) +#define __pte_unmap(pte)	kunmap_atomic(pte)  #endif -#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + +#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr)) + +#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr)) +#define pte_unmap(pte)			__pte_unmap(pte) + +#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) +#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot)) + +#define pte_page(pte)		pfn_to_page(pte_pfn(pte)) +#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot) + +#define 
pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0) + +#define pte_none(pte)		(!pte_val(pte)) +#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT) +#define pte_valid(pte)		(pte_val(pte) & L_PTE_VALID) +#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) +#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte)	(0) + +#define pte_valid_user(pte)	\ +	(pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte))  #if __LINUX_ARM_ARCH__ < 6  static inline void __sync_icache_dcache(pte_t pteval) @@ -287,146 +238,51 @@ extern void __sync_icache_dcache(pte_t pteval);  static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,  			      pte_t *ptep, pte_t pteval)  { -	if (addr >= TASK_SIZE) -		set_pte_ext(ptep, pteval, 0); -	else { +	unsigned long ext = 0; + +	if (addr < TASK_SIZE && pte_valid_user(pteval)) {  		__sync_icache_dcache(pteval); -		set_pte_ext(ptep, pteval, PTE_EXT_NG); +		ext |= PTE_EXT_NG;  	} -} -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE) -#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC) -#define pte_special(pte)	(0) - -#define pte_present_user(pte) \ -	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ -	 (L_PTE_PRESENT | L_PTE_USER)) +	set_pte_ext(ptep, pteval, ext); +}  #define PTE_BIT_FUNC(fn,op) \  static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } -PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); -PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE); +PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); +PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);  PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);  PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);  PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);  PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG); +PTE_BIT_FUNC(mkexec,   &= ~L_PTE_XN); +PTE_BIT_FUNC(mknexec,   |= L_PTE_XN);  static inline pte_t pte_mkspecial(pte_t pte) { return pte; } -#define __pgprot_modify(prot,mask,bits)		\ -	__pgprot((pgprot_val(prot) & ~(mask)) | (bits)) - -/* - * Mark the prot value as uncacheable and unbufferable. 
- */ -#define pgprot_noncached(prot) \ -	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) -#define pgprot_writecombine(prot) \ -	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) -#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE -#define pgprot_dmacoherent(prot) \ -	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) -#define __HAVE_PHYS_MEM_ACCESS_PROT -struct file; -extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, -				     unsigned long size, pgprot_t vma_prot); -#else -#define pgprot_dmacoherent(prot) \ -	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) -#endif - -#define pmd_none(pmd)		(!pmd_val(pmd)) -#define pmd_present(pmd)	(pmd_val(pmd)) -#define pmd_bad(pmd)		(pmd_val(pmd) & 2) - -#define copy_pmd(pmdpd,pmdps)		\ -	do {				\ -		pmdpd[0] = pmdps[0];	\ -		pmdpd[1] = pmdps[1];	\ -		flush_pmd_entry(pmdpd);	\ -	} while (0) - -#define pmd_clear(pmdp)			\ -	do {				\ -		pmdp[0] = __pmd(0);	\ -		pmdp[1] = __pmd(0);	\ -		clean_pmd_entry(pmdp);	\ -	} while (0) - -static inline pte_t *pmd_page_vaddr(pmd_t pmd) -{ -	unsigned long ptr; - -	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); -	ptr += PTRS_PER_PTE * sizeof(void *); - -	return __va(ptr); -} - -#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd))) - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. - */ -#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot) - -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pgd is never bad, and a pmd always exists (as it's folded - * into the pgd entry) - */ -#define pgd_none(pgd)		(0) -#define pgd_bad(pgd)		(0) -#define pgd_present(pgd)	(1) -#define pgd_clear(pgdp)		do { } while (0) -#define set_pgd(pgd,pgdp)	do { } while (0) - -/* to find an entry in a page-table-directory */ -#define pgd_index(addr)		((addr) >> PGDIR_SHIFT) - -#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr) - -/* Find an entry in the second-level page table.. */ -#define pmd_offset(dir, addr)	((pmd_t *)(dir)) - -/* Find an entry in the third-level page table.. */ -#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -  static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)  { -	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; +	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | +		L_PTE_NONE | L_PTE_VALID;  	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);  	return pte;  } -extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; -  /*   * Encode and decode a swap entry.  Swap entries are stored in the Linux   * page tables as follows:   *   *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1   *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - *   <--------------- offset --------------------> <- type --> 0 0 0 + *   <--------------- offset ----------------------> < type -> 0 0 0   * - * This gives us up to 63 swap files and 32GB per swap file.  Note that + * This gives us up to 31 swap files and 64GB per swap file.  Note that   * the offset field is always non-zero.   
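+ *
+ * Worked example, using the shift/width values defined just below:
+ * the type occupies bits 7:3 and the offset bits 31:8, so an entry is
+ * built as
+ *
+ *	swp_entry = (offset << 8) | (type << 3)
+ *
+ * leaving 24 offset bits; with 4K pages that is 2^24 * 4KB = 64GB of
+ * swap per file, as stated above.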
*/  #define __SWP_TYPE_SHIFT	3 -#define __SWP_TYPE_BITS		6 +#define __SWP_TYPE_BITS		5  #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)  #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) @@ -468,13 +324,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];   * We provide our own arch_get_unmapped_area to cope with VIPT caches.   */  #define HAVE_ARCH_UNMAPPED_AREA - -/* - * remap a physical page `pfn' of size `size' with page protection `prot' - * into virtual address `from' - */ -#define io_remap_pfn_range(vma,from,pfn,size,prot) \ -		remap_pfn_range(vma, from, pfn, size, prot) +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN  #define pgtable_cache_init() do { } while (0) diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 8ccea012722..ae1919be8f9 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -12,66 +12,102 @@  #ifndef __ARM_PMU_H__  #define __ARM_PMU_H__ -enum arm_pmu_type { -	ARM_PMU_DEVICE_CPU	= 0, -	ARM_NUM_PMU_DEVICES, -}; - -#ifdef CONFIG_CPU_HAS_PMU +#include <linux/interrupt.h> +#include <linux/perf_event.h> -/** - * reserve_pmu() - reserve the hardware performance counters +/* + * struct arm_pmu_platdata - ARM PMU platform data   * - * Reserve the hardware performance counters in the system for exclusive use. - * The platform_device for the system is returned on success, ERR_PTR() - * encoded error on failure. + * @handle_irq: an optional handler which will be called from the + *	interrupt and passed the address of the low level handler, + *	and can be used to implement any platform specific handling + *	before or after calling it. + * @runtime_resume: an optional handler which will be called by the + *	runtime PM framework following a call to pm_runtime_get(). + *	Note that if pm_runtime_get() is called more than once in + *	succession this handler will only be called once. + * @runtime_suspend: an optional handler which will be called by the + *	runtime PM framework following a call to pm_runtime_put(). + *	Note that if pm_runtime_get() is called more than once in + *	succession this handler will only be called following the + *	final call to pm_runtime_put() that actually disables the + *	hardware.   */ -extern struct platform_device * -reserve_pmu(enum arm_pmu_type device); +struct arm_pmu_platdata { +	irqreturn_t (*handle_irq)(int irq, void *dev, +				  irq_handler_t pmu_handler); +	int (*runtime_resume)(struct device *dev); +	int (*runtime_suspend)(struct device *dev); +}; -/** - * release_pmu() - Relinquish control of the performance counters - * - * Release the performance counters and allow someone else to use them. - * Callers must have disabled the counters and released IRQs before calling - * this. The platform_device returned from reserve_pmu() must be passed as - * a cookie. - */ -extern int -release_pmu(struct platform_device *pdev); +#ifdef CONFIG_HW_PERF_EVENTS -/** - * init_pmu() - Initialise the PMU. - * - * Initialise the system ready for PMU enabling. This should typically set the - * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do - * the actual hardware initialisation. - */ -extern int -init_pmu(enum arm_pmu_type device); +/* The events for a given PMU register set. */ +struct pmu_hw_events { +	/* +	 * The events that are active on the PMU for the given index. +	 */ +	struct perf_event	**events; + +	/* +	 * A 1 bit for an index indicates that the counter is being used for +	 * an event. A 0 means that the counter can be used. 
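+	 *
+	 * A hypothetical sketch (names assumed for illustration) of how
+	 * a free counter is typically claimed from this bitmap:
+	 *
+	 *	idx = find_first_zero_bit(hw_events->used_mask, num_events);
+	 *	if (idx == num_events)
+	 *		return -EAGAIN;
+	 *	set_bit(idx, hw_events->used_mask);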
+	 */ +	unsigned long           *used_mask; + +	/* +	 * Hardware lock to serialize accesses to PMU registers. Needed for the +	 * read/modify/write sequences. +	 */ +	raw_spinlock_t		pmu_lock; +}; + +struct arm_pmu { +	struct pmu	pmu; +	cpumask_t	active_irqs; +	char		*name; +	irqreturn_t	(*handle_irq)(int irq_num, void *dev); +	void		(*enable)(struct perf_event *event); +	void		(*disable)(struct perf_event *event); +	int		(*get_event_idx)(struct pmu_hw_events *hw_events, +					 struct perf_event *event); +	void		(*clear_event_idx)(struct pmu_hw_events *hw_events, +					 struct perf_event *event); +	int		(*set_event_filter)(struct hw_perf_event *evt, +					    struct perf_event_attr *attr); +	u32		(*read_counter)(struct perf_event *event); +	void		(*write_counter)(struct perf_event *event, u32 val); +	void		(*start)(struct arm_pmu *); +	void		(*stop)(struct arm_pmu *); +	void		(*reset)(void *); +	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler); +	void		(*free_irq)(struct arm_pmu *); +	int		(*map_event)(struct perf_event *event); +	int		num_events; +	atomic_t	active_events; +	struct mutex	reserve_mutex; +	u64		max_period; +	struct platform_device	*plat_device; +	struct pmu_hw_events	*(*get_hw_events)(void); +}; + +#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -#else /* CONFIG_CPU_HAS_PMU */ +extern const struct dev_pm_ops armpmu_dev_pm_ops; -#include <linux/err.h> +int armpmu_register(struct arm_pmu *armpmu, int type); -static inline struct platform_device * -reserve_pmu(enum arm_pmu_type device) -{ -	return ERR_PTR(-ENODEV); -} +u64 armpmu_event_update(struct perf_event *event); -static inline int -release_pmu(struct platform_device *pdev) -{ -	return -ENODEV; -} +int armpmu_event_set_period(struct perf_event *event); -static inline int -init_pmu(enum arm_pmu_type device) -{ -	return -ENODEV; -} +int armpmu_map_event(struct perf_event *event, +		     const unsigned (*event_map)[PERF_COUNT_HW_MAX], +		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] +						[PERF_COUNT_HW_CACHE_OP_MAX] +						[PERF_COUNT_HW_CACHE_RESULT_MAX], +		     u32 raw_event_mask); -#endif /* CONFIG_CPU_HAS_PMU */ +#endif /* CONFIG_HW_PERF_EVENTS */  #endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/include/asm/poll.h b/arch/arm/include/asm/poll.h deleted file mode 100644 index c98509d3149..00000000000 --- a/arch/arm/include/asm/poll.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/poll.h> diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h deleted file mode 100644 index 2446d23bfdb..00000000000 --- a/arch/arm/include/asm/posix_types.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - *  arch/arm/include/asm/posix_types.h - * - *  Copyright (C) 1996-1998 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - *  Changelog: - *   27-06-1996	RMK	Created - */ -#ifndef __ARCH_ARM_POSIX_TYPES_H -#define __ARCH_ARM_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc.  Also, we cannot - * assume GCC is being used. 
- */ - -typedef unsigned long		__kernel_ino_t; -typedef unsigned short		__kernel_mode_t; -typedef unsigned short		__kernel_nlink_t; -typedef long			__kernel_off_t; -typedef int			__kernel_pid_t; -typedef unsigned short		__kernel_ipc_pid_t; -typedef unsigned short		__kernel_uid_t; -typedef unsigned short		__kernel_gid_t; -typedef unsigned int		__kernel_size_t; -typedef int			__kernel_ssize_t; -typedef int			__kernel_ptrdiff_t; -typedef long			__kernel_time_t; -typedef long			__kernel_suseconds_t; -typedef long			__kernel_clock_t; -typedef int			__kernel_timer_t; -typedef int			__kernel_clockid_t; -typedef int			__kernel_daddr_t; -typedef char *			__kernel_caddr_t; -typedef unsigned short		__kernel_uid16_t; -typedef unsigned short		__kernel_gid16_t; -typedef unsigned int		__kernel_uid32_t; -typedef unsigned int		__kernel_gid32_t; - -typedef unsigned short		__kernel_old_uid_t; -typedef unsigned short		__kernel_old_gid_t; -typedef unsigned short		__kernel_old_dev_t; - -#ifdef __GNUC__ -typedef long long		__kernel_loff_t; -#endif - -typedef struct { -	int	val[2]; -} __kernel_fsid_t; - -#if defined(__KERNEL__) - -#undef	__FD_SET -#define __FD_SET(fd, fdsetp) \ -		(((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1<<((fd) & 31))) - -#undef	__FD_CLR -#define __FD_CLR(fd, fdsetp) \ -		(((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] &= ~(1<<((fd) & 31))) - -#undef	__FD_ISSET -#define __FD_ISSET(fd, fdsetp) \ -		((((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] & (1<<((fd) & 31))) != 0) - -#undef	__FD_ZERO -#define __FD_ZERO(fdsetp) \ -		(memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp)))) - -#endif - -#endif diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h new file mode 100644 index 00000000000..806cfe622a9 --- /dev/null +++ b/arch/arm/include/asm/probes.h @@ -0,0 +1,43 @@ +/* + * arch/arm/include/asm/probes.h + * + * Original contents copied from arch/arm/include/asm/kprobes.h + * which contains the following notice... + * + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + */ + +#ifndef _ASM_PROBES_H +#define _ASM_PROBES_H + +typedef u32 probes_opcode_t; + +struct arch_probes_insn; +typedef void (probes_insn_handler_t)(probes_opcode_t, +				     struct arch_probes_insn *, +				     struct pt_regs *); +typedef unsigned long (probes_check_cc)(unsigned long); +typedef void (probes_insn_singlestep_t)(probes_opcode_t, +					struct arch_probes_insn *, +					struct pt_regs *); +typedef void (probes_insn_fn_t)(void); + +/* Architecture specific copy of original instruction. 
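+ *
+ * As an illustration of how the hooks above fit together (call shape
+ * only, with invented variable names), a decoded probe is executed
+ * roughly as:
+ *
+ *	if (asi->insn_check_cc(regs->ARM_cpsr))
+ *		asi->insn_handler(opcode, asi, regs);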
*/ +struct arch_probes_insn { +	probes_opcode_t			*insn; +	probes_insn_handler_t		*insn_handler; +	probes_check_cc			*insn_check_cc; +	probes_insn_singlestep_t	*insn_singlestep; +	probes_insn_fn_t		*insn_fn; +}; + +#endif diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8fdae9bc9ab..5324c1112f3 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -13,256 +13,132 @@  #ifdef __KERNEL__ +#include <asm/glue-proc.h> +#include <asm/page.h> -/* - * Work out if we need multiple CPU support - */ -#undef MULTI_CPU -#undef CPU_NAME +#ifndef __ASSEMBLY__ + +struct mm_struct;  /* - * CPU_NAME - the prefix for CPU related functions + * Don't change this structure - ASM code relies on it.   */ - -#ifdef CONFIG_CPU_ARM610 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm6 -# endif -#endif - -#ifdef CONFIG_CPU_ARM7TDMI -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm7tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM710 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm7 -# endif -#endif - -#ifdef CONFIG_CPU_ARM720T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm720 -# endif -#endif - -#ifdef CONFIG_CPU_ARM740T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm740 -# endif -#endif - -#ifdef CONFIG_CPU_ARM9TDMI -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm9tdmi -# endif -#endif - -#ifdef CONFIG_CPU_ARM920T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm920 -# endif -#endif - -#ifdef CONFIG_CPU_ARM922T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm922 -# endif -#endif - -#ifdef CONFIG_CPU_FA526 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_fa526 -# endif -#endif - -#ifdef CONFIG_CPU_ARM925T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm925 -# endif -#endif - -#ifdef CONFIG_CPU_ARM926T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm926 -# endif -#endif - -#ifdef CONFIG_CPU_ARM940T -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm940 -# endif -#endif - -#ifdef CONFIG_CPU_ARM946E -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm946 -# endif -#endif - -#ifdef CONFIG_CPU_SA110 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_sa110 -# endif -#endif - -#ifdef CONFIG_CPU_SA1100 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_sa1100 -# endif +extern struct processor { +	/* MISC +	 * get data abort address/flags +	 */ +	void (*_data_abort)(unsigned long pc); +	/* +	 * Retrieve prefetch fault address +	 */ +	unsigned long (*_prefetch_abort)(unsigned long lr); +	/* +	 * Set up any processor specifics +	 */ +	void (*_proc_init)(void); +	/* +	 * Disable any processor specifics +	 */ +	void (*_proc_fin)(void); +	/* +	 * Special stuff for a reset +	 */ +	void (*reset)(unsigned long addr) __attribute__((noreturn)); +	/* +	 * Idle the processor +	 */ +	int (*_do_idle)(void); +	/* +	 * Processor architecture specific +	 */ +	/* +	 * clean a virtual address range from the +	 * 
D-cache without flushing the cache. +	 */ +	void (*dcache_clean_area)(void *addr, int size); + +	/* +	 * Set the page table +	 */ +	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm); +	/* +	 * Set a possibly extended PTE.  Non-extended PTEs should +	 * ignore 'ext'. +	 */ +#ifdef CONFIG_ARM_LPAE +	void (*set_pte_ext)(pte_t *ptep, pte_t pte); +#else +	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);  #endif -#ifdef CONFIG_CPU_ARM1020 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm1020 -# endif -#endif +	/* Suspend/resume */ +	unsigned int suspend_size; +	void (*do_suspend)(void *); +	void (*do_resume)(void *); +} processor; -#ifdef CONFIG_CPU_ARM1020E -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm1020e -# endif +#ifndef MULTI_CPU +extern void cpu_proc_init(void); +extern void cpu_proc_fin(void); +extern int cpu_do_idle(void); +extern void cpu_dcache_clean_area(void *, int); +extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); +#ifdef CONFIG_ARM_LPAE +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); +#else +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);  #endif +extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); -#ifdef CONFIG_CPU_ARM1022 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm1022 -# endif -#endif +/* These three are private to arch/arm/kernel/suspend.c */ +extern void cpu_do_suspend(void *); +extern void cpu_do_resume(void *); +#else +#define cpu_proc_init			processor._proc_init +#define cpu_proc_fin			processor._proc_fin +#define cpu_reset			processor.reset +#define cpu_do_idle			processor._do_idle +#define cpu_dcache_clean_area		processor.dcache_clean_area +#define cpu_set_pte_ext			processor.set_pte_ext +#define cpu_do_switch_mm		processor.switch_mm -#ifdef CONFIG_CPU_ARM1026 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_arm1026 -# endif +/* These three are private to arch/arm/kernel/suspend.c */ +#define cpu_do_suspend			processor.do_suspend +#define cpu_do_resume			processor.do_resume  #endif -#ifdef CONFIG_CPU_XSCALE -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_xscale -# endif -#endif +extern void cpu_resume(void); -#ifdef CONFIG_CPU_XSC3 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_xsc3 -# endif -#endif +#include <asm/memory.h> -#ifdef CONFIG_CPU_MOHAWK -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_mohawk -# endif -#endif +#ifdef CONFIG_MMU -#ifdef CONFIG_CPU_FEROCEON -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_feroceon -# endif -#endif +#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) -#ifdef CONFIG_CPU_V6 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_v6 -# endif -#endif +#ifdef CONFIG_ARM_LPAE -#ifdef CONFIG_CPU_V7 -# ifdef CPU_NAME -#  undef  MULTI_CPU -#  define MULTI_CPU -# else -#  define CPU_NAME cpu_v7 -# endif -#endif +#define cpu_get_ttbr(nr)					\ +	({							\ +		u64 ttbr;					\ +		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\ +			: "=r" (ttbr));				\ +		ttbr;						\ +	}) -#ifndef __ASSEMBLY__ +#define cpu_set_ttbr(nr, val)					\ +	do {							\ +		u64 ttbr = val;					\ +		__asm__("mcrr	p15, " #nr ", %Q0, %R0, c2"	\ +			: : "r" (ttbr));			
\ +	} while (0) -#ifndef MULTI_CPU -#include <asm/cpu-single.h> +#define cpu_get_pgd()	\ +	({						\ +		u64 pg = cpu_get_ttbr(0);		\ +		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\ +		(pgd_t *)phys_to_virt(pg);		\ +	})  #else -#include <asm/cpu-multi32.h> -#endif - -#include <asm/memory.h> - -#ifdef CONFIG_MMU - -#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) -  #define cpu_get_pgd()	\  	({						\  		unsigned long pg;			\ @@ -271,6 +147,11 @@  		pg &= ~0x3fff;				\  		(pgd_t *)phys_to_virt(pg);		\  	}) +#endif + +#else	/*!CONFIG_MMU */ + +#define cpu_switch_mm(pgd,mm)	{ }  #endif diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 67357baaeee..c3d5fc124a0 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -22,6 +22,7 @@  #include <asm/hw_breakpoint.h>  #include <asm/ptrace.h>  #include <asm/types.h> +#include <asm/unified.h>  #ifdef __KERNEL__  #define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \ @@ -29,19 +30,7 @@  #define STACK_TOP_MAX	TASK_SIZE  #endif -union debug_insn { -	u32	arm; -	u16	thumb; -}; - -struct debug_entry { -	u32			address; -	union debug_insn	insn; -}; -  struct debug_info { -	int			nsaved; -	struct debug_entry	bp[2];  #ifdef CONFIG_HAVE_HW_BREAKPOINT  	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];  #endif @@ -66,8 +55,6 @@ struct thread_struct {  #define start_thread(regs,pc,sp)					\  ({									\ -	unsigned long *stack = (unsigned long *)sp;			\ -	set_fs(USER_DS);						\  	memset(regs->uregs, 0, sizeof(regs->uregs));			\  	if (current->personality & ADDR_LIMIT_32BIT)			\  		regs->ARM_cpsr = USR_MODE;				\ @@ -78,9 +65,6 @@ struct thread_struct {  	regs->ARM_cpsr |= PSR_ENDSTATE;					\  	regs->ARM_pc = pc & ~1;		/* pc */			\  	regs->ARM_sp = sp;		/* sp */			\ -	regs->ARM_r2 = stack[2];	/* r2 (envp) */			\ -	regs->ARM_r1 = stack[1];	/* r1 (argv) */			\ -	regs->ARM_r0 = stack[0];	/* r0 (argc) */			\  	nommu_start_thread(regs);					\  }) @@ -90,28 +74,31 @@ struct task_struct;  /* Free all resources held by a thread. */  extern void release_thread(struct task_struct *); -/* Prepare to copy thread state - unlazy all lazy status */ -#define prepare_to_copy(tsk)	do { } while (0) -  unsigned long get_wchan(struct task_struct *p); -#if __LINUX_ARM_ARCH__ == 6 +#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)  #define cpu_relax()			smp_mb()  #else  #define cpu_relax()			barrier()  #endif -/* - * Create a new kernel thread - */ -extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); -  #define task_pt_regs(p) \  	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)  #define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc  #define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp +#ifdef CONFIG_SMP +#define __ALT_SMP_ASM(smp, up)						\ +	"9998:	" smp "\n"						\ +	"	.pushsection \".alt.smp.init\", \"a\"\n"		\ +	"	.long	9998b\n"					\ +	"	" up "\n"						\ +	"	.popsection\n" +#else +#define __ALT_SMP_ASM(smp, up)	up +#endif +  /*   * Prefetching support - only ARMv5.   
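+ *
+ * For illustration, the prefetchw() below passes "pldw" and "pld"
+ * variants through __ALT_SMP_ASM, which emits roughly:
+ *
+ *	9998:	pldw	[ptr]
+ *		.pushsection ".alt.smp.init", "a"
+ *		.long	9998b
+ *		pld	[ptr]
+ *		.popsection
+ *
+ * so boot code on a uniprocessor system can rewrite the pldw site
+ * with the recorded pld alternative.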
*/ @@ -122,18 +109,25 @@ static inline void prefetch(const void *ptr)  {  	__asm__ __volatile__(  		"pld\t%a0" -		: -		: "p" (ptr) -		: "cc"); +		:: "p" (ptr));  } +#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)  #define ARCH_HAS_PREFETCHW -#define prefetchw(ptr)	prefetch(ptr) - -#define ARCH_HAS_SPINLOCK_PREFETCH -#define spin_lock_prefetch(x) do { } while (0) - +static inline void prefetchw(const void *ptr) +{ +	__asm__ __volatile__( +		".arch_extension	mp\n" +		__ALT_SMP_ASM( +			WASM(pldw)		"\t%a0", +			WASM(pld)		"\t%a0" +		) +		:: "p" (ptr)); +}  #endif +#endif + +#define HAVE_ARCH_PICK_MMAP_LAYOUT  #endif diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h new file mode 100644 index 00000000000..cd94ef2ef28 --- /dev/null +++ b/arch/arm/include/asm/prom.h @@ -0,0 +1,29 @@ +/* + *  arch/arm/include/asm/prom.h + * + *  Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#ifndef __ASMARM_PROM_H +#define __ASMARM_PROM_H + +#ifdef CONFIG_OF + +extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys); +extern void __init arm_dt_init_cpu_maps(void); + +#else /* CONFIG_OF */ + +static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys) +{ +	return NULL; +} + +static inline void arm_dt_init_cpu_maps(void) { } + +#endif /* CONFIG_OF */ +#endif /* ASMARM_PROM_H */ diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h new file mode 100644 index 00000000000..c25ef3ec6d1 --- /dev/null +++ b/arch/arm/include/asm/psci.h @@ -0,0 +1,48 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2012 ARM Limited + */ + +#ifndef __ASM_ARM_PSCI_H +#define __ASM_ARM_PSCI_H + +#define PSCI_POWER_STATE_TYPE_STANDBY		0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1 + +struct psci_power_state { +	u16	id; +	u8	type; +	u8	affinity_level; +}; + +struct psci_operations { +	int (*cpu_suspend)(struct psci_power_state state, +			   unsigned long entry_point); +	int (*cpu_off)(struct psci_power_state state); +	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); +	int (*migrate)(unsigned long cpuid); +	int (*affinity_info)(unsigned long target_affinity, +			unsigned long lowest_affinity_level); +	int (*migrate_info_type)(void); +}; + +extern struct psci_operations psci_ops; +extern struct smp_operations psci_smp_ops; + +#ifdef CONFIG_ARM_PSCI +int psci_init(void); +bool psci_smp_available(void); +#else +static inline int psci_init(void) { return 0; } +static inline bool psci_smp_available(void) { return false; } +#endif + +#endif /* __ASM_ARM_PSCI_H */ diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 783d50f3261..c877654fe3b 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -10,127 +10,12 @@  #ifndef __ASM_ARM_PTRACE_H  #define __ASM_ARM_PTRACE_H -#include <asm/hwcap.h> - -#define PTRACE_GETREGS		12 -#define PTRACE_SETREGS		13 -#define PTRACE_GETFPREGS	14 -#define PTRACE_SETFPREGS	15 -/* PTRACE_ATTACH is 16 */ -/* PTRACE_DETACH is 17 */ -#define PTRACE_GETWMMXREGS	18 -#define PTRACE_SETWMMXREGS	19 -/* 20 is unused */ -#define PTRACE_OLDSETOPTIONS	21 -#define PTRACE_GET_THREAD_AREA	22 -#define PTRACE_SET_SYSCALL	23 -/* PTRACE_SYSCALL is 24 */ -#define PTRACE_GETCRUNCHREGS	25 -#define PTRACE_SETCRUNCHREGS	26 -#define PTRACE_GETVFPREGS	27 -#define PTRACE_SETVFPREGS	28 -#define PTRACE_GETHBPREGS	29 -#define PTRACE_SETHBPREGS	30 - -/* - * PSR bits - */ -#define USR26_MODE	0x00000000 -#define FIQ26_MODE	0x00000001 -#define IRQ26_MODE	0x00000002 -#define SVC26_MODE	0x00000003 -#define USR_MODE	0x00000010 -#define FIQ_MODE	0x00000011 -#define IRQ_MODE	0x00000012 -#define SVC_MODE	0x00000013 -#define ABT_MODE	0x00000017 -#define UND_MODE	0x0000001b -#define SYSTEM_MODE	0x0000001f -#define MODE32_BIT	0x00000010 -#define MODE_MASK	0x0000001f -#define PSR_T_BIT	0x00000020 -#define PSR_F_BIT	0x00000040 -#define PSR_I_BIT	0x00000080 -#define PSR_A_BIT	0x00000100 -#define PSR_E_BIT	0x00000200 -#define PSR_J_BIT	0x01000000 -#define PSR_Q_BIT	0x08000000 -#define PSR_V_BIT	0x10000000 -#define PSR_C_BIT	0x20000000 -#define PSR_Z_BIT	0x40000000 -#define PSR_N_BIT	0x80000000 - -/* - * Groups of PSR bits - */ -#define PSR_f		0xff000000	/* Flags		*/ -#define PSR_s		0x00ff0000	/* Status		*/ -#define PSR_x		0x0000ff00	/* Extension		*/ -#define PSR_c		0x000000ff	/* Control		*/ - -/* - * ARMv7 groups of APSR bits - */ -#define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */ -#define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */ -#define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */ - -/* - * Default endianness state - */ -#ifdef CONFIG_CPU_ENDIAN_BE8 -#define PSR_ENDSTATE	PSR_E_BIT -#else -#define PSR_ENDSTATE	0 -#endif - -/*  - * These are 'magic' values for PTRACE_PEEKUSR that return info about where a - * process is located in memory. - */ -#define PT_TEXT_ADDR		0x10000 -#define PT_DATA_ADDR		0x10004 -#define PT_TEXT_END_ADDR	0x10008 +#include <uapi/asm/ptrace.h>  #ifndef __ASSEMBLY__ - -/* - * This struct defines the way the registers are stored on the - * stack during a system call.  
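For illustration only (not part of this patch), a hedged sketch of how such a saved register set is typically consumed: the ARM_* names are the uregs[] index macros this header keeps, while the helper name is invented and pr_info assumes <linux/printk.h>.

	/* Hypothetical helper: dump a few saved registers through the
	 * ARM_* accessors, which index into uregs[]. */
	static void show_saved_regs(struct pt_regs *regs)
	{
		pr_info("pc=%08lx lr=%08lx sp=%08lx r0=%08lx\n",
			regs->ARM_pc, regs->ARM_lr,
			regs->ARM_sp, regs->ARM_r0);
	}
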
Note that sizeof(struct pt_regs) - * has to be a multiple of 8. - */ -#ifndef __KERNEL__ -struct pt_regs { -	long uregs[18]; -}; -#else /* __KERNEL__ */  struct pt_regs {  	unsigned long uregs[18];  }; -#endif /* __KERNEL__ */ - -#define ARM_cpsr	uregs[16] -#define ARM_pc		uregs[15] -#define ARM_lr		uregs[14] -#define ARM_sp		uregs[13] -#define ARM_ip		uregs[12] -#define ARM_fp		uregs[11] -#define ARM_r10		uregs[10] -#define ARM_r9		uregs[9] -#define ARM_r8		uregs[8] -#define ARM_r7		uregs[7] -#define ARM_r6		uregs[6] -#define ARM_r5		uregs[5] -#define ARM_r4		uregs[4] -#define ARM_r3		uregs[3] -#define ARM_r2		uregs[2] -#define ARM_r1		uregs[1] -#define ARM_r0		uregs[0] -#define ARM_ORIG_r0	uregs[17] - -#ifdef __KERNEL__ - -#define arch_has_single_step()	(1)  #define user_mode(regs)	\  	(((regs)->ARM_cpsr & 0xf) == 0) @@ -142,9 +27,13 @@ struct pt_regs {  #define thumb_mode(regs) (0)  #endif +#ifndef CONFIG_CPU_V7M  #define isa_mode(regs) \ -	((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \ -	 (((regs)->ARM_cpsr & PSR_T_BIT) >> 5)) +	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \ +	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT)))) +#else +#define isa_mode(regs) 1 /* Thumb */ +#endif  #define processor_mode(regs) \  	((regs)->ARM_cpsr & MODE_MASK) @@ -160,6 +49,7 @@ struct pt_regs {   */  static inline int valid_user_regs(struct pt_regs *regs)  { +#ifndef CONFIG_CPU_V7M  	unsigned long mode = regs->ARM_cpsr & MODE_MASK;  	/* @@ -182,10 +72,24 @@ static inline int valid_user_regs(struct pt_regs *regs)  		regs->ARM_cpsr |= USR_MODE;  	return 0; +#else /* ifndef CONFIG_CPU_V7M */ +	return 1; +#endif +} + +static inline long regs_return_value(struct pt_regs *regs) +{ +	return regs->ARM_r0;  }  #define instruction_pointer(regs)	(regs)->ARM_pc +static inline void instruction_pointer_set(struct pt_regs *regs, +					   unsigned long val) +{ +	instruction_pointer(regs) = val; +} +  #ifdef CONFIG_SMP  extern unsigned long profile_pc(struct pt_regs *regs);  #else @@ -196,6 +100,14 @@ extern unsigned long profile_pc(struct pt_regs *regs);  #define PREDICATE_ALWAYS	0xe0000000  /* + * True if instr is a 32-bit thumb instruction. This works if instr + * is the first or only half-word of a thumb instruction. 
It also works + * when instr holds all 32-bits of a wide thumb instruction if stored + * in the form (first_half<<16)|(second_half) + */ +#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800) + +/*   * kprobe-based event tracer support   */  #include <linux/stddef.h> @@ -231,9 +143,15 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)  	return regs->ARM_sp;  } -#endif /* __KERNEL__ */ +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ +	return regs->ARM_sp; +} -#endif /* __ASSEMBLY__ */ +#define current_pt_regs(void) ({				\ +	register unsigned long sp asm ("sp");			\ +	(struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1;	\ +}) +#endif /* __ASSEMBLY__ */  #endif - diff --git a/arch/arm/include/asm/resource.h b/arch/arm/include/asm/resource.h deleted file mode 100644 index 734b581b5b6..00000000000 --- a/arch/arm/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ARM_RESOURCE_H -#define _ARM_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index 2f87870d934..cefdb8f898a 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h @@ -1,6 +1,10 @@  #ifndef _ASMARM_SCATTERLIST_H  #define _ASMARM_SCATTERLIST_H +#ifdef CONFIG_ARM_HAS_SG_CHAIN +#define ARCH_HAS_SG_CHAIN +#endif +  #include <asm/memory.h>  #include <asm/types.h>  #include <asm-generic/scatterlist.h> diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h deleted file mode 100644 index 2b8c5160388..00000000000 --- a/arch/arm/include/asm/sections.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/sections.h> diff --git a/arch/arm/include/asm/segment.h b/arch/arm/include/asm/segment.h deleted file mode 100644 index 9e24c21f630..00000000000 --- a/arch/arm/include/asm/segment.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __ASM_ARM_SEGMENT_H -#define __ASM_ARM_SEGMENT_H - -#define __KERNEL_CS   0x0 -#define __KERNEL_DS   0x0 - -#define __USER_CS     0x1 -#define __USER_DS     0x1 - -#endif /* __ASM_ARM_SEGMENT_H */ - diff --git a/arch/arm/include/asm/sembuf.h b/arch/arm/include/asm/sembuf.h deleted file mode 100644 index 1c028395428..00000000000 --- a/arch/arm/include/asm/sembuf.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _ASMARM_SEMBUF_H -#define _ASMARM_SEMBUF_H - -/*  - * The semid64_ds structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { -	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */ -	__kernel_time_t	sem_otime;		/* last semop time */ -	unsigned long	__unused1; -	__kernel_time_t	sem_ctime;		/* last change time */ -	unsigned long	__unused2; -	unsigned long	sem_nsems;		/* no. of semaphores in array */ -	unsigned long	__unused3; -	unsigned long	__unused4; -}; - -#endif /* _ASMARM_SEMBUF_H */ diff --git a/arch/arm/include/asm/serial.h b/arch/arm/include/asm/serial.h deleted file mode 100644 index ebb049091e2..00000000000 --- a/arch/arm/include/asm/serial.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - *  arch/arm/include/asm/serial.h - * - *  Copyright (C) 1996 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - *  Changelog: - *   15-10-1996	RMK	Created - */ - -#ifndef __ASM_SERIAL_H -#define __ASM_SERIAL_H - -#define BASE_BAUD	(1843200 / 16) - -#endif diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index f1e5a9bca24..e0adb9f1bf9 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -14,213 +14,15 @@  #ifndef __ASMARM_SETUP_H  #define __ASMARM_SETUP_H -#include <linux/types.h> +#include <uapi/asm/setup.h> -#define COMMAND_LINE_SIZE 1024 - -/* The list ends with an ATAG_NONE node. */ -#define ATAG_NONE	0x00000000 - -struct tag_header { -	__u32 size; -	__u32 tag; -}; - -/* The list must start with an ATAG_CORE node */ -#define ATAG_CORE	0x54410001 - -struct tag_core { -	__u32 flags;		/* bit 0 = read-only */ -	__u32 pagesize; -	__u32 rootdev; -}; - -/* it is allowed to have multiple ATAG_MEM nodes */ -#define ATAG_MEM	0x54410002 - -struct tag_mem32 { -	__u32	size; -	__u32	start;	/* physical start address */ -}; - -/* VGA text type displays */ -#define ATAG_VIDEOTEXT	0x54410003 - -struct tag_videotext { -	__u8		x; -	__u8		y; -	__u16		video_page; -	__u8		video_mode; -	__u8		video_cols; -	__u16		video_ega_bx; -	__u8		video_lines; -	__u8		video_isvga; -	__u16		video_points; -}; - -/* describes how the ramdisk will be used in kernel */ -#define ATAG_RAMDISK	0x54410004 - -struct tag_ramdisk { -	__u32 flags;	/* bit 0 = load, bit 1 = prompt */ -	__u32 size;	/* decompressed ramdisk size in _kilo_ bytes */ -	__u32 start;	/* starting block of floppy-based RAM disk image */ -}; - -/* describes where the compressed ramdisk image lives (virtual address) */ -/* - * this one accidentally used virtual addresses - as such, - * it's deprecated. - */ -#define ATAG_INITRD	0x54410005 - -/* describes where the compressed ramdisk image lives (physical address) */ -#define ATAG_INITRD2	0x54420005 - -struct tag_initrd { -	__u32 start;	/* physical start address */ -	__u32 size;	/* size of compressed ramdisk image in bytes */ -}; - -/* board serial number. "64 bits should be enough for everybody" */ -#define ATAG_SERIAL	0x54410006 - -struct tag_serialnr { -	__u32 low; -	__u32 high; -}; - -/* board revision */ -#define ATAG_REVISION	0x54410007 - -struct tag_revision { -	__u32 rev; -}; - -/* initial values for vesafb-type framebuffers. 
see struct screen_info - * in include/linux/tty.h - */ -#define ATAG_VIDEOLFB	0x54410008 - -struct tag_videolfb { -	__u16		lfb_width; -	__u16		lfb_height; -	__u16		lfb_depth; -	__u16		lfb_linelength; -	__u32		lfb_base; -	__u32		lfb_size; -	__u8		red_size; -	__u8		red_pos; -	__u8		green_size; -	__u8		green_pos; -	__u8		blue_size; -	__u8		blue_pos; -	__u8		rsvd_size; -	__u8		rsvd_pos; -}; - -/* command line: \0 terminated string */ -#define ATAG_CMDLINE	0x54410009 - -struct tag_cmdline { -	char	cmdline[1];	/* this is the minimum size */ -}; - -/* acorn RiscPC specific information */ -#define ATAG_ACORN	0x41000101 - -struct tag_acorn { -	__u32 memc_control_reg; -	__u32 vram_pages; -	__u8 sounddefault; -	__u8 adfsdrives; -}; - -/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */ -#define ATAG_MEMCLK	0x41000402 - -struct tag_memclk { -	__u32 fmemclk; -}; - -struct tag { -	struct tag_header hdr; -	union { -		struct tag_core		core; -		struct tag_mem32	mem; -		struct tag_videotext	videotext; -		struct tag_ramdisk	ramdisk; -		struct tag_initrd	initrd; -		struct tag_serialnr	serialnr; -		struct tag_revision	revision; -		struct tag_videolfb	videolfb; -		struct tag_cmdline	cmdline; - -		/* -		 * Acorn specific -		 */ -		struct tag_acorn	acorn; - -		/* -		 * DC21285 specific -		 */ -		struct tag_memclk	memclk; -	} u; -}; - -struct tagtable { -	__u32 tag; -	int (*parse)(const struct tag *); -}; - -#define tag_member_present(tag,member)				\ -	((unsigned long)(&((struct tag *)0L)->member + 1)	\ -		<= (tag)->hdr.size * 4) - -#define tag_next(t)	((struct tag *)((__u32 *)(t) + (t)->hdr.size)) -#define tag_size(type)	((sizeof(struct tag_header) + sizeof(struct type)) >> 2) - -#define for_each_tag(t,base)		\ -	for (t = base; t->hdr.size; t = tag_next(t)) - -#ifdef __KERNEL__  #define __tag __used __attribute__((__section__(".taglist.init")))  #define __tagtable(tag, fn) \ -static struct tagtable __tagtable_##fn __tag = { tag, fn } - -/* - * Memory map description - */ -#ifdef CONFIG_ARCH_LH7A40X -# define NR_BANKS 16 -#else -# define NR_BANKS 8 -#endif - -struct membank { -	unsigned long start; -	unsigned long size; -	unsigned int highmem; -}; - -struct meminfo { -	int nr_banks; -	struct membank bank[NR_BANKS]; -}; - -extern struct meminfo meminfo; - -#define for_each_bank(iter,mi)				\ -	for (iter = 0; iter < (mi)->nr_banks; iter++) - -#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start) -#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size) -#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT) -#define bank_phys_start(bank)	(bank)->start -#define bank_phys_end(bank)	((bank)->start + (bank)->size) -#define bank_phys_size(bank)	(bank)->size +static const struct tagtable __tagtable_##fn __tag = { tag, fn } -#endif  /*  __KERNEL__  */ +extern int arm_add_memory(u64 start, u64 size); +extern void early_print(const char *str, ...); +extern void dump_machine_table(void);  #endif diff --git a/arch/arm/include/asm/shmbuf.h b/arch/arm/include/asm/shmbuf.h deleted file mode 100644 index 2e5c67ba1c9..00000000000 --- a/arch/arm/include/asm/shmbuf.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _ASMARM_SHMBUF_H -#define _ASMARM_SHMBUF_H - -/*  - * The shmid64_ds structure for arm architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { -	struct ipc64_perm	shm_perm;	/* operation perms */ -	size_t			shm_segsz;	/* size of segment (bytes) */ -	__kernel_time_t		shm_atime;	/* last attach time */ -	unsigned long		__unused1; -	__kernel_time_t		shm_dtime;	/* last detach time */ -	unsigned long		__unused2; -	__kernel_time_t		shm_ctime;	/* last change time */ -	unsigned long		__unused3; -	__kernel_pid_t		shm_cpid;	/* pid of creator */ -	__kernel_pid_t		shm_lpid;	/* pid of last operator */ -	unsigned long		shm_nattch;	/* no. of current attaches */ -	unsigned long		__unused4; -	unsigned long		__unused5; -}; - -struct shminfo64 { -	unsigned long	shmmax; -	unsigned long	shmmin; -	unsigned long	shmmni; -	unsigned long	shmseg; -	unsigned long	shmall; -	unsigned long	__unused1; -	unsigned long	__unused2; -	unsigned long	__unused3; -	unsigned long	__unused4; -}; - -#endif /* _ASMARM_SHMBUF_H */ diff --git a/arch/arm/include/asm/sigcontext.h b/arch/arm/include/asm/sigcontext.h deleted file mode 100644 index fc0b80b6a6f..00000000000 --- a/arch/arm/include/asm/sigcontext.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef _ASMARM_SIGCONTEXT_H -#define _ASMARM_SIGCONTEXT_H - -/* - * Signal context structure - contains all info to do with the state - * before the signal handler was invoked.  Note: only add new entries - * to the end of the structure. - */ -struct sigcontext { -	unsigned long trap_no; -	unsigned long error_code; -	unsigned long oldmask; -	unsigned long arm_r0; -	unsigned long arm_r1; -	unsigned long arm_r2; -	unsigned long arm_r3; -	unsigned long arm_r4; -	unsigned long arm_r5; -	unsigned long arm_r6; -	unsigned long arm_r7; -	unsigned long arm_r8; -	unsigned long arm_r9; -	unsigned long arm_r10; -	unsigned long arm_fp; -	unsigned long arm_ip; -	unsigned long arm_sp; -	unsigned long arm_lr; -	unsigned long arm_pc; -	unsigned long arm_cpsr; -	unsigned long fault_address; -}; - - -#endif diff --git a/arch/arm/include/asm/siginfo.h b/arch/arm/include/asm/siginfo.h deleted file mode 100644 index 5e21852e603..00000000000 --- a/arch/arm/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASMARM_SIGINFO_H -#define _ASMARM_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h index 43ba0fb1c8a..c0eb412aff0 100644 --- a/arch/arm/include/asm/signal.h +++ b/arch/arm/include/asm/signal.h @@ -1,12 +1,8 @@  #ifndef _ASMARM_SIGNAL_H  #define _ASMARM_SIGNAL_H -#include <linux/types.h> +#include <uapi/asm/signal.h> -/* Avoid too many header ordering problems.  */ -struct siginfo; - -#ifdef __KERNEL__  /* Most things should be clean enough to redefine this at will, if care     is taken to make libc match.  */ @@ -20,145 +16,7 @@ typedef struct {  	unsigned long sig[_NSIG_WORDS];  } sigset_t; -#else -/* Here we must cater to libcs that poke about in kernel headers.  
*/ - -#define NSIG		32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP		 1 -#define SIGINT		 2 -#define SIGQUIT		 3 -#define SIGILL		 4 -#define SIGTRAP		 5 -#define SIGABRT		 6 -#define SIGIOT		 6 -#define SIGBUS		 7 -#define SIGFPE		 8 -#define SIGKILL		 9 -#define SIGUSR1		10 -#define SIGSEGV		11 -#define SIGUSR2		12 -#define SIGPIPE		13 -#define SIGALRM		14 -#define SIGTERM		15 -#define SIGSTKFLT	16 -#define SIGCHLD		17 -#define SIGCONT		18 -#define SIGSTOP		19 -#define SIGTSTP		20 -#define SIGTTIN		21 -#define SIGTTOU		22 -#define SIGURG		23 -#define SIGXCPU		24 -#define SIGXFSZ		25 -#define SIGVTALRM	26 -#define SIGPROF		27 -#define SIGWINCH	28 -#define SIGIO		29 -#define SIGPOLL		SIGIO -/* -#define SIGLOST		29 -*/ -#define SIGPWR		30 -#define SIGSYS		31 -#define	SIGUNUSED	31 - -/* These should not be considered constants from userland.  */ -#define SIGRTMIN	32 -#define SIGRTMAX	_NSIG - -#define SIGSWI		32 - -/* - * SA_FLAGS values: - * - * SA_NOCLDSTOP		flag to turn off SIGCHLD when children stop. - * SA_NOCLDWAIT		flag on SIGCHLD to inhibit zombies. - * SA_SIGINFO		deliver the signal with SIGINFO structs - * SA_THIRTYTWO		delivers the signal in 32-bit mode, even if the task  - *			is running in 26-bit. - * SA_ONSTACK		allows alternate signal stacks (see sigaltstack(2)). - * SA_RESTART		flag to get restarting signals (which were the default long ago) - * SA_NODEFER		prevents the current signal from being masked in the handler. - * SA_RESETHAND		clears the handler when the signal is delivered. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP	0x00000001 -#define SA_NOCLDWAIT	0x00000002 -#define SA_SIGINFO	0x00000004 -#define SA_THIRTYTWO	0x02000000 -#define SA_RESTORER	0x04000000 -#define SA_ONSTACK	0x08000000 -#define SA_RESTART	0x10000000 -#define SA_NODEFER	0x40000000 -#define SA_RESETHAND	0x80000000 - -#define SA_NOMASK	SA_NODEFER -#define SA_ONESHOT	SA_RESETHAND - - -/*  - * sigaltstack controls - */ -#define SS_ONSTACK	1 -#define SS_DISABLE	2 - -#define MINSIGSTKSZ	2048 -#define SIGSTKSZ	8192 +#define __ARCH_HAS_SA_RESTORER -#include <asm-generic/signal-defs.h> - -#ifdef __KERNEL__ -struct old_sigaction { -	__sighandler_t sa_handler; -	old_sigset_t sa_mask; -	unsigned long sa_flags; -	__sigrestore_t sa_restorer; -}; - -struct sigaction { -	__sighandler_t sa_handler; -	unsigned long sa_flags; -	__sigrestore_t sa_restorer; -	sigset_t sa_mask;		/* mask last for extensibility */ -}; - -struct k_sigaction { -	struct sigaction sa; -}; - -#else -/* Here we must cater to libcs that poke about in kernel headers.  
*/ - -struct sigaction { -	union { -	  __sighandler_t _sa_handler; -	  void (*_sa_sigaction)(int, struct siginfo *, void *); -	} _u; -	sigset_t sa_mask; -	unsigned long sa_flags; -	void (*sa_restorer)(void); -}; - -#define sa_handler	_u._sa_handler -#define sa_sigaction	_u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { -	void __user *ss_sp; -	int ss_flags; -	size_t ss_size; -} stack_t; - -#ifdef __KERNEL__  #include <asm/sigcontext.h> -#define ptrace_signal_deliver(regs, cookie) do { } while (0) -#endif -  #endif diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h deleted file mode 100644 index 4fc1565e4f9..00000000000 --- a/arch/arm/include/asm/sizes.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA - */ -/* DO NOT EDIT!! - this file automatically generated - *                 from .s file by awk -f s2h.awk - */ -/*  Size definitions - *  Copyright (C) ARM Limited 1998. All rights reserved. - */ - -#ifndef __sizes_h -#define __sizes_h                       1 - -/* handy sizes */ -#define SZ_16				0x00000010 -#define SZ_256				0x00000100 -#define SZ_512				0x00000200 - -#define SZ_1K                           0x00000400 -#define SZ_2K                           0x00000800 -#define SZ_4K                           0x00001000 -#define SZ_8K                           0x00002000 -#define SZ_16K                          0x00004000 -#define SZ_32K                          0x00008000 -#define SZ_64K                          0x00010000 -#define SZ_128K                         0x00020000 -#define SZ_256K                         0x00040000 -#define SZ_512K                         0x00080000 - -#define SZ_1M                           0x00100000 -#define SZ_2M                           0x00200000 -#define SZ_4M                           0x00400000 -#define SZ_8M                           0x00800000 -#define SZ_16M                          0x01000000 -#define SZ_32M                          0x02000000 -#define SZ_48M                          0x03000000 -#define SZ_64M                          0x04000000 -#define SZ_128M                         0x08000000 -#define SZ_256M                         0x10000000 -#define SZ_512M                         0x20000000 - -#define SZ_1G                           0x40000000 -#define SZ_2G                           0x80000000 - -#endif - -/*         END */ diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 3d05190797c..2ec765c39ab 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -14,46 +14,39 @@  #include <linux/cpumask.h>  #include <linux/thread_info.h> -#include <mach/smp.h> -  #ifndef CONFIG_SMP  # error "<asm/smp.h> included in non-SMP build"  #endif  #define raw_smp_processor_id() (current_thread_info()->cpu) -/* - * at the moment, there's not a big penalty for changing 
CPUs - * (the >big< penalty is running SMP in the first place) - */ -#define PROC_CHANGE_PENALTY		15 -  struct seq_file;  /*   * generate IPI list text   */ -extern void show_ipi_list(struct seq_file *p); +extern void show_ipi_list(struct seq_file *, int);  /*   * Called from assembly code, this handles an IPI.   */ -asmlinkage void do_IPI(struct pt_regs *regs); +asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);  /* - * Setup the set of possible CPUs (via set_cpu_possible) + * Called from C code, this handles an IPI.   */ -extern void smp_init_cpus(void); +void handle_IPI(int ipinr, struct pt_regs *regs);  /* - * Move global data into per-processor storage. + * Setup the set of possible CPUs (via set_cpu_possible)   */ -extern void smp_store_cpu_info(unsigned int cpuid); +extern void smp_init_cpus(void); +  /* - * Raise an IPI cross call on CPUs in callmap. + * Provide a function to raise an IPI cross call on CPUs in callmap.   */ -extern void smp_cross_call(const struct cpumask *mask); +extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));  /*   * Boot a secondary CPU, and assign it the specified idle task. @@ -67,36 +60,73 @@ extern int boot_secondary(unsigned int cpu, struct task_struct *);   */  asmlinkage void secondary_start_kernel(void); -/* - * Perform platform specific initialisation of the specified CPU. - */ -extern void platform_secondary_init(unsigned int cpu);  /*   * Initial data for bringing up a secondary CPU.   */  struct secondary_data { -	unsigned long pgdir; +	union { +		unsigned long mpu_rgn_szr; +		unsigned long pgdir; +	}; +	unsigned long swapper_pg_dir;  	void *stack;  };  extern struct secondary_data secondary_data; +extern volatile int pen_release; +extern void secondary_startup(void);  extern int __cpu_disable(void); -extern int platform_cpu_disable(unsigned int cpu);  extern void __cpu_die(unsigned int cpu);  extern void cpu_die(void); -extern void platform_cpu_die(unsigned int cpu); -extern int platform_cpu_kill(unsigned int cpu); -extern void platform_cpu_enable(unsigned int cpu); -  extern void arch_send_call_function_single_ipi(int cpu);  extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); + +extern int register_ipi_completion(struct completion *completion, int cpu); + +struct smp_operations { +#ifdef CONFIG_SMP +	/* +	 * Setup the set of possible CPUs (via set_cpu_possible) +	 */ +	void (*smp_init_cpus)(void); +	/* +	 * Initialize cpu_possible map, and enable coherency +	 */ +	void (*smp_prepare_cpus)(unsigned int max_cpus); + +	/* +	 * Perform platform specific initialisation of the specified CPU. +	 */ +	void (*smp_secondary_init)(unsigned int cpu); +	/* +	 * Boot a secondary CPU, and assign it the specified idle task. +	 * This also gives us the initial stack to use for this CPU. 
+	 */ +	int  (*smp_boot_secondary)(unsigned int cpu, struct task_struct *idle); +#ifdef CONFIG_HOTPLUG_CPU +	int  (*cpu_kill)(unsigned int cpu); +	void (*cpu_die)(unsigned int cpu); +	int  (*cpu_disable)(unsigned int cpu); +#endif +#endif +}; + +struct of_cpu_method { +	const char *method; +	struct smp_operations *ops; +}; +#define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\ +	static const struct of_cpu_method __cpu_method_of_table_##name	\ +		__used __section(__cpu_method_of_table)			\ +		= { .method = _method, .ops = _ops }  /* - * show local interrupt info + * set platform specific SMP operations   */ -extern void show_local_irqs(struct seq_file *); +extern void smp_set_ops(struct smp_operations *);  #endif /* ifndef __ASM_ARM_SMP_H */ diff --git a/arch/arm/include/asm/smp_mpidr.h b/arch/arm/include/asm/smp_mpidr.h deleted file mode 100644 index 6a9307d6490..00000000000 --- a/arch/arm/include/asm/smp_mpidr.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef ASMARM_SMP_MIDR_H -#define ASMARM_SMP_MIDR_H - -#define hard_smp_processor_id()						\ -	({								\ -		unsigned int cpunum;					\ -		__asm__("\n"						\ -			"1:	mrc p15, 0, %0, c0, c0, 5\n"		\ -			"	.pushsection \".alt.smp.init\", \"a\"\n"\ -			"	.long	1b\n"				\ -			"	mov	%0, #0\n"			\ -			"	.popsection"				\ -			: "=r" (cpunum));				\ -		cpunum &= 0x0F;						\ -	}) - -#endif diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index f24c1b9e211..a252c0bfacf 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -5,6 +5,9 @@  #ifndef __ASMARM_SMP_PLAT_H  #define __ASMARM_SMP_PLAT_H +#include <linux/cpumask.h> +#include <linux/err.h> +  #include <asm/cputype.h>  /* @@ -23,6 +26,9 @@ static inline bool is_smp(void)  }  /* all SMP configurations have the extended CPUID registers */ +#ifndef CONFIG_MMU +#define tlb_ops_need_broadcast()	0 +#else  static inline int tlb_ops_need_broadcast(void)  {  	if (!is_smp()) @@ -30,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void)  	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;  } +#endif  #if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7  #define cache_ops_need_broadcast()	0 @@ -43,4 +50,45 @@ static inline int cache_ops_need_broadcast(void)  }  #endif +/* + * Logical CPU mapping. + */ +extern u32 __cpu_logical_map[]; +#define cpu_logical_map(cpu)	__cpu_logical_map[cpu] +/* + * Retrieve logical cpu index corresponding to a given MPIDR[23:0] + *  - mpidr: MPIDR[23:0] to be used for the look-up + * + * Returns the cpu logical index or -EINVAL on look-up error + */ +static inline int get_logical_index(u32 mpidr) +{ +	int cpu; +	for (cpu = 0; cpu < nr_cpu_ids; cpu++) +		if (cpu_logical_map(cpu) == mpidr) +			return cpu; +	return -EINVAL; +} + +/* + * NOTE ! Assembly code relies on the following + * structure memory layout in order to carry out load + * multiple from its base address. 
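For illustration, a hedged C rendering of the lookup that this layout (the struct defined just below) and the sleep.S assembly implement; the helper name is invented, and it assumes each shift_aff[] entry already folds in the bit offset of its affinity byte.

	/* Hypothetical C twin of the sleep.S hash: mask the MPIDR, then
	 * collapse each affinity byte by its precomputed shift. */
	static u32 mpidr_hash_index(u32 mpidr)
	{
		u32 hash;

		mpidr &= mpidr_hash.mask;
		hash  = (mpidr & 0x0000ff) >> mpidr_hash.shift_aff[0];
		hash |= (mpidr & 0x00ff00) >> mpidr_hash.shift_aff[1];
		hash |= (mpidr & 0xff0000) >> mpidr_hash.shift_aff[2];
		return hash;	/* strictly less than mpidr_hash_size() */
	}
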
For more + * information check arch/arm/kernel/sleep.S + */ +struct mpidr_hash { +	u32	mask; /* used by sleep.S */ +	u32	shift_aff[3]; /* used by sleep.S */ +	u32	bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ +	return 1 << mpidr_hash.bits; +} + +extern int platform_can_cpu_hotplug(void); +  #endif diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h index 2376835015d..0393fbab8dd 100644 --- a/arch/arm/include/asm/smp_scu.h +++ b/arch/arm/include/asm/smp_scu.h @@ -1,7 +1,48 @@  #ifndef __ASMARM_ARCH_SCU_H  #define __ASMARM_ARCH_SCU_H +#define SCU_PM_NORMAL	0 +#define SCU_PM_DORMANT	2 +#define SCU_PM_POWEROFF	3 + +#ifndef __ASSEMBLER__ + +#include <asm/cputype.h> + +static inline bool scu_a9_has_base(void) +{ +	return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; +} + +static inline unsigned long scu_a9_get_base(void) +{ +	unsigned long pa; + +	asm("mrc p15, 4, %0, c15, c0, 0" : "=r" (pa)); + +	return pa; +} + +#ifdef CONFIG_HAVE_ARM_SCU  unsigned int scu_get_core_count(void __iomem *); -void scu_enable(void __iomem *); +int scu_power_mode(void __iomem *, unsigned int); +#else +static inline unsigned int scu_get_core_count(void __iomem *scu_base) +{ +	return 0; +} +static inline int scu_power_mode(void __iomem *scu_base, unsigned int mode) +{ +	return -EINVAL; +} +#endif + +#if defined(CONFIG_SMP) && defined(CONFIG_HAVE_ARM_SCU) +void scu_enable(void __iomem *scu_base); +#else +static inline void scu_enable(void __iomem *scu_base) {} +#endif + +#endif  #endif diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h index 634f357be6b..7b2899c2f7f 100644 --- a/arch/arm/include/asm/smp_twd.h +++ b/arch/arm/include/asm/smp_twd.h @@ -18,12 +18,20 @@  #define TWD_TIMER_CONTROL_PERIODIC	(1 << 1)  #define TWD_TIMER_CONTROL_IT_ENABLE	(1 << 2) -struct clock_event_device; +#include <linux/ioport.h> -extern void __iomem *twd_base; +struct twd_local_timer { +	struct resource	res[2]; +}; -void twd_timer_stop(void); -int twd_timer_ack(void); -void twd_timer_setup(struct clock_event_device *); +#define DEFINE_TWD_LOCAL_TIMER(name,base,irq)	\ +struct twd_local_timer name __initdata = {	\ +	.res	= {				\ +		DEFINE_RES_MEM(base, 0x10),	\ +		DEFINE_RES_IRQ(irq),		\ +	},					\ +}; + +int twd_local_timer_register(struct twd_local_timer *);  #endif diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h deleted file mode 100644 index 90ffd04b8e7..00000000000 --- a/arch/arm/include/asm/socket.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef _ASMARM_SOCKET_H -#define _ASMARM_SOCKET_H - -#include <asm/sockios.h> - -/* For setsockopt(2) */ -#define SOL_SOCKET	1 - -#define SO_DEBUG	1 -#define SO_REUSEADDR	2 -#define SO_TYPE		3 -#define SO_ERROR	4 -#define SO_DONTROUTE	5 -#define SO_BROADCAST	6 -#define SO_SNDBUF	7 -#define SO_RCVBUF	8 -#define SO_SNDBUFFORCE	32 -#define SO_RCVBUFFORCE	33 -#define SO_KEEPALIVE	9 -#define SO_OOBINLINE	10 -#define SO_NO_CHECK	11 -#define SO_PRIORITY	12 -#define SO_LINGER	13 -#define SO_BSDCOMPAT	14 -/* To add :#define SO_REUSEPORT 15 */ -#define SO_PASSCRED	16 -#define SO_PEERCRED	17 -#define SO_RCVLOWAT	18 -#define SO_SNDLOWAT	19 -#define SO_RCVTIMEO	20 -#define SO_SNDTIMEO	21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION		22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT	23 -#define SO_SECURITY_ENCRYPTION_NETWORK		24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER        26 -#define 
SO_DETACH_FILTER        27 - -#define SO_PEERNAME             28 -#define SO_TIMESTAMP		29 -#define SCM_TIMESTAMP		SO_TIMESTAMP - -#define SO_ACCEPTCONN		30 - -#define SO_PEERSEC		31 -#define SO_PASSSEC		34 -#define SO_TIMESTAMPNS		35 -#define SCM_TIMESTAMPNS		SO_TIMESTAMPNS - -#define SO_MARK			36 - -#define SO_TIMESTAMPING		37 -#define SCM_TIMESTAMPING	SO_TIMESTAMPING - -#define SO_PROTOCOL		38 -#define SO_DOMAIN		39 - -#define SO_RXQ_OVFL             40 - -#endif /* _ASM_SOCKET_H */ diff --git a/arch/arm/include/asm/sockios.h b/arch/arm/include/asm/sockios.h deleted file mode 100644 index a2588a2512d..00000000000 --- a/arch/arm/include/asm/sockios.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ARCH_ARM_SOCKIOS_H -#define __ARCH_ARM_SOCKIOS_H - -/* Socket-level I/O control calls. */ -#define FIOSETOWN 	0x8901 -#define SIOCSPGRP	0x8902 -#define FIOGETOWN	0x8903 -#define SIOCGPGRP	0x8904 -#define SIOCATMARK	0x8905 -#define SIOCGSTAMP	0x8906		/* Get stamp (timeval) */ -#define SIOCGSTAMPNS	0x8907		/* Get stamp (timespec) */ - -#endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 17eb355707d..ac4bfae2670 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,35 +5,51 @@  #error SMP not supported on pre-ARMv6 CPUs  #endif +#include <linux/prefetch.h> + +/* + * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K + * extensions, so when running on UP, we have to patch these instructions away. + */ +#ifdef CONFIG_THUMB2_KERNEL +/* + * For Thumb-2, special care is needed to ensure that the conditional WFE + * instruction really does assemble to exactly 4 bytes (as required by + * the SMP_ON_UP fixup code).   By itself "wfene" might cause the + * assembler to insert an extra (16-bit) IT instruction, depending on the + * presence or absence of neighbouring conditional instructions. + * + * To avoid this unpredictability, an appropriate IT is inserted explicitly: + * the assembler won't change IT instructions which are explicitly present + * in the input. + */ +#define WFE(cond)	__ALT_SMP_ASM(		\ +	"it " cond "\n\t"			\ +	"wfe" cond ".n",			\ +						\ +	"nop.w"					\ +) +#else +#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop") +#endif + +#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop)) +  static inline void dsb_sev(void)  { -#if __LINUX_ARM_ARCH__ >= 7 -	__asm__ __volatile__ ( -		"dsb\n" -		"sev" -	); -#elif defined(CONFIG_CPU_32v6K) -	__asm__ __volatile__ ( -		"mcr p15, 0, %0, c7, c10, 4\n" -		"sev" -		: : "r" (0) -	); -#endif + +	dsb(ishst); +	__asm__(SEV);  }  /* - * ARMv6 Spin-locking. - * - * We exclusively read the old value.  If it is zero, we may have - * won the lock, so we try exclusively storing it.  A memory barrier - * is required after we get a lock, and before we release it, because - * V6 CPUs are assumed to have weakly ordered memory. + * ARMv6 ticket-based spin-locking.   * - * Unlocked value: 0 - * Locked value: 1 + * A memory barrier is required after we get a lock, and before we + * release it, because V6 CPUs are assumed to have weakly ordered + * memory.   
*/ -#define arch_spin_is_locked(x)		((x)->lock != 0)  #define arch_spin_unlock_wait(lock) \  	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) @@ -42,36 +58,47 @@ static inline void dsb_sev(void)  static inline void arch_spin_lock(arch_spinlock_t *lock)  {  	unsigned long tmp; +	u32 newval; +	arch_spinlock_t lockval; +	prefetchw(&lock->slock);  	__asm__ __volatile__( -"1:	ldrex	%0, [%1]\n" -"	teq	%0, #0\n" -#ifdef CONFIG_CPU_32v6K -"	wfene\n" -#endif -"	strexeq	%0, %2, [%1]\n" -"	teqeq	%0, #0\n" +"1:	ldrex	%0, [%3]\n" +"	add	%1, %0, %4\n" +"	strex	%2, %1, [%3]\n" +"	teq	%2, #0\n"  "	bne	1b" -	: "=&r" (tmp) -	: "r" (&lock->lock), "r" (1) +	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp) +	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)  	: "cc"); +	while (lockval.tickets.next != lockval.tickets.owner) { +		wfe(); +		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); +	} +  	smp_mb();  }  static inline int arch_spin_trylock(arch_spinlock_t *lock)  { -	unsigned long tmp; - -	__asm__ __volatile__( -"	ldrex	%0, [%1]\n" -"	teq	%0, #0\n" -"	strexeq	%0, %2, [%1]" -	: "=&r" (tmp) -	: "r" (&lock->lock), "r" (1) -	: "cc"); - -	if (tmp == 0) { +	unsigned long contended, res; +	u32 slock; + +	prefetchw(&lock->slock); +	do { +		__asm__ __volatile__( +		"	ldrex	%0, [%3]\n" +		"	mov	%2, #0\n" +		"	subs	%1, %0, %0, ror #16\n" +		"	addeq	%0, %0, %4\n" +		"	strexeq	%2, %0, [%3]" +		: "=&r" (slock), "=&r" (contended), "=&r" (res) +		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT) +		: "cc"); +	} while (res); + +	if (!contended) {  		smp_mb();  		return 1;  	} else { @@ -82,15 +109,26 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)  static inline void arch_spin_unlock(arch_spinlock_t *lock)  {  	smp_mb(); +	lock->tickets.owner++; +	dsb_sev(); +} -	__asm__ __volatile__( -"	str	%1, [%0]\n" -	: -	: "r" (&lock->lock), "r" (0) -	: "cc"); +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ +	return lock.tickets.owner == lock.tickets.next; +} -	dsb_sev(); +static inline int arch_spin_is_locked(arch_spinlock_t *lock) +{ +	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock)); +} + +static inline int arch_spin_is_contended(arch_spinlock_t *lock) +{ +	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); +	return (tickets.next - tickets.owner) > 1;  } +#define arch_spin_is_contended	arch_spin_is_contended  /*   * RWLOCKS @@ -104,12 +142,11 @@ static inline void arch_write_lock(arch_rwlock_t *rw)  {  	unsigned long tmp; +	prefetchw(&rw->lock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%1]\n"  "	teq	%0, #0\n" -#ifdef CONFIG_CPU_32v6K -"	wfene\n" -#endif +	WFE("ne")  "	strexeq	%0, %2, [%1]\n"  "	teq	%0, #0\n"  "	bne	1b" @@ -122,17 +159,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)  static inline int arch_write_trylock(arch_rwlock_t *rw)  { -	unsigned long tmp; - -	__asm__ __volatile__( -"1:	ldrex	%0, [%1]\n" -"	teq	%0, #0\n" -"	strexeq	%0, %2, [%1]" -	: "=&r" (tmp) -	: "r" (&rw->lock), "r" (0x80000000) -	: "cc"); - -	if (tmp == 0) { +	unsigned long contended, res; + +	prefetchw(&rw->lock); +	do { +		__asm__ __volatile__( +		"	ldrex	%0, [%2]\n" +		"	mov	%1, #0\n" +		"	teq	%0, #0\n" +		"	strexeq	%1, %3, [%2]" +		: "=&r" (contended), "=&r" (res) +		: "r" (&rw->lock), "r" (0x80000000) +		: "cc"); +	} while (res); + +	if (!contended) {  		smp_mb();  		return 1;  	} else { @@ -154,7 +195,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)  }  /* write_can_lock - would write_trylock() succeed? 
*/ -#define arch_write_can_lock(x)		((x)->lock == 0) +#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)  /*   * Read locks are a bit more hairy: @@ -172,13 +213,12 @@ static inline void arch_read_lock(arch_rwlock_t *rw)  {  	unsigned long tmp, tmp2; +	prefetchw(&rw->lock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%2]\n"  "	adds	%0, %0, #1\n"  "	strexpl	%1, %0, [%2]\n" -#ifdef CONFIG_CPU_32v6K -"	wfemi\n" -#endif +	WFE("mi")  "	rsbpls	%0, %1, #0\n"  "	bmi	1b"  	: "=&r" (tmp), "=&r" (tmp2) @@ -194,6 +234,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)  	smp_mb(); +	prefetchw(&rw->lock);  	__asm__ __volatile__(  "1:	ldrex	%0, [%2]\n"  "	sub	%0, %0, #1\n" @@ -210,22 +251,31 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)  static inline int arch_read_trylock(arch_rwlock_t *rw)  { -	unsigned long tmp, tmp2 = 1; - -	__asm__ __volatile__( -"1:	ldrex	%0, [%2]\n" -"	adds	%0, %0, #1\n" -"	strexpl	%1, %0, [%2]\n" -	: "=&r" (tmp), "+r" (tmp2) -	: "r" (&rw->lock) -	: "cc"); - -	smp_mb(); -	return tmp2 == 0; +	unsigned long contended, res; + +	prefetchw(&rw->lock); +	do { +		__asm__ __volatile__( +		"	ldrex	%0, [%2]\n" +		"	mov	%1, #0\n" +		"	adds	%0, %0, #1\n" +		"	strexpl	%1, %0, [%2]" +		: "=&r" (contended), "=&r" (res) +		: "r" (&rw->lock) +		: "cc"); +	} while (res); + +	/* If the lock is negative, then it is already held for write. */ +	if (contended < 0x80000000) { +		smp_mb(); +		return 1; +	} else { +		return 0; +	}  }  /* read_can_lock - would read_trylock() succeed? */ -#define arch_read_can_lock(x)		((x)->lock < 0x80000000) +#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)  #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)  #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index d14d197ae04..47663fcb10a 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -5,14 +5,27 @@  # error "please don't include this file directly"  #endif +#define TICKET_SHIFT	16 +  typedef struct { -	volatile unsigned int lock; +	union { +		u32 slock; +		struct __raw_tickets { +#ifdef __ARMEB__ +			u16 next; +			u16 owner; +#else +			u16 owner; +			u16 next; +#endif +		} tickets; +	};  } arch_spinlock_t; -#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED	{ { 0 } }  typedef struct { -	volatile unsigned int lock; +	u32 lock;  } arch_rwlock_t;  #define __ARCH_RW_LOCK_UNLOCKED		{ 0 } diff --git a/arch/arm/include/asm/stat.h b/arch/arm/include/asm/stat.h deleted file mode 100644 index 42c0c13999d..00000000000 --- a/arch/arm/include/asm/stat.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef _ASMARM_STAT_H -#define _ASMARM_STAT_H - -struct __old_kernel_stat { -	unsigned short st_dev; -	unsigned short st_ino; -	unsigned short st_mode; -	unsigned short st_nlink; -	unsigned short st_uid; -	unsigned short st_gid; -	unsigned short st_rdev; -	unsigned long  st_size; -	unsigned long  st_atime; -	unsigned long  st_mtime; -	unsigned long  st_ctime; -}; - -#define STAT_HAVE_NSEC  - -struct stat { -#if defined(__ARMEB__) -	unsigned short st_dev; -	unsigned short __pad1; -#else -	unsigned long  st_dev; -#endif -	unsigned long  st_ino; -	unsigned short st_mode; -	unsigned short st_nlink; -	unsigned short st_uid; -	unsigned short st_gid; -#if defined(__ARMEB__) -	unsigned short st_rdev; -	unsigned short __pad2; -#else -	unsigned long  st_rdev; -#endif -	unsigned long  st_size; -	unsigned long  st_blksize; -	
unsigned long  st_blocks; -	unsigned long  st_atime; -	unsigned long  st_atime_nsec; -	unsigned long  st_mtime; -	unsigned long  st_mtime_nsec; -	unsigned long  st_ctime; -	unsigned long  st_ctime_nsec; -	unsigned long  __unused4; -	unsigned long  __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - * Note: The kernel zero's the padded region because glibc might read them - * in the hope that the kernel has stretched to using larger sizes. - */ -struct stat64 { -	unsigned long long	st_dev; -	unsigned char   __pad0[4]; - -#define STAT64_HAS_BROKEN_ST_INO	1 -	unsigned long	__st_ino; -	unsigned int	st_mode; -	unsigned int	st_nlink; - -	unsigned long	st_uid; -	unsigned long	st_gid; - -	unsigned long long	st_rdev; -	unsigned char   __pad3[4]; - -	long long	st_size; -	unsigned long	st_blksize; -	unsigned long long st_blocks;	/* Number 512-byte blocks allocated. */ - -	unsigned long	st_atime; -	unsigned long	st_atime_nsec; - -	unsigned long	st_mtime; -	unsigned long	st_mtime_nsec; - -	unsigned long	st_ctime; -	unsigned long	st_ctime_nsec; - -	unsigned long long	st_ino; -}; - -#endif diff --git a/arch/arm/include/asm/statfs.h b/arch/arm/include/asm/statfs.h deleted file mode 100644 index 079447c05ba..00000000000 --- a/arch/arm/include/asm/statfs.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASMARM_STATFS_H -#define _ASMARM_STATFS_H - -/* - * With EABI there is 4 bytes of padding added to this structure. - * Let's pack it so the padding goes away to simplify dual ABI support. - * Note that user space does NOT have to pack this structure. - */ -#define ARCH_PACK_STATFS64 __attribute__((packed,aligned(4))) - -#include <asm-generic/statfs.h> -#endif diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h new file mode 100644 index 00000000000..cd20029bcd9 --- /dev/null +++ b/arch/arm/include/asm/suspend.h @@ -0,0 +1,12 @@ +#ifndef __ASM_ARM_SUSPEND_H +#define __ASM_ARM_SUSPEND_H + +struct sleep_save_sp { +	u32 *save_ptr_stash; +	u32 save_ptr_stash_phys; +}; + +extern void cpu_resume(void); +extern int cpu_suspend(unsigned long, int (*)(unsigned long)); + +#endif diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h index 9997ad20eff..537fc9b9188 100644 --- a/arch/arm/include/asm/swab.h +++ b/arch/arm/include/asm/swab.h @@ -15,21 +15,17 @@  #ifndef __ASM_ARM_SWAB_H  #define __ASM_ARM_SWAB_H -#include <linux/compiler.h> -#include <linux/types.h> +#include <uapi/asm/swab.h> -#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) -#  define __SWAB_64_THRU_32__ -#endif - -#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6 +#if __LINUX_ARM_ARCH__ >= 6 -static inline __attribute_const__ __u16 __arch_swab16(__u16 x) +static inline __attribute_const__ __u32 __arch_swahb32(__u32 x)  {  	__asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));  	return x;  } -#define __arch_swab16 __arch_swab16 +#define __arch_swahb32 __arch_swahb32 +#define __arch_swab16(x) ((__u16)__arch_swahb32(x))  static inline __attribute_const__ __u32 __arch_swab32(__u32 x)  { @@ -38,32 +34,5 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)  }  #define __arch_swab32 __arch_swab32 -#else - -static inline __attribute_const__ __u32 __arch_swab32(__u32 x) -{ -	__u32 t; - -#ifndef __thumb__ -	if (!__builtin_constant_p(x)) { -		/* -		 * The compiler needs a bit of a hint here to always do the -		 * right thing and not screw it up to different degrees -		 * depending on the gcc version. 
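One concrete data point for the rev16-based helpers added above, as a hedged self-test sketch (not part of the patch; WARN_ON is the generic kernel macro):

	static void swab_selftest(void)
	{
		/* rev16 swaps the bytes within each halfword, so swahb32
		 * maps 0x11223344 to 0x22114433; swab16 is its 16-bit
		 * truncation. */
		WARN_ON(__arch_swahb32(0x11223344) != 0x22114433);
		WARN_ON(__arch_swab16(0x1122) != 0x2211);
	}
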
-		 */ -		asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x)); -	} else -#endif -		t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */ - -	x = (x << 24) | (x >> 8);		/* mov r0,r0,ror #8      */ -	t &= ~0x00FF0000;			/* bic r1,r1,#0x00FF0000 */ -	x ^= (t >> 8);				/* eor r0,r0,r1,lsr #8   */ - -	return x; -} -#define __arch_swab32 __arch_swab32 -  #endif -  #endif diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h new file mode 100644 index 00000000000..c99e259469f --- /dev/null +++ b/arch/arm/include/asm/switch_to.h @@ -0,0 +1,28 @@ +#ifndef __ASM_ARM_SWITCH_TO_H +#define __ASM_ARM_SWITCH_TO_H + +#include <linux/thread_info.h> + +/* + * For v7 SMP cores running a preemptible kernel we may be pre-empted + * during a TLB maintenance operation, so execute an inner-shareable dsb + * to ensure that the maintenance completes in case we migrate to another + * CPU. + */ +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) +#define finish_arch_switch(prev)	dsb(ish) +#endif + +/* + * switch_to(prev, next) should switch from task `prev' to `next' + * `prev' will never be the same as `next'.  schedule() itself + * contains the memory barrier to tell GCC not to cache `current'. + */ +extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); + +#define switch_to(prev,next,last)					\ +do {									\ +	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\ +} while (0) + +#endif /* __ASM_ARM_SWITCH_TO_H */ diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h new file mode 100644 index 00000000000..9732b8e11e6 --- /dev/null +++ b/arch/arm/include/asm/sync_bitops.h @@ -0,0 +1,26 @@ +#ifndef __ASM_SYNC_BITOPS_H__ +#define __ASM_SYNC_BITOPS_H__ + +#include <asm/bitops.h> + +/* sync_bitops functions are equivalent to the SMP implementation of the + * original functions, independently from CONFIG_SMP being defined. + * + * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But + * under Xen you might be communicating with a completely external entity + * who might be on another CPU (e.g. two uniprocessor guests communicating + * via event channels and grant tables). So we need a variant of the bit + * ops which are SMP safe even on a UP kernel. + */ + +#define sync_set_bit(nr, p)		_set_bit(nr, p) +#define sync_clear_bit(nr, p)		_clear_bit(nr, p) +#define sync_change_bit(nr, p)		_change_bit(nr, p) +#define sync_test_and_set_bit(nr, p)	_test_and_set_bit(nr, p) +#define sync_test_and_clear_bit(nr, p)	_test_and_clear_bit(nr, p) +#define sync_test_and_change_bit(nr, p)	_test_and_change_bit(nr, p) +#define sync_test_bit(nr, addr)		test_bit(nr, addr) +#define sync_cmpxchg			cmpxchg + + +#endif diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h new file mode 100644 index 00000000000..4651f6999b7 --- /dev/null +++ b/arch/arm/include/asm/syscall.h @@ -0,0 +1,112 @@ +/* + * Access to user system call parameters and results + * + * See asm-generic/syscall.h for descriptions of what we must do here. 
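For illustration, a hedged sketch of the consumer pattern this header serves, written against the accessors defined just below; the hook name is invented and pr_debug assumes <linux/printk.h>.

	/* Hypothetical tracer hook: report the syscall number and its
	 * first two arguments for a task stopped at syscall entry. */
	static void trace_syscall_entry(struct task_struct *task,
					struct pt_regs *regs)
	{
		unsigned long args[2];

		syscall_get_arguments(task, regs, 0, 2, args);
		pr_debug("sys %d(%lx, %lx)\n",
			 syscall_get_nr(task, regs), args[0], args[1]);
	}
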
+ */ + +#ifndef _ASM_ARM_SYSCALL_H +#define _ASM_ARM_SYSCALL_H + +#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */ +#include <linux/elf.h> /* for ELF_EM */ +#include <linux/err.h> +#include <linux/sched.h> + +#include <asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) + +extern const unsigned long sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, +				 struct pt_regs *regs) +{ +	return task_thread_info(task)->syscall; +} + +static inline void syscall_rollback(struct task_struct *task, +				    struct pt_regs *regs) +{ +	regs->ARM_r0 = regs->ARM_ORIG_r0; +} + +static inline long syscall_get_error(struct task_struct *task, +				     struct pt_regs *regs) +{ +	unsigned long error = regs->ARM_r0; +	return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, +					    struct pt_regs *regs) +{ +	return regs->ARM_r0; +} + +static inline void syscall_set_return_value(struct task_struct *task, +					    struct pt_regs *regs, +					    int error, long val) +{ +	regs->ARM_r0 = (long) error ? error : val; +} + +#define SYSCALL_MAX_ARGS 7 + +static inline void syscall_get_arguments(struct task_struct *task, +					 struct pt_regs *regs, +					 unsigned int i, unsigned int n, +					 unsigned long *args) +{ +	if (n == 0) +		return; + +	if (i + n > SYSCALL_MAX_ARGS) { +		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; +		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; +		pr_warning("%s called with max args %d, handling only %d\n", +			   __func__, i + n, SYSCALL_MAX_ARGS); +		memset(args_bad, 0, n_bad * sizeof(args[0])); +		n = SYSCALL_MAX_ARGS - i; +	} + +	if (i == 0) { +		args[0] = regs->ARM_ORIG_r0; +		args++; +		i++; +		n--; +	} + +	memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0])); +} + +static inline void syscall_set_arguments(struct task_struct *task, +					 struct pt_regs *regs, +					 unsigned int i, unsigned int n, +					 const unsigned long *args) +{ +	if (n == 0) +		return; + +	if (i + n > SYSCALL_MAX_ARGS) { +		pr_warning("%s called with max args %d, handling only %d\n", +			   __func__, i + n, SYSCALL_MAX_ARGS); +		n = SYSCALL_MAX_ARGS - i; +	} + +	if (i == 0) { +		regs->ARM_ORIG_r0 = args[0]; +		args++; +		i++; +		n--; +	} + +	memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0])); +} + +static inline int syscall_get_arch(void) +{ +	/* ARM tasks don't change audit architectures on the fly. 
*/ +	return AUDIT_ARCH_ARM; +} + +#endif /* _ASM_ARM_SYSCALL_H */ diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h deleted file mode 100644 index 1120f18a6b1..00000000000 --- a/arch/arm/include/asm/system.h +++ /dev/null @@ -1,528 +0,0 @@ -#ifndef __ASM_ARM_SYSTEM_H -#define __ASM_ARM_SYSTEM_H - -#ifdef __KERNEL__ - -#define CPU_ARCH_UNKNOWN	0 -#define CPU_ARCH_ARMv3		1 -#define CPU_ARCH_ARMv4		2 -#define CPU_ARCH_ARMv4T		3 -#define CPU_ARCH_ARMv5		4 -#define CPU_ARCH_ARMv5T		5 -#define CPU_ARCH_ARMv5TE	6 -#define CPU_ARCH_ARMv5TEJ	7 -#define CPU_ARCH_ARMv6		8 -#define CPU_ARCH_ARMv7		9 - -/* - * CR1 bits (CP#15 CR1) - */ -#define CR_M	(1 << 0)	/* MMU enable				*/ -#define CR_A	(1 << 1)	/* Alignment abort enable		*/ -#define CR_C	(1 << 2)	/* Dcache enable			*/ -#define CR_W	(1 << 3)	/* Write buffer enable			*/ -#define CR_P	(1 << 4)	/* 32-bit exception handler		*/ -#define CR_D	(1 << 5)	/* 32-bit data address range		*/ -#define CR_L	(1 << 6)	/* Implementation defined		*/ -#define CR_B	(1 << 7)	/* Big endian				*/ -#define CR_S	(1 << 8)	/* System MMU protection		*/ -#define CR_R	(1 << 9)	/* ROM MMU protection			*/ -#define CR_F	(1 << 10)	/* Implementation defined		*/ -#define CR_Z	(1 << 11)	/* Implementation defined		*/ -#define CR_I	(1 << 12)	/* Icache enable			*/ -#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/ -#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/ -#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/ -#define CR_DT	(1 << 16) -#define CR_IT	(1 << 18) -#define CR_ST	(1 << 19) -#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/ -#define CR_U	(1 << 22)	/* Unaligned access operation		*/ -#define CR_XP	(1 << 23)	/* Extended page tables			*/ -#define CR_VE	(1 << 24)	/* Vectored interrupts			*/ -#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/ -#define CR_TRE	(1 << 28)	/* TEX remap enable			*/ -#define CR_AFE	(1 << 29)	/* Access flag enable			*/ -#define CR_TE	(1 << 30)	/* Thumb exception enable		*/ - -/* - * This is used to ensure the compiler did actually allocate the register we - * asked it for some inline assembly sequences.  Apparently we can't trust - * the compiler from one version to another so a bit of paranoia won't hurt. - * This string is meant to be concatenated with the inline asm string and - * will cause compilation to stop on mismatch. 
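For illustration, a hedged sketch of the usage pattern the macro defined just below guards; the wrapper is hypothetical, but the shape mirrors kernel asm wrappers that pin operands to specific registers.

	/* Hypothetical wrapper: __asmeq() aborts the build unless the
	 * compiler really placed the operands in r0 and r1. */
	static inline unsigned long trap_with_args(unsigned long a,
						   unsigned long b)
	{
		register unsigned long r0 asm("r0") = a;
		register unsigned long r1 asm("r1") = b;

		__asm__ __volatile__(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			"svc	#0"
			: "+r" (r0)
			: "r" (r1)
			: "memory");
		return r0;
	}
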
- * (for details, see gcc PR 15089) - */ -#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t" - -#ifndef __ASSEMBLY__ - -#include <linux/linkage.h> -#include <linux/irqflags.h> - -#include <asm/outercache.h> - -#define __exception	__attribute__((section(".exception.text"))) - -struct thread_info; -struct task_struct; - -/* information about the system we're running on */ -extern unsigned int system_rev; -extern unsigned int system_serial_low; -extern unsigned int system_serial_high; -extern unsigned int mem_fclk_21285; - -struct pt_regs; - -void die(const char *msg, struct pt_regs *regs, int err); - -struct siginfo; -void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, -		unsigned long err, unsigned long trap); - -void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, -				       struct pt_regs *), -		     int sig, int code, const char *name); - -void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, -				       struct pt_regs *), -		     int sig, int code, const char *name); - -#define xchg(ptr,x) \ -	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -struct mm_struct; -extern void show_pte(struct mm_struct *mm, unsigned long addr); -extern void __show_regs(struct pt_regs *); - -extern int cpu_architecture(void); -extern void cpu_init(void); - -void arm_machine_restart(char mode, const char *cmd); -extern void (*arm_pm_restart)(char str, const char *cmd); - -#define UDBG_UNDEFINED	(1 << 0) -#define UDBG_SYSCALL	(1 << 1) -#define UDBG_BADABORT	(1 << 2) -#define UDBG_SEGV	(1 << 3) -#define UDBG_BUS	(1 << 4) - -extern unsigned int user_debug; - -#if __LINUX_ARM_ARCH__ >= 4 -#define vectors_high()	(cr_alignment & CR_V) -#else -#define vectors_high()	(0) -#endif - -#if __LINUX_ARM_ARCH__ >= 7 -#define isb() __asm__ __volatile__ ("isb" : : : "memory") -#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") -#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") -#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ -				    : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ -				    : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ -				    : : "r" (0) : "memory") -#elif defined(CONFIG_CPU_FA526) -#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ -				    : : "r" (0) : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ -				    : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") -#else -#define isb() __asm__ __volatile__ ("" : : : "memory") -#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ -				    : : "r" (0) : "memory") -#define dmb() __asm__ __volatile__ ("" : : : "memory") -#endif - -#ifdef CONFIG_ARCH_HAS_BARRIERS -#include <mach/barriers.h> -#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) -#define mb()		do { dsb(); outer_sync(); } while (0) -#define rmb()		dmb() -#define wmb()		mb() -#else -#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) -#endif - -#ifndef CONFIG_SMP -#define smp_mb()	barrier() -#define smp_rmb()	barrier() -#define smp_wmb()	
barrier() -#else -#define smp_mb()	dmb() -#define smp_rmb()	dmb() -#define smp_wmb()	dmb() -#endif - -#define read_barrier_depends()		do { } while(0) -#define smp_read_barrier_depends()	do { } while(0) - -#define set_mb(var, value)	do { var = value; smp_mb(); } while (0) -#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); - -extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */ -extern unsigned long cr_alignment;	/* defined in entry-armv.S */ - -static inline unsigned int get_cr(void) -{ -	unsigned int val; -	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc"); -	return val; -} - -static inline void set_cr(unsigned int val) -{ -	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" -	  : : "r" (val) : "cc"); -	isb(); -} - -#ifndef CONFIG_SMP -extern void adjust_cr(unsigned long mask, unsigned long set); -#endif - -#define CPACC_FULL(n)		(3 << (n * 2)) -#define CPACC_SVC(n)		(1 << (n * 2)) -#define CPACC_DISABLE(n)	(0 << (n * 2)) - -static inline unsigned int get_copro_access(void) -{ -	unsigned int val; -	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" -	  : "=r" (val) : : "cc"); -	return val; -} - -static inline void set_copro_access(unsigned int val) -{ -	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" -	  : : "r" (val) : "cc"); -	isb(); -} - -/* - * switch_mm() may do a full cache flush over the context switch, - * so enable interrupts over the context switch to avoid high - * latency. - */ -#define __ARCH_WANT_INTERRUPTS_ON_CTXSW - -/* - * switch_to(prev, next) should switch from task `prev' to `next' - * `prev' will never be the same as `next'.  schedule() itself - * contains the memory barrier to tell GCC not to cache `current'. - */ -extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *); - -#define switch_to(prev,next,last)					\ -do {									\ -	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\ -} while (0) - -#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) -/* - * On the StrongARM, "swp" is terminally broken since it bypasses the - * cache totally.  This means that the cache becomes inconsistent, and, - * since we use normal loads/stores as well, this is really bad. - * Typically, this causes oopsen in filp_close, but could have other, - * more disastrous effects.  There are two work-arounds: - *  1. Disable interrupts and emulate the atomic swap - *  2. Clean the cache, perform atomic swap, flush the cache - * - * We choose (1) since it's the "easiest" to achieve here and is not - * dependent on the processor type. - * - * NOTE that this solution won't work on an SMP system, so explicitly - * forbid it here. 
- */ -#define swp_is_buggy -#endif - -static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ -	extern void __bad_xchg(volatile void *, int); -	unsigned long ret; -#ifdef swp_is_buggy -	unsigned long flags; -#endif -#if __LINUX_ARM_ARCH__ >= 6 -	unsigned int tmp; -#endif - -	smp_mb(); - -	switch (size) { -#if __LINUX_ARM_ARCH__ >= 6 -	case 1: -		asm volatile("@	__xchg1\n" -		"1:	ldrexb	%0, [%3]\n" -		"	strexb	%1, %2, [%3]\n" -		"	teq	%1, #0\n" -		"	bne	1b" -			: "=&r" (ret), "=&r" (tmp) -			: "r" (x), "r" (ptr) -			: "memory", "cc"); -		break; -	case 4: -		asm volatile("@	__xchg4\n" -		"1:	ldrex	%0, [%3]\n" -		"	strex	%1, %2, [%3]\n" -		"	teq	%1, #0\n" -		"	bne	1b" -			: "=&r" (ret), "=&r" (tmp) -			: "r" (x), "r" (ptr) -			: "memory", "cc"); -		break; -#elif defined(swp_is_buggy) -#ifdef CONFIG_SMP -#error SMP is not supported on this platform -#endif -	case 1: -		raw_local_irq_save(flags); -		ret = *(volatile unsigned char *)ptr; -		*(volatile unsigned char *)ptr = x; -		raw_local_irq_restore(flags); -		break; - -	case 4: -		raw_local_irq_save(flags); -		ret = *(volatile unsigned long *)ptr; -		*(volatile unsigned long *)ptr = x; -		raw_local_irq_restore(flags); -		break; -#else -	case 1: -		asm volatile("@	__xchg1\n" -		"	swpb	%0, %1, [%2]" -			: "=&r" (ret) -			: "r" (x), "r" (ptr) -			: "memory", "cc"); -		break; -	case 4: -		asm volatile("@	__xchg4\n" -		"	swp	%0, %1, [%2]" -			: "=&r" (ret) -			: "r" (x), "r" (ptr) -			: "memory", "cc"); -		break; -#endif -	default: -		__bad_xchg(ptr, size), ret = 0; -		break; -	} -	smp_mb(); - -	return ret; -} - -extern void disable_hlt(void); -extern void enable_hlt(void); - -void cpu_idle_wait(void); - -#include <asm-generic/cmpxchg-local.h> - -#if __LINUX_ARM_ARCH__ < 6 - -#ifdef CONFIG_SMP -#error "SMP is not supported on this platform" -#endif - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. - */ -#define cmpxchg_local(ptr, o, n)				  	       \ -	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ -			(unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#ifndef CONFIG_SMP -#include <asm-generic/cmpxchg.h> -#endif - -#else	/* __LINUX_ARM_ARCH__ >= 6 */ - -extern void __bad_cmpxchg(volatile void *ptr, int size); - -/* - * cmpxchg only supports 32-bit operands on ARMv6. 
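
For orientation before the ldrex/strex implementation below: the usual consumer of a cmpxchg() primitive is a load/compute/compare-and-swap retry loop. Here is a stand-alone analogue built on a GCC builtin so it compiles and runs anywhere; in kernel code the swap line would simply be cmpxchg(v, old, old + inc):

	#include <stdio.h>

	/* Classic CAS retry loop: recompute and retry until no one else
	 * modified the value between our load and our swap attempt. */
	static unsigned int add_with_cas(unsigned int *v, unsigned int inc)
	{
		unsigned int old;

		do {
			old = *v;
		} while (__sync_val_compare_and_swap(v, old, old + inc) != old);

		return old + inc;
	}

	int main(void)
	{
		unsigned int v = 40;

		printf("%u\n", add_with_cas(&v, 2));	/* prints 42 */
		return 0;
	}
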
- */ - -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, -				      unsigned long new, int size) -{ -	unsigned long oldval, res; - -	switch (size) { -#ifdef CONFIG_CPU_32v6K -	case 1: -		do { -			asm volatile("@ __cmpxchg1\n" -			"	ldrexb	%1, [%2]\n" -			"	mov	%0, #0\n" -			"	teq	%1, %3\n" -			"	strexbeq %0, %4, [%2]\n" -				: "=&r" (res), "=&r" (oldval) -				: "r" (ptr), "Ir" (old), "r" (new) -				: "memory", "cc"); -		} while (res); -		break; -	case 2: -		do { -			asm volatile("@ __cmpxchg2\n" -			"	ldrexh	%1, [%2]\n" -			"	mov	%0, #0\n" -			"	teq	%1, %3\n" -			"	strexheq %0, %4, [%2]\n" -				: "=&r" (res), "=&r" (oldval) -				: "r" (ptr), "Ir" (old), "r" (new) -				: "memory", "cc"); -		} while (res); -		break; -#endif /* CONFIG_CPU_32v6K */ -	case 4: -		do { -			asm volatile("@ __cmpxchg4\n" -			"	ldrex	%1, [%2]\n" -			"	mov	%0, #0\n" -			"	teq	%1, %3\n" -			"	strexeq %0, %4, [%2]\n" -				: "=&r" (res), "=&r" (oldval) -				: "r" (ptr), "Ir" (old), "r" (new) -				: "memory", "cc"); -		} while (res); -		break; -	default: -		__bad_cmpxchg(ptr, size); -		oldval = 0; -	} - -	return oldval; -} - -static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, -					 unsigned long new, int size) -{ -	unsigned long ret; - -	smp_mb(); -	ret = __cmpxchg(ptr, old, new, size); -	smp_mb(); - -	return ret; -} - -#define cmpxchg(ptr,o,n)						\ -	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\ -					  (unsigned long)(o),		\ -					  (unsigned long)(n),		\ -					  sizeof(*(ptr)))) - -static inline unsigned long __cmpxchg_local(volatile void *ptr, -					    unsigned long old, -					    unsigned long new, int size) -{ -	unsigned long ret; - -	switch (size) { -#ifndef CONFIG_CPU_32v6K -	case 1: -	case 2: -		ret = __cmpxchg_local_generic(ptr, old, new, size); -		break; -#endif	/* !CONFIG_CPU_32v6K */ -	default: -		ret = __cmpxchg(ptr, old, new, size); -	} - -	return ret; -} - -#define cmpxchg_local(ptr,o,n)						\ -	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\ -				       (unsigned long)(o),		\ -				       (unsigned long)(n),		\ -				       sizeof(*(ptr)))) - -#ifdef CONFIG_CPU_32v6K - -/* - * Note: ARMv7-M (currently unsupported by Linux) does not support - * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should - * not be allowed to use __cmpxchg64. 
- */ -static inline unsigned long long __cmpxchg64(volatile void *ptr, -					     unsigned long long old, -					     unsigned long long new) -{ -	register unsigned long long oldval asm("r0"); -	register unsigned long long __old asm("r2") = old; -	register unsigned long long __new asm("r4") = new; -	unsigned long res; - -	do { -		asm volatile( -		"	@ __cmpxchg8\n" -		"	ldrexd	%1, %H1, [%2]\n" -		"	mov	%0, #0\n" -		"	teq	%1, %3\n" -		"	teqeq	%H1, %H3\n" -		"	strexdeq %0, %4, %H4, [%2]\n" -			: "=&r" (res), "=&r" (oldval) -			: "r" (ptr), "Ir" (__old), "r" (__new) -			: "memory", "cc"); -	} while (res); - -	return oldval; -} - -static inline unsigned long long __cmpxchg64_mb(volatile void *ptr, -						unsigned long long old, -						unsigned long long new) -{ -	unsigned long long ret; - -	smp_mb(); -	ret = __cmpxchg64(ptr, old, new); -	smp_mb(); - -	return ret; -} - -#define cmpxchg64(ptr,o,n)						\ -	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\ -					    (unsigned long long)(o),	\ -					    (unsigned long long)(n))) - -#define cmpxchg64_local(ptr,o,n)					\ -	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\ -					 (unsigned long long)(o),	\ -					 (unsigned long long)(n))) - -#else	/* !CONFIG_CPU_32v6K */ - -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#endif	/* CONFIG_CPU_32v6K */ - -#endif	/* __LINUX_ARM_ARCH__ >= 6 */ - -#endif /* __ASSEMBLY__ */ - -#define arch_align_stack(x) (x) - -#endif /* __KERNEL__ */ - -#endif diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h new file mode 100644 index 00000000000..720ea0320a6 --- /dev/null +++ b/arch/arm/include/asm/system_info.h @@ -0,0 +1,28 @@ +#ifndef __ASM_ARM_SYSTEM_INFO_H +#define __ASM_ARM_SYSTEM_INFO_H + +#define CPU_ARCH_UNKNOWN	0 +#define CPU_ARCH_ARMv3		1 +#define CPU_ARCH_ARMv4		2 +#define CPU_ARCH_ARMv4T		3 +#define CPU_ARCH_ARMv5		4 +#define CPU_ARCH_ARMv5T		5 +#define CPU_ARCH_ARMv5TE	6 +#define CPU_ARCH_ARMv5TEJ	7 +#define CPU_ARCH_ARMv6		8 +#define CPU_ARCH_ARMv7		9 +#define CPU_ARCH_ARMv7M		10 + +#ifndef __ASSEMBLY__ + +/* information about the system we're running on */ +extern unsigned int system_rev; +extern unsigned int system_serial_low; +extern unsigned int system_serial_high; +extern unsigned int mem_fclk_21285; + +extern int __pure cpu_architecture(void); + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_ARM_SYSTEM_INFO_H */ diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h new file mode 100644 index 00000000000..a3d61ad984a --- /dev/null +++ b/arch/arm/include/asm/system_misc.h @@ -0,0 +1,27 @@ +#ifndef __ASM_ARM_SYSTEM_MISC_H +#define __ASM_ARM_SYSTEM_MISC_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/irqflags.h> +#include <linux/reboot.h> + +extern void cpu_init(void); + +void soft_restart(unsigned long); +extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); +extern void (*arm_pm_idle)(void); + +#define UDBG_UNDEFINED	(1 << 0) +#define UDBG_SYSCALL	(1 << 1) +#define UDBG_BADABORT	(1 << 2) +#define UDBG_SEGV	(1 << 3) +#define UDBG_BUS	(1 << 4) + +extern unsigned int user_debug; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_ARM_SYSTEM_MISC_H */ diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h index 5929ef5d927..8578d726ad7 100644 --- a/arch/arm/include/asm/tcm.h +++ b/arch/arm/include/asm/tcm.h @@ -27,5 +27,7 @@  void *tcm_alloc(size_t len);  void tcm_free(void *addr, size_t len); +bool tcm_dtcm_present(void); +bool 
tcm_itcm_present(void);  #endif diff --git a/arch/arm/include/asm/termbits.h b/arch/arm/include/asm/termbits.h deleted file mode 100644 index 704135d28d1..00000000000 --- a/arch/arm/include/asm/termbits.h +++ /dev/null @@ -1,198 +0,0 @@ -#ifndef __ASM_ARM_TERMBITS_H -#define __ASM_ARM_TERMBITS_H - -typedef unsigned char	cc_t; -typedef unsigned int	speed_t; -typedef unsigned int	tcflag_t; - -#define NCCS 19 -struct termios { -	tcflag_t c_iflag;		/* input mode flags */ -	tcflag_t c_oflag;		/* output mode flags */ -	tcflag_t c_cflag;		/* control mode flags */ -	tcflag_t c_lflag;		/* local mode flags */ -	cc_t c_line;			/* line discipline */ -	cc_t c_cc[NCCS];		/* control characters */ -}; - -struct termios2 { -	tcflag_t c_iflag;		/* input mode flags */ -	tcflag_t c_oflag;		/* output mode flags */ -	tcflag_t c_cflag;		/* control mode flags */ -	tcflag_t c_lflag;		/* local mode flags */ -	cc_t c_line;			/* line discipline */ -	cc_t c_cc[NCCS];		/* control characters */ -	speed_t c_ispeed;		/* input speed */ -	speed_t c_ospeed;		/* output speed */ -}; - -struct ktermios { -	tcflag_t c_iflag;		/* input mode flags */ -	tcflag_t c_oflag;		/* output mode flags */ -	tcflag_t c_cflag;		/* control mode flags */ -	tcflag_t c_lflag;		/* local mode flags */ -	cc_t c_line;			/* line discipline */ -	cc_t c_cc[NCCS];		/* control characters */ -	speed_t c_ispeed;		/* input speed */ -	speed_t c_ospeed;		/* output speed */ -}; - - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - -/* c_iflag bits */ -#define IGNBRK	0000001 -#define BRKINT	0000002 -#define IGNPAR	0000004 -#define PARMRK	0000010 -#define INPCK	0000020 -#define ISTRIP	0000040 -#define INLCR	0000100 -#define IGNCR	0000200 -#define ICRNL	0000400 -#define IUCLC	0001000 -#define IXON	0002000 -#define IXANY	0004000 -#define IXOFF	0010000 -#define IMAXBEL	0020000 -#define IUTF8	0040000 - -/* c_oflag bits */ -#define OPOST	0000001 -#define OLCUC	0000002 -#define ONLCR	0000004 -#define OCRNL	0000010 -#define ONOCR	0000020 -#define ONLRET	0000040 -#define OFILL	0000100 -#define OFDEL	0000200 -#define NLDLY	0000400 -#define   NL0	0000000 -#define   NL1	0000400 -#define CRDLY	0003000 -#define   CR0	0000000 -#define   CR1	0001000 -#define   CR2	0002000 -#define   CR3	0003000 -#define TABDLY	0014000 -#define   TAB0	0000000 -#define   TAB1	0004000 -#define   TAB2	0010000 -#define   TAB3	0014000 -#define   XTABS	0014000 -#define BSDLY	0020000 -#define   BS0	0000000 -#define   BS1	0020000 -#define VTDLY	0040000 -#define   VT0	0000000 -#define   VT1	0040000 -#define FFDLY	0100000 -#define   FF0	0000000 -#define   FF1	0100000 - -/* c_cflag bit meaning */ -#define CBAUD	0010017 -#define  B0	0000000		/* hang up */ -#define  B50	0000001 -#define  B75	0000002 -#define  B110	0000003 -#define  B134	0000004 -#define  B150	0000005 -#define  B200	0000006 -#define  B300	0000007 -#define  B600	0000010 -#define  B1200	0000011 -#define  B1800	0000012 -#define  B2400	0000013 -#define  B4800	0000014 -#define  B9600	0000015 -#define  B19200	0000016 -#define  B38400	0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE	0000060 -#define   CS5	0000000 -#define   CS6	0000020 -#define   CS7	0000040 -#define   CS8	0000060 -#define CSTOPB	0000100 -#define CREAD	0000200 -#define PARENB	0000400 -#define 
PARODD	0001000 -#define HUPCL	0002000 -#define CLOCAL	0004000 -#define CBAUDEX 0010000 -#define    BOTHER 0010000 -#define    B57600 0010001 -#define   B115200 0010002 -#define   B230400 0010003 -#define   B460800 0010004 -#define   B500000 0010005 -#define   B576000 0010006 -#define   B921600 0010007 -#define  B1000000 0010010 -#define  B1152000 0010011 -#define  B1500000 0010012 -#define  B2000000 0010013 -#define  B2500000 0010014 -#define  B3000000 0010015 -#define  B3500000 0010016 -#define  B4000000 0010017 -#define CIBAUD	  002003600000		/* input baud rate */ -#define CMSPAR    010000000000		/* mark or space (stick) parity */ -#define CRTSCTS	  020000000000		/* flow control */ - -#define IBSHIFT	   16 - -/* c_lflag bits */ -#define ISIG	0000001 -#define ICANON	0000002 -#define XCASE	0000004 -#define ECHO	0000010 -#define ECHOE	0000020 -#define ECHOK	0000040 -#define ECHONL	0000100 -#define NOFLSH	0000200 -#define TOSTOP	0000400 -#define ECHOCTL	0001000 -#define ECHOPRT	0002000 -#define ECHOKE	0004000 -#define FLUSHO	0010000 -#define PENDIN	0040000 -#define IEXTEN	0100000 -#define EXTPROC	0200000 - -/* tcflow() and TCXONC use these */ -#define	TCOOFF		0 -#define	TCOON		1 -#define	TCIOFF		2 -#define	TCION		3 - -/* tcflush() and TCFLSH use these */ -#define	TCIFLUSH	0 -#define	TCOFLUSH	1 -#define	TCIOFLUSH	2 - -/* tcsetattr uses these */ -#define	TCSANOW		0 -#define	TCSADRAIN	1 -#define	TCSAFLUSH	2 - -#endif	/* __ASM_ARM_TERMBITS_H */ diff --git a/arch/arm/include/asm/termios.h b/arch/arm/include/asm/termios.h deleted file mode 100644 index 293e3f1bc3f..00000000000 --- a/arch/arm/include/asm/termios.h +++ /dev/null @@ -1,92 +0,0 @@ -#ifndef __ASM_ARM_TERMIOS_H -#define __ASM_ARM_TERMIOS_H - -#include <asm/termbits.h> -#include <asm/ioctls.h> - -struct winsize { -	unsigned short ws_row; -	unsigned short ws_col; -	unsigned short ws_xpixel; -	unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { -	unsigned short c_iflag;		/* input mode flags */ -	unsigned short c_oflag;		/* output mode flags */ -	unsigned short c_cflag;		/* control mode flags */ -	unsigned short c_lflag;		/* local mode flags */ -	unsigned char c_line;		/* line discipline */ -	unsigned char c_cc[NCC];	/* control characters */ -}; - -#ifdef __KERNEL__ -/*	intr=^C		quit=^|		erase=del	kill=^U -	eof=^D		vtime=\0	vmin=\1		sxtc=\0 -	start=^Q	stop=^S		susp=^Z		eol=\0 -	reprint=^R	discard=^U	werase=^W	lnext=^V -	eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" -#endif - -/* modem lines */ -#define TIOCM_LE	0x001 -#define TIOCM_DTR	0x002 -#define TIOCM_RTS	0x004 -#define TIOCM_ST	0x008 -#define TIOCM_SR	0x010 -#define TIOCM_CTS	0x020 -#define TIOCM_CAR	0x040 -#define TIOCM_RNG	0x080 -#define TIOCM_DSR	0x100 -#define TIOCM_CD	TIOCM_CAR -#define TIOCM_RI	TIOCM_RNG -#define TIOCM_OUT1	0x2000 -#define TIOCM_OUT2	0x4000 -#define TIOCM_LOOP	0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -/* - * Translate a "termio" structure into a "termios". Ugh. 
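
The "Ugh" refers to the aliasing trick in the SET_LOW_TERMIOS_BITS macro that follows: each 16-bit termio flag field is stored over the low half of the corresponding 32-bit termios field through a pointer cast, which quietly assumes a little-endian layout. A stand-alone illustration of that overlay, with invented values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int flags = 0x12340000;	/* 32-bit termios-style field */
		unsigned short legacy = 0xbeef;		/* 16-bit termio-style field */

		/* The same store the kernel macro performs: on a little-endian
		 * CPU only the low 16 bits are overwritten. */
		*(unsigned short *)&flags = legacy;

		printf("%08x\n", flags);		/* prints 1234beef */
		return 0;
	}
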
- */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) {		\ -	unsigned short __tmp;					\ -	get_user(__tmp,&(termio)->x);				\ -	*(unsigned short *) &(termios)->x = __tmp;		\ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ -	SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ -	SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ -	SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ -	SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ -	copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ -	put_user((termios)->c_iflag, &(termio)->c_iflag); \ -	put_user((termios)->c_oflag, &(termio)->c_oflag); \ -	put_user((termios)->c_cflag, &(termio)->c_cflag); \ -	put_user((termios)->c_lflag, &(termio)->c_lflag); \ -	put_user((termios)->c_line,  &(termio)->c_line); \ -	copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif	/* __KERNEL__ */ - -#endif	/* __ASM_ARM_TERMIOS_H */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 7b5cc8dae06..e4e4208a913 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -43,6 +43,16 @@ struct cpu_context_save {  	__u32	extra[2];		/* Xscale 'acc' register, etc */  }; +struct arm_restart_block { +	union { +		/* For user cache flushing */ +		struct { +			unsigned long start; +			unsigned long end; +		} cache; +	}; +}; +  /*   * low level task data that entry.S needs immediate access to.   * __switch_to() assumes cpu_context follows immediately after cpu_domain. @@ -58,14 +68,17 @@ struct thread_info {  	struct cpu_context_save	cpu_context;	/* cpu context */  	__u32			syscall;	/* syscall number */  	__u8			used_cp[16];	/* thread used copro */ -	unsigned long		tp_value; +	unsigned long		tp_value[2];	/* TLS registers */ +#ifdef CONFIG_CRUNCH  	struct crunch_state	crunchstate; +#endif  	union fp_state		fpstate __attribute__((aligned(8)));  	union vfp_state		vfpstate;  #ifdef CONFIG_ARM_THUMBEE  	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */  #endif  	struct restart_block	restart_block; +	struct arm_restart_block	arm_restart_block;  };  #define INIT_THREAD_INFO(tsk)						\ @@ -101,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)  	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))  #define thread_saved_sp(tsk)	\  	((unsigned long)(task_thread_info(tsk)->cpu_context.sp)) + +#ifndef CONFIG_THUMB2_KERNEL  #define thread_saved_fp(tsk)	\  	((unsigned long)(task_thread_info(tsk)->cpu_context.fp)) +#else +#define thread_saved_fp(tsk)	\ +	((unsigned long)(task_thread_info(tsk)->cpu_context.r7)) +#endif  extern void crunch_task_disable(struct thread_info *);  extern void crunch_task_copy(struct thread_info *, void *); @@ -118,17 +137,19 @@ extern void iwmmxt_task_switch(struct thread_info *);  extern void vfp_sync_hwstate(struct thread_info *);  extern void vfp_flush_hwstate(struct thread_info *); -#endif +struct user_vfp; +struct user_vfp_exc; -/* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring.  
See <asm/hardirq.h>. - */ -#define PREEMPT_ACTIVE	0x40000000 +extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, +					   struct user_vfp_exc __user *); +extern int vfp_restore_user_hwstate(struct user_vfp __user *, +				    struct user_vfp_exc __user *); +#endif  /*   * thread information flags:   *  TIF_SYSCALL_TRACE	- syscall trace active + *  TIF_SYSCALL_AUDIT	- syscall auditing active   *  TIF_SIGPENDING	- signal pending   *  TIF_NEED_RESCHED	- rescheduling necessary   *  TIF_NOTIFY_RESUME	- callback before returning to user @@ -138,28 +159,35 @@ extern void vfp_flush_hwstate(struct thread_info *);  #define TIF_SIGPENDING		0  #define TIF_NEED_RESCHED	1  #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */ +#define TIF_UPROBE		7  #define TIF_SYSCALL_TRACE	8 -#define TIF_POLLING_NRFLAG	16 +#define TIF_SYSCALL_AUDIT	9 +#define TIF_SYSCALL_TRACEPOINT	10 +#define TIF_SECCOMP		11	/* seccomp syscall filtering active */ +#define TIF_NOHZ		12	/* in adaptive nohz mode */  #define TIF_USING_IWMMXT	17  #define TIF_MEMDIE		18	/* is terminating due to OOM killer */ -#define TIF_FREEZE		19  #define TIF_RESTORE_SIGMASK	20 -#define TIF_SECCOMP		21  #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)  #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)  #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME) +#define _TIF_UPROBE		(1 << TIF_UPROBE)  #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE) -#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG) -#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT) -#define _TIF_FREEZE		(1 << TIF_FREEZE) -#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK) +#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)  #define _TIF_SECCOMP		(1 << TIF_SECCOMP) +#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT) + +/* Checks for any syscall work in entry-common.S */ +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ +			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)  /*   * Change these and you break ASM code in entry-common.S   */ -#define _TIF_WORK_MASK		0x000000ff +#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)  #endif /* __KERNEL__ */  #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h index c4391ba2035..1dc98067589 100644 --- a/arch/arm/include/asm/thread_notify.h +++ b/arch/arm/include/asm/thread_notify.h @@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)  #define THREAD_NOTIFY_FLUSH	0  #define THREAD_NOTIFY_EXIT	1  #define THREAD_NOTIFY_SWITCH	2 +#define THREAD_NOTIFY_COPY	3  #endif  #endif diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h index 3be8de3adab..f6fcc67ef06 100644 --- a/arch/arm/include/asm/timex.h +++ b/arch/arm/include/asm/timex.h @@ -12,13 +12,7 @@  #ifndef _ASMARM_TIMEX_H  #define _ASMARM_TIMEX_H -#include <mach/timex.h> -  typedef unsigned long cycles_t; - -static inline cycles_t get_cycles (void) -{ -	return 0; -} +#define get_cycles()	({ cycles_t c; read_current_timer(&c) ? 
0 : c; })  #endif diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd1..f1a0dace3ef 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -18,16 +18,22 @@  #define __ASMARM_TLB_H  #include <asm/cacheflush.h> -#include <asm/tlbflush.h>  #ifndef CONFIG_MMU  #include <linux/pagemap.h> + +#define tlb_flush(tlb)	((void) tlb) +  #include <asm-generic/tlb.h>  #else /* !CONFIG_MMU */ +#include <linux/swap.h>  #include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +#define MMU_GATHER_BUNDLE	8  /*   * TLB handling.  This allows us to remove pages from the page @@ -36,33 +42,105 @@  struct mmu_gather {  	struct mm_struct	*mm;  	unsigned int		fullmm; +	struct vm_area_struct	*vma; +	unsigned long		start, end;  	unsigned long		range_start;  	unsigned long		range_end; +	unsigned int		nr; +	unsigned int		max; +	struct page		**pages; +	struct page		*local[MMU_GATHER_BUNDLE];  };  DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); -static inline struct mmu_gather * -tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) +/* + * This is unnecessarily complex.  There are three ways the TLB shootdown + * code is used: + *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region(). + *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. + *     tlb->vma will be non-NULL. + *  2. Unmapping all vmas.  See exit_mmap(). + *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. + *     tlb->vma will be non-NULL.  Additionally, page tables will be freed. + *  3. Unmapping argument pages.  See shift_arg_pages(). + *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. + *     tlb->vma will be NULL. + */ +static inline void tlb_flush(struct mmu_gather *tlb)  { -	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); +	if (tlb->fullmm || !tlb->vma) +		flush_tlb_mm(tlb->mm); +	else if (tlb->range_end > 0) { +		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); +		tlb->range_start = TASK_SIZE; +		tlb->range_end = 0; +	} +} -	tlb->mm = mm; -	tlb->fullmm = full_mm_flush; +static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) +{ +	if (!tlb->fullmm) { +		if (addr < tlb->range_start) +			tlb->range_start = addr; +		if (addr + PAGE_SIZE > tlb->range_end) +			tlb->range_end = addr + PAGE_SIZE; +	} +} + +static inline void __tlb_alloc_page(struct mmu_gather *tlb) +{ +	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); + +	if (addr) { +		tlb->pages = (void *)addr; +		tlb->max = PAGE_SIZE / sizeof(struct page *); +	} +} + +static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) +{ +	tlb_flush(tlb); +} + +static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) +{ +	free_pages_and_swap_cache(tlb->pages, tlb->nr); +	tlb->nr = 0; +	if (tlb->pages == tlb->local) +		__tlb_alloc_page(tlb); +} -	return tlb; +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ +	tlb_flush_mmu_tlbonly(tlb); +	tlb_flush_mmu_free(tlb); +} + +static inline void +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) +{ +	tlb->mm = mm; +	tlb->fullmm = !(start | (end+1)); +	tlb->start = start; +	tlb->end = end; +	tlb->vma = NULL; +	tlb->max = ARRAY_SIZE(tlb->local); +	tlb->pages = tlb->local; +	tlb->nr = 0; +	__tlb_alloc_page(tlb);  }  static inline void  tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)  { -	if (tlb->fullmm) -		flush_tlb_mm(tlb->mm); +	tlb_flush_mmu(tlb);  	/* keep the page table cache 
within bounds */  	check_pgt_cache(); -	put_cpu_var(mmu_gathers); +	if (tlb->pages != tlb->local) +		free_pages((unsigned long)tlb->pages, 0);  }  /* @@ -71,12 +149,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)  static inline void  tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)  { -	if (!tlb->fullmm) { -		if (addr < tlb->range_start) -			tlb->range_start = addr; -		if (addr + PAGE_SIZE > tlb->range_end) -			tlb->range_end = addr + PAGE_SIZE; -	} +	tlb_add_flush(tlb, addr);  }  /* @@ -89,6 +162,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)  {  	if (!tlb->fullmm) {  		flush_cache_range(vma, vma->vm_start, vma->vm_end); +		tlb->vma = vma;  		tlb->range_start = TASK_SIZE;  		tlb->range_end = 0;  	} @@ -97,13 +171,61 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)  static inline void  tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)  { -	if (!tlb->fullmm && tlb->range_end > 0) -		flush_tlb_range(vma, tlb->range_start, tlb->range_end); +	if (!tlb->fullmm) +		tlb_flush(tlb); +} + +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ +	tlb->pages[tlb->nr++] = page; +	VM_BUG_ON(tlb->nr > tlb->max); +	return tlb->max - tlb->nr; +} + +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ +	if (!__tlb_remove_page(tlb, page)) +		tlb_flush_mmu(tlb); +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, +	unsigned long addr) +{ +	pgtable_page_dtor(pte); + +#ifdef CONFIG_ARM_LPAE +	tlb_add_flush(tlb, addr); +#else +	/* +	 * With the classic ARM MMU, a pte page has two corresponding pmd +	 * entries, each covering 1MB. +	 */ +	addr &= PMD_MASK; +	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); +	tlb_add_flush(tlb, addr + SZ_1M); +#endif + +	tlb_remove_page(tlb, pte); +} + +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, +				  unsigned long addr) +{ +#ifdef CONFIG_ARM_LPAE +	tlb_add_flush(tlb, addr); +	tlb_remove_page(tlb, virt_to_page(pmdp)); +#endif +} + +static inline void +tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) +{ +	tlb_add_flush(tlb, addr);  } -#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page) -#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep) -#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp) +#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr) +#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr) +#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)  #define tlb_migrate_finish(mm)		do { } while (0) diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a..def9e570199 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -10,16 +10,10 @@  #ifndef _ASMARM_TLBFLUSH_H  #define _ASMARM_TLBFLUSH_H - -#ifndef CONFIG_MMU - -#define tlb_flush(tlb)	((void) tlb) - -#else /* CONFIG_MMU */ +#ifdef CONFIG_MMU  #include <asm/glue.h> -#define TLB_V3_PAGE	(1 << 0)  #define TLB_V4_U_PAGE	(1 << 1)  #define TLB_V4_D_PAGE	(1 << 2)  #define TLB_V4_I_PAGE	(1 << 3) @@ -27,7 +21,6 @@  #define TLB_V6_D_PAGE	(1 << 5)  #define TLB_V6_I_PAGE	(1 << 6) -#define TLB_V3_FULL	(1 << 8)  #define TLB_V4_U_FULL	(1 << 9)  #define TLB_V4_D_FULL	(1 << 10)  #define TLB_V4_I_FULL	(1 << 11) @@ -39,16 +32,15 @@  #define TLB_V6_D_ASID	(1 << 17)  #define TLB_V6_I_ASID	(1 << 18) -#define TLB_BTB		(1 << 28) +#define 
TLB_V6_BP	(1 << 19)  /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ -#define TLB_V7_UIS_PAGE	(1 << 19) -#define TLB_V7_UIS_FULL (1 << 20) -#define TLB_V7_UIS_ASID (1 << 21) - -/* Inner Shareable BTB operation (ARMv7 MP extensions) */ -#define TLB_V7_IS_BTB	(1 << 22) +#define TLB_V7_UIS_PAGE	(1 << 20) +#define TLB_V7_UIS_FULL (1 << 21) +#define TLB_V7_UIS_ASID (1 << 22) +#define TLB_V7_UIS_BP	(1 << 23) +#define TLB_BARRIER	(1 << 28)  #define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */  #define TLB_DCLEAN	(1 << 30)  #define TLB_WB		(1 << 31) @@ -58,12 +50,11 @@   *	=============   *   *	We have the following to choose from: - *	  v3    - ARMv3   *	  v4    - ARMv4 without write buffer   *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction   *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction   *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks) - *	  fa    - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB)) + *	  fa    - Faraday (v4 with write buffer with UTLB)   *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction   *	  v7wbi - identical to v6wbi   */ @@ -74,21 +65,6 @@  #define MULTI_TLB 1  #endif -#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE) - -#ifdef CONFIG_CPU_TLB_V3 -# define v3_possible_flags	v3_tlb_flags -# define v3_always_flags	v3_tlb_flags -# ifdef _TLB -#  define MULTI_TLB 1 -# else -#  define _TLB v3 -# endif -#else -# define v3_possible_flags	0 -# define v3_always_flags	(-1UL) -#endif -  #define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)  #ifdef CONFIG_CPU_TLB_V4WT @@ -104,7 +80,7 @@  # define v4_always_flags	(-1UL)  #endif -#define fa_tlb_flags	(TLB_WB | TLB_BTB | TLB_DCLEAN | \ +#define fa_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \  			 TLB_V4_U_FULL | TLB_V4_U_PAGE)  #ifdef CONFIG_CPU_TLB_FA @@ -171,10 +147,11 @@  # define v4wb_always_flags	(-1UL)  #endif -#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \  			 TLB_V6_I_FULL | TLB_V6_D_FULL | \  			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ -			 TLB_V6_I_ASID | TLB_V6_D_ASID) +			 TLB_V6_I_ASID | TLB_V6_D_ASID | \ +			 TLB_V6_BP)  #ifdef CONFIG_CPU_TLB_V6  # define v6wbi_possible_flags	v6wbi_tlb_flags @@ -189,10 +166,12 @@  # define v6wbi_always_flags	(-1UL)  #endif -#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ -			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) -#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BTB | \ -			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) +#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_BARRIER | \ +				 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \ +				 TLB_V7_UIS_ASID | TLB_V7_UIS_BP) +#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ +				 TLB_V6_U_FULL | TLB_V6_U_PAGE | \ +				 TLB_V6_U_ASID | TLB_V6_BP)  #ifdef CONFIG_CPU_TLB_V7 @@ -307,8 +286,7 @@ extern struct cpu_tlb_fns cpu_tlb;   * implemented the "%?" method, but this has been discontinued due to too   * many people getting it wrong.   
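
The always/possible pairing above is what lets tlb_flag(), retained just below, be evaluated at build time whenever possible: a flag present in always_tlb_flags needs no runtime check, a flag outside possible_tlb_flags is dead code, and only the remainder is tested against this CPU's flags. A stand-alone model of that folding, with made-up constants:

	#include <stdio.h>

	/* Invented stand-ins for always_tlb_flags, possible_tlb_flags and
	 * __cpu_tlb_flags; the real values come from the CPU_TLB_* configs. */
	#define ALWAYS_FLAGS	0x01u
	#define POSSIBLE_FLAGS	0x07u

	static unsigned int cpu_flags = 0x03;	/* this CPU, known at runtime */

	#define tlb_flag(f) \
		((ALWAYS_FLAGS & (f)) || (cpu_flags & POSSIBLE_FLAGS & (f)))

	int main(void)
	{
		printf("%d\n", tlb_flag(0x01u) ? 1 : 0);	/* 1: folds to true */
		printf("%d\n", tlb_flag(0x02u) ? 1 : 0);	/* 1: needs runtime test */
		printf("%d\n", tlb_flag(0x08u) ? 1 : 0);	/* 0: folds to false */
		return 0;
	}
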
*/ -#define possible_tlb_flags	(v3_possible_flags | \ -				 v4_possible_flags | \ +#define possible_tlb_flags	(v4_possible_flags | \  				 v4wbi_possible_flags | \  				 fr_possible_flags | \  				 v4wb_possible_flags | \ @@ -316,8 +294,7 @@ extern struct cpu_tlb_fns cpu_tlb;  				 v6wbi_possible_flags | \  				 v7wbi_possible_flags) -#define always_tlb_flags	(v3_always_flags & \ -				 v4_always_flags & \ +#define always_tlb_flags	(v4_always_flags & \  				 v4wbi_always_flags & \  				 fr_always_flags & \  				 v4wb_always_flags & \ @@ -327,181 +304,263 @@ extern struct cpu_tlb_fns cpu_tlb;  #define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) +#define __tlb_op(f, insnarg, arg)					\ +	do {								\ +		if (always_tlb_flags & (f))				\ +			asm("mcr " insnarg				\ +			    : : "r" (arg) : "cc");			\ +		else if (possible_tlb_flags & (f))			\ +			asm("tst %1, %2\n\t"				\ +			    "mcrne " insnarg				\ +			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f)	\ +			    : "cc");					\ +	} while (0) + +#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg) +#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg) + +static inline void __local_flush_tlb_all(void) +{ +	const int zero = 0; +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); +	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); +	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); +} +  static inline void local_flush_tlb_all(void)  {  	const int zero = 0;  	const unsigned int __tlb_flag = __cpu_tlb_flags;  	if (tlb_flag(TLB_WB)) -		dsb(); - -	if (tlb_flag(TLB_V3_FULL)) -		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); -	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) -		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); -	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) -		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); -	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL)) -		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); -	if (tlb_flag(TLB_V7_UIS_FULL)) -		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); +		dsb(nshst); -	if (tlb_flag(TLB_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); -		dsb(); +	__local_flush_tlb_all(); +	tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero); + +	if (tlb_flag(TLB_BARRIER)) { +		dsb(nsh);  		isb();  	} -	if (tlb_flag(TLB_V7_IS_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); -		dsb(); +} + +static inline void __flush_tlb_all(void) +{ +	const int zero = 0; +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	if (tlb_flag(TLB_WB)) +		dsb(ishst); + +	__local_flush_tlb_all(); +	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); + +	if (tlb_flag(TLB_BARRIER)) { +		dsb(ish);  		isb();  	}  } -static inline void local_flush_tlb_mm(struct mm_struct *mm) +static inline void __local_flush_tlb_mm(struct mm_struct *mm)  {  	const int zero = 0;  	const int asid = ASID(mm);  	const unsigned int __tlb_flag = __cpu_tlb_flags; -	if (tlb_flag(TLB_WB)) -		dsb(); - -	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { -		if (tlb_flag(TLB_V3_FULL)) -			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); -		if (tlb_flag(TLB_V4_U_FULL)) -			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); -		if (tlb_flag(TLB_V4_D_FULL)) -			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); -		if (tlb_flag(TLB_V4_I_FULL)) -			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : 
"cc"); +	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { +		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { +			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); +			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); +			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); +		}  	} -	put_cpu(); - -	if (tlb_flag(TLB_V6_U_ASID)) -		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); -	if (tlb_flag(TLB_V6_D_ASID)) -		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc"); -	if (tlb_flag(TLB_V6_I_ASID)) -		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc"); -	if (tlb_flag(TLB_V7_UIS_ASID)) + +	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); +	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); +	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); +} + +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ +	const int asid = ASID(mm); +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	if (tlb_flag(TLB_WB)) +		dsb(nshst); + +	__local_flush_tlb_mm(mm); +	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid); + +	if (tlb_flag(TLB_BARRIER)) +		dsb(nsh); +} + +static inline void __flush_tlb_mm(struct mm_struct *mm) +{ +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	if (tlb_flag(TLB_WB)) +		dsb(ishst); + +	__local_flush_tlb_mm(mm);  #ifdef CONFIG_ARM_ERRATA_720789 -		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); +	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);  #else -		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); +	tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));  #endif -	if (tlb_flag(TLB_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); -		dsb(); -	} -	if (tlb_flag(TLB_V7_IS_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); -		dsb(); -		isb(); -	} +	if (tlb_flag(TLB_BARRIER)) +		dsb(ish);  }  static inline void -local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)  {  	const int zero = 0;  	const unsigned int __tlb_flag = __cpu_tlb_flags;  	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); -	if (tlb_flag(TLB_WB)) -		dsb(); - -	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { -		if (tlb_flag(TLB_V3_PAGE)) -			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); -		if (tlb_flag(TLB_V4_U_PAGE)) -			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); -		if (tlb_flag(TLB_V4_D_PAGE)) -			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); -		if (tlb_flag(TLB_V4_I_PAGE)) -			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); +	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && +	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { +		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); +		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); +		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);  		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))  			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");  	} -	if (tlb_flag(TLB_V6_U_PAGE)) -		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); -	if (tlb_flag(TLB_V6_D_PAGE)) -		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); -	if (tlb_flag(TLB_V6_I_PAGE)) -		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); -	if (tlb_flag(TLB_V7_UIS_PAGE)) +	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); +	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); +	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); +} + +static inline void +local_flush_tlb_page(struct vm_area_struct *vma, unsigned long 
uaddr) +{ +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + +	if (tlb_flag(TLB_WB)) +		dsb(nshst); + +	__local_flush_tlb_page(vma, uaddr); +	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr); + +	if (tlb_flag(TLB_BARRIER)) +		dsb(nsh); +} + +static inline void +__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) +{ +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); + +	if (tlb_flag(TLB_WB)) +		dsb(ishst); + +	__local_flush_tlb_page(vma, uaddr);  #ifdef CONFIG_ARM_ERRATA_720789 -		asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); +	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);  #else -		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); +	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);  #endif -	if (tlb_flag(TLB_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); -		dsb(); -	} -	if (tlb_flag(TLB_V7_IS_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); -		dsb(); -		isb(); -	} +	if (tlb_flag(TLB_BARRIER)) +		dsb(ish);  } -static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)  {  	const int zero = 0;  	const unsigned int __tlb_flag = __cpu_tlb_flags; +	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); +	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); +	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); +	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) +		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); + +	tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); +	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); +	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); +} + +static inline void local_flush_tlb_kernel_page(unsigned long kaddr) +{ +	const unsigned int __tlb_flag = __cpu_tlb_flags; +  	kaddr &= PAGE_MASK;  	if (tlb_flag(TLB_WB)) -		dsb(); - -	if (tlb_flag(TLB_V3_PAGE)) -		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc"); -	if (tlb_flag(TLB_V4_U_PAGE)) -		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); -	if (tlb_flag(TLB_V4_D_PAGE)) -		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); -	if (tlb_flag(TLB_V4_I_PAGE)) -		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); -	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) -		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); +		dsb(nshst); + +	__local_flush_tlb_kernel_page(kaddr); +	tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr); -	if (tlb_flag(TLB_V6_U_PAGE)) -		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); -	if (tlb_flag(TLB_V6_D_PAGE)) -		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); -	if (tlb_flag(TLB_V6_I_PAGE)) -		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); -	if (tlb_flag(TLB_V7_UIS_PAGE)) -		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); - -	if (tlb_flag(TLB_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); -		dsb(); +	if (tlb_flag(TLB_BARRIER)) { +		dsb(nsh);  		isb();  	} -	if (tlb_flag(TLB_V7_IS_BTB)) { -		/* flush the branch target cache */ -		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); -		dsb(); +} + +static inline void __flush_tlb_kernel_page(unsigned long kaddr) +{ +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	kaddr &= PAGE_MASK; + +	if (tlb_flag(TLB_WB)) +		dsb(ishst); + +	__local_flush_tlb_kernel_page(kaddr); +	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); + +	if 
(tlb_flag(TLB_BARRIER)) { +		dsb(ish);  		isb();  	}  }  /* + * Branch predictor maintenance is paired with full TLB invalidation, so + * there is no need for any barriers here. + */ +static inline void __local_flush_bp_all(void) +{ +	const int zero = 0; +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	if (tlb_flag(TLB_V6_BP)) +		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} + +static inline void local_flush_bp_all(void) +{ +	const int zero = 0; +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	__local_flush_bp_all(); +	if (tlb_flag(TLB_V7_UIS_BP)) +		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); +} + +static inline void __flush_bp_all(void) +{ +	const int zero = 0; +	const unsigned int __tlb_flag = __cpu_tlb_flags; + +	__local_flush_bp_all(); +	if (tlb_flag(TLB_V7_UIS_BP)) +		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); +} + +/*   *	flush_pmd_entry   *   *	Flush a PMD entry (word aligned, or double-word aligned) to @@ -514,35 +573,26 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)   *	these operations.  This is typically used when we are removing   *	PMD entries.   */ -static inline void flush_pmd_entry(pmd_t *pmd) +static inline void flush_pmd_entry(void *pmd)  {  	const unsigned int __tlb_flag = __cpu_tlb_flags; -	if (tlb_flag(TLB_DCLEAN)) -		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd" -			: : "r" (pmd) : "cc"); - -	if (tlb_flag(TLB_L2CLEAN_FR)) -		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd" -			: : "r" (pmd) : "cc"); +	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd); +	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);  	if (tlb_flag(TLB_WB)) -		dsb(); +		dsb(ishst);  } -static inline void clean_pmd_entry(pmd_t *pmd) +static inline void clean_pmd_entry(void *pmd)  {  	const unsigned int __tlb_flag = __cpu_tlb_flags; -	if (tlb_flag(TLB_DCLEAN)) -		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd" -			: : "r" (pmd) : "cc"); - -	if (tlb_flag(TLB_L2CLEAN_FR)) -		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd" -			: : "r" (pmd) : "cc"); +	tlb_op(TLB_DCLEAN, "c7, c10, 1	@ flush_pmd", pmd); +	tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);  } +#undef tlb_op  #undef tlb_flag  #undef always_tlb_flags  #undef possible_tlb_flags @@ -560,6 +610,7 @@ static inline void clean_pmd_entry(pmd_t *pmd)  #define flush_tlb_kernel_page	local_flush_tlb_kernel_page  #define flush_tlb_range		local_flush_tlb_range  #define flush_tlb_kernel_range	local_flush_tlb_kernel_range +#define flush_bp_all		local_flush_bp_all  #else  extern void flush_tlb_all(void);  extern void flush_tlb_mm(struct mm_struct *mm); @@ -567,6 +618,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);  extern void flush_tlb_kernel_page(unsigned long kaddr);  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);  extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void);  #endif  /* @@ -585,8 +637,50 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,  }  #endif +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + +#endif + +#elif defined(CONFIG_SMP)	/* !CONFIG_MMU */ + +#ifndef __ASSEMBLY__ + +#include <linux/mm_types.h> + +static inline void local_flush_tlb_all(void)									{ } +static inline void local_flush_tlb_mm(struct mm_struct *mm)							{ } +static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)			{ } +static inline void local_flush_tlb_kernel_page(unsigned long kaddr)						{ 
} +static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)	{ } +static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)				{ } +static inline void local_flush_bp_all(void)									{ } + +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr); +extern void flush_tlb_kernel_page(unsigned long kaddr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void flush_bp_all(void); +#endif	/* __ASSEMBLY__ */ + +#endif + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_ARM_ERRATA_798181 +extern void erratum_a15_798181_init(void); +#else +static inline void erratum_a15_798181_init(void) {}  #endif +extern bool (*erratum_a15_798181_handler)(void); -#endif /* CONFIG_MMU */ +static inline bool erratum_a15_798181(void) +{ +	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) && +		erratum_a15_798181_handler)) +		return erratum_a15_798181_handler(); +	return false; +} +#endif  #endif diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index e71d6ff8d10..83259b87333 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -2,23 +2,30 @@  #define __ASMARM_TLS_H  #ifdef __ASSEMBLY__ -	.macro set_tls_none, tp, tmp1, tmp2 +#include <asm/asm-offsets.h> +	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2  	.endm -	.macro set_tls_v6k, tp, tmp1, tmp2 +	.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2 +	mrc	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register  	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register +	mcr	p15, 0, \tpuser, c13, c0, 2	@ and the user r/w register +	str	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it  	.endm -	.macro set_tls_v6, tp, tmp1, tmp2 +	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2  	ldr	\tmp1, =elf_hwcap  	ldr	\tmp1, [\tmp1, #0]  	mov	\tmp2, #0xffff0fff  	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available? 
-	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register  	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0 +	mrcne	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register +	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register +	mcrne	p15, 0, \tpuser, c13, c0, 2	@ set user r/w register +	strne	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it  	.endm -	.macro set_tls_software, tp, tmp1, tmp2 +	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2  	mov	\tmp1, #0xffff0fff  	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0  	.endm @@ -27,20 +34,30 @@  #ifdef CONFIG_TLS_REG_EMUL  #define tls_emu		1  #define has_tls_reg		1 -#define set_tls		set_tls_none -#elif __LINUX_ARM_ARCH__ >= 7 ||					\ -	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) -#define tls_emu		0 -#define has_tls_reg		1 -#define set_tls		set_tls_v6k -#elif __LINUX_ARM_ARCH__ == 6 +#define switch_tls	switch_tls_none +#elif defined(CONFIG_CPU_V6)  #define tls_emu		0  #define has_tls_reg		(elf_hwcap & HWCAP_TLS) -#define set_tls		set_tls_v6 +#define switch_tls	switch_tls_v6 +#elif defined(CONFIG_CPU_32v6K) +#define tls_emu		0 +#define has_tls_reg		1 +#define switch_tls	switch_tls_v6k  #else  #define tls_emu		0  #define has_tls_reg		0 -#define set_tls		set_tls_software +#define switch_tls	switch_tls_software  #endif +#ifndef __ASSEMBLY__ +static inline unsigned long get_tpuser(void) +{ +	unsigned long reg = 0; + +	if (has_tls_reg && !tls_emu) +		__asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg)); + +	return reg; +} +#endif  #endif	/* __ASMARM_TLS_H */ diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index accbd7cad9b..2fe85fff5cc 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -1,6 +1,36 @@  #ifndef _ASM_ARM_TOPOLOGY_H  #define _ASM_ARM_TOPOLOGY_H +#ifdef CONFIG_ARM_CPU_TOPOLOGY + +#include <linux/cpumask.h> + +struct cputopo_arm { +	int thread_id; +	int core_id; +	int socket_id; +	cpumask_t thread_sibling; +	cpumask_t core_sibling; +}; + +extern struct cputopo_arm cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id) +#define topology_core_id(cpu)		(cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling) +#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling) + +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); + +#else + +static inline void init_cpu_topology(void) { } +static inline void store_cpu_topology(unsigned int cpuid) { } + +#endif +  #include <asm-generic/topology.h>  #endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 491960bf426..f555bb3664d 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -3,6 +3,9 @@  #include <linux/list.h> +struct pt_regs; +struct task_struct; +  struct undef_hook {  	struct list_head node;  	u32 instr_mask; @@ -15,16 +18,38 @@ struct undef_hook {  void register_undef_hook(struct undef_hook *hook);  void unregister_undef_hook(struct undef_hook *hook); +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static inline int __in_irqentry_text(unsigned long ptr) +{ +	extern char __irqentry_text_start[]; +	extern char __irqentry_text_end[]; + +	return ptr >= (unsigned long)&__irqentry_text_start && +	       ptr < (unsigned long)&__irqentry_text_end; +} +#else +static inline int __in_irqentry_text(unsigned long ptr) +{ +	return 0; +} +#endif +  static inline int 
in_exception_text(unsigned long ptr)  {  	extern char __exception_text_start[];  	extern char __exception_text_end[]; +	int in; -	return ptr >= (unsigned long)&__exception_text_start && -	       ptr < (unsigned long)&__exception_text_end; +	in = ptr >= (unsigned long)&__exception_text_start && +	     ptr < (unsigned long)&__exception_text_end; + +	return in ? : __in_irqentry_text(ptr);  } -extern void __init early_trap_init(void); +extern void __init early_trap_init(void *);  extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); +extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); + +extern void *vectors_page;  #endif diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h new file mode 100644 index 00000000000..624e1d436c6 --- /dev/null +++ b/arch/arm/include/asm/trusted_foundations.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2013, NVIDIA Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + */ + +/* + * Support for the Trusted Foundations secure monitor. + * + * Trusted Foundations ships enabled on some ARM consumer devices (most + * Tegra-based devices on the market are affected). Such devices can perform + * some basic operations, like setting the CPU reset vector, only through + * SMC calls to the secure monitor. The calls are completely specific to + * Trusted Foundations, and do *not* follow the SMC calling convention or the + * PSCI standard. + */ + +#ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H +#define __ASM_ARM_TRUSTED_FOUNDATIONS_H + +#include <linux/kconfig.h> +#include <linux/printk.h> +#include <linux/bug.h> +#include <linux/of.h> +#include <linux/cpu.h> +#include <linux/smp.h> + +struct trusted_foundations_platform_data { +	unsigned int version_major; +	unsigned int version_minor; +}; + +#if IS_ENABLED(CONFIG_TRUSTED_FOUNDATIONS) + +void register_trusted_foundations(struct trusted_foundations_platform_data *pd); +void of_register_trusted_foundations(void); + +#else /* CONFIG_TRUSTED_FOUNDATIONS */ + +static inline void register_trusted_foundations( +				   struct trusted_foundations_platform_data *pd) +{ +	/* +	 * If the system requires TF and we cannot provide it, continue booting +	 * but disable features that cannot be provided.
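The registration side is a single early call from platform code; a minimal sketch, assuming a DT-based Tegra-style board file (example_init_early() is an invented name, not part of this patch):

#include <linux/init.h>
#include <asm/trusted_foundations.h>

/* Sketch only: an invented machine-init hook showing the intended call. */
static void __init example_init_early(void)
{
	/*
	 * Registers the TF backend when the device tree carries a
	 * "tlm,trusted-foundations" node; with TF support compiled out,
	 * this ends up in the degraded-mode stub of this header.
	 */
	of_register_trusted_foundations();
}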
+	 */ +	pr_err("No support for Trusted Foundations, continuing in degraded mode.\n"); +	pr_err("Secondary processors as well as CPU PM will be disabled.\n"); +#if IS_ENABLED(CONFIG_SMP) +	setup_max_cpus = 0; +#endif +	cpu_idle_poll_ctrl(true); +} + +static inline void of_register_trusted_foundations(void) +{ +	/* +	 * If the device tree says the target should enable TF but support for +	 * it is not compiled in, fall back to the degraded-mode stub above, as +	 * the system won't be able to do much anyway +	 */ +	if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations")) +		register_trusted_foundations(NULL); +} +#endif /* CONFIG_TRUSTED_FOUNDATIONS */ + +#endif diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h index 345df01534a..a53cdb8f068 100644 --- a/arch/arm/include/asm/types.h +++ b/arch/arm/include/asm/types.h @@ -1,31 +1,40 @@ -#ifndef __ASM_ARM_TYPES_H -#define __ASM_ARM_TYPES_H +#ifndef _ASM_TYPES_H +#define _ASM_TYPES_H  #include <asm-generic/int-ll64.h> -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -#endif /* __ASSEMBLY__ */ -  /* - * These aren't exported outside the kernel to avoid name space clashes + * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as + * unambiguous on ARM as you would expect. For the types below, there is a + * difference on ARM between GCC built for bare metal ARM, GCC built for glibc + * and the kernel itself, which results in build errors if you try to build with + * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h' + * in order to use NEON intrinsics). + * + * As the typedefs for these types in 'stdint.h' are based on builtin defines + * supplied by GCC, we can tweak these to align with the kernel's idea of those + * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same + * source file (provided that -ffreestanding is used). + * + *                    int32_t         uint32_t               uintptr_t + * bare metal GCC     long            unsigned long          unsigned int + * glibc GCC          int             unsigned int           unsigned int + * kernel             int             unsigned int           unsigned long   */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#ifndef __ASSEMBLY__ -/* Dma addresses are 32-bits wide.  */ - -typedef u32 dma_addr_t; -typedef u32 dma64_addr_t; - -#endif /* __ASSEMBLY__ */ +#ifdef __INT32_TYPE__ +#undef __INT32_TYPE__ +#define __INT32_TYPE__		int +#endif -#endif /* __KERNEL__ */ +#ifdef __UINT32_TYPE__ +#undef __UINT32_TYPE__ +#define __UINT32_TYPE__	unsigned int +#endif +#ifdef __UINTPTR_TYPE__ +#undef __UINTPTR_TYPE__ +#define __UINTPTR_TYPE__	unsigned long  #endif +#endif /* _ASM_TYPES_H */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 33e4a48fe10..75d95799b6e 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -16,8 +16,15 @@  #include <asm/errno.h>  #include <asm/memory.h>  #include <asm/domain.h> -#include <asm/system.h>  #include <asm/unified.h> +#include <asm/compiler.h> + +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#include <asm-generic/uaccess-unaligned.h> +#else +#define __get_user_unaligned __get_user +#define __put_user_unaligned __put_user +#endif  #define VERIFY_READ 0  #define VERIFY_WRITE 1 @@ -101,28 +108,39 @@ extern int __get_user_1(void *);  extern int __get_user_2(void *);  extern int __get_user_4(void *); -#define __get_user_x(__r2,__p,__e,__s,__i...)				
\ +#define __GUP_CLOBBER_1	"lr", "cc" +#ifdef CONFIG_CPU_USE_DOMAINS +#define __GUP_CLOBBER_2	"ip", "lr", "cc" +#else +#define __GUP_CLOBBER_2 "lr", "cc" +#endif +#define __GUP_CLOBBER_4	"lr", "cc" + +#define __get_user_x(__r2,__p,__e,__l,__s)				\  	   __asm__ __volatile__ (					\  		__asmeq("%0", "r0") __asmeq("%1", "r2")			\ +		__asmeq("%3", "r1")					\  		"bl	__get_user_" #__s				\  		: "=&r" (__e), "=r" (__r2)				\ -		: "0" (__p)						\ -		: __i, "cc") +		: "0" (__p), "r" (__l)					\ +		: __GUP_CLOBBER_##__s) -#define get_user(x,p)							\ +#define __get_user_check(x,p)							\  	({								\ +		unsigned long __limit = current_thread_info()->addr_limit - 1; \  		register const typeof(*(p)) __user *__p asm("r0") = (p);\  		register unsigned long __r2 asm("r2");			\ +		register unsigned long __l asm("r1") = __limit;		\  		register int __e asm("r0");				\  		switch (sizeof(*(__p))) {				\  		case 1:							\ -			__get_user_x(__r2, __p, __e, 1, "lr");		\ -	       		break;						\ +			__get_user_x(__r2, __p, __e, __l, 1);		\ +			break;						\  		case 2:							\ -			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\ +			__get_user_x(__r2, __p, __e, __l, 2);		\  			break;						\  		case 4:							\ -	       		__get_user_x(__r2, __p, __e, 4, "lr");		\ +			__get_user_x(__r2, __p, __e, __l, 4);		\  			break;						\  		default: __e = __get_user_bad(); break;			\  		}							\ @@ -130,42 +148,58 @@ extern int __get_user_4(void *);  		__e;							\  	}) +#define get_user(x,p)							\ +	({								\ +		might_fault();						\ +		__get_user_check(x,p);					\ +	 }) +  extern int __put_user_1(void *, unsigned int);  extern int __put_user_2(void *, unsigned int);  extern int __put_user_4(void *, unsigned int);  extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__s)					\ +#define __put_user_x(__r2,__p,__e,__l,__s)				\  	   __asm__ __volatile__ (					\  		__asmeq("%0", "r0") __asmeq("%2", "r2")			\ +		__asmeq("%3", "r1")					\  		"bl	__put_user_" #__s				\  		: "=&r" (__e)						\ -		: "0" (__p), "r" (__r2)					\ +		: "0" (__p), "r" (__r2), "r" (__l)			\  		: "ip", "lr", "cc") -#define put_user(x,p)							\ +#define __put_user_check(x,p)							\  	({								\ +		unsigned long __limit = current_thread_info()->addr_limit - 1; \ +		const typeof(*(p)) __user *__tmp_p = (p);		\  		register const typeof(*(p)) __r2 asm("r2") = (x);	\ -		register const typeof(*(p)) __user *__p asm("r0") = (p);\ +		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ +		register unsigned long __l asm("r1") = __limit;		\  		register int __e asm("r0");				\  		switch (sizeof(*(__p))) {				\  		case 1:							\ -			__put_user_x(__r2, __p, __e, 1);		\ +			__put_user_x(__r2, __p, __e, __l, 1);		\  			break;						\  		case 2:							\ -			__put_user_x(__r2, __p, __e, 2);		\ +			__put_user_x(__r2, __p, __e, __l, 2);		\  			break;						\  		case 4:							\ -			__put_user_x(__r2, __p, __e, 4);		\ +			__put_user_x(__r2, __p, __e, __l, 4);		\  			break;						\  		case 8:							\ -			__put_user_x(__r2, __p, __e, 8);		\ +			__put_user_x(__r2, __p, __e, __l, 8);		\  			break;						\  		default: __e = __put_user_bad(); break;			\  		}							\  		__e;							\  	}) +#define put_user(x,p)							\ +	({								\ +		might_fault();						\ +		__put_user_check(x,p);					\ +	 }) +  #else /* CONFIG_MMU */  /* @@ -174,8 +208,8 @@ extern int __put_user_8(void *, unsigned long long);  #define USER_DS			KERNEL_DS  #define segment_eq(a,b)		(1) -#define __addr_ok(addr)		(1) -#define __range_ok(addr,size)	(0) +#define 
__addr_ok(addr)		((void)(addr),1) +#define __range_ok(addr,size)	((void)(addr),0)  #define get_fs()		(KERNEL_DS)  static inline void set_fs(mm_segment_t fs) @@ -189,6 +223,9 @@ static inline void set_fs(mm_segment_t fs)  #define access_ok(type,addr,size)	(__range_ok(addr,size) == 0) +#define user_addr_max() \ +	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) +  /*   * The "__xxx" versions of the user access functions do not verify the   * address space - it must have been done previously with a separate @@ -216,6 +253,7 @@ do {									\  	unsigned long __gu_addr = (unsigned long)(ptr);			\  	unsigned long __gu_val;						\  	__chk_user_ptr(ptr);						\ +	might_fault();							\  	switch (sizeof(*(ptr))) {					\  	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\  	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\ @@ -227,7 +265,7 @@ do {									\  #define __get_user_asm_byte(x,addr,err)				\  	__asm__ __volatile__(					\ -	"1:	ldrbt	%1,[%2]\n"				\ +	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\  	"2:\n"							\  	"	.pushsection .fixup,\"ax\"\n"			\  	"	.align	2\n"					\ @@ -263,7 +301,7 @@ do {									\  #define __get_user_asm_word(x,addr,err)				\  	__asm__ __volatile__(					\ -	"1:	ldrt	%1,[%2]\n"				\ +	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\  	"2:\n"							\  	"	.pushsection .fixup,\"ax\"\n"			\  	"	.align	2\n"					\ @@ -297,6 +335,7 @@ do {									\  	unsigned long __pu_addr = (unsigned long)(ptr);			\  	__typeof__(*(ptr)) __pu_val = (x);				\  	__chk_user_ptr(ptr);						\ +	might_fault();							\  	switch (sizeof(*(ptr))) {					\  	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\  	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\ @@ -308,7 +347,7 @@ do {									\  #define __put_user_asm_byte(x,__pu_addr,err)			\  	__asm__ __volatile__(					\ -	"1:	strbt	%1,[%2]\n"				\ +	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\  	"2:\n"							\  	"	.pushsection .fixup,\"ax\"\n"			\  	"	.align	2\n"					\ @@ -341,7 +380,7 @@ do {									\  #define __put_user_asm_word(x,__pu_addr,err)			\  	__asm__ __volatile__(					\ -	"1:	strt	%1,[%2]\n"				\ +	"1:	" TUSER(str) "	%1,[%2],#0\n"			\  	"2:\n"							\  	"	.pushsection .fixup,\"ax\"\n"			\  	"	.align	2\n"					\ @@ -366,10 +405,10 @@ do {									\  #define __put_user_asm_dword(x,__pu_addr,err)			\  	__asm__ __volatile__(					\ - ARM(	"1:	strt	" __reg_oper1 ", [%1], #4\n"	)	\ - ARM(	"2:	strt	" __reg_oper0 ", [%1]\n"	)	\ - THUMB(	"1:	strt	" __reg_oper1 ", [%1]\n"	)	\ - THUMB(	"2:	strt	" __reg_oper0 ", [%1, #4]\n"	)	\ + ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \ + ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \ + THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \ + THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \  	"3:\n"							\  	"	.pushsection .fixup,\"ax\"\n"			\  	"	.align	2\n"					\ @@ -398,9 +437,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l  #define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)  #endif -extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); -extern unsigned long __must_check __strnlen_user(const char __user *s, long n); -  static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)  {  	if (access_ok(VERIFY_READ, from, n)) @@ -427,24 +463,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo  	return n;  } -static inline long __must_check strncpy_from_user(char *dst, 
const char __user *src, long count) -{ -	long res = -EFAULT; -	if (access_ok(VERIFY_READ, src, 1)) -		res = __strncpy_from_user(dst, src, count); -	return res; -} - -#define strlen_user(s)	strnlen_user(s, ~0UL >> 1) - -static inline long __must_check strnlen_user(const char __user *s, long n) -{ -	unsigned long res = 0; - -	if (__addr_ok(s)) -		res = __strnlen_user(s, n); +extern long strncpy_from_user(char *dest, const char __user *src, long count); -	return res; -} +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n);  #endif /* _ASMARM_UACCESS_H */ diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h index 47f023aa849..14749aec94b 100644 --- a/arch/arm/include/asm/ucontext.h +++ b/arch/arm/include/asm/ucontext.h @@ -47,7 +47,7 @@ struct crunch_sigframe {  #endif  #ifdef CONFIG_IWMMXT -/* iwmmxt_area is 0x98 bytes long, preceeded by 8 bytes of signature */ +/* iwmmxt_area is 0x98 bytes long, preceded by 8 bytes of signature */  #define IWMMXT_MAGIC		0x12ef842a  #define IWMMXT_STORAGE_SIZE	(IWMMXT_SIZE + 8) diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h deleted file mode 100644 index 44593a89490..00000000000 --- a/arch/arm/include/asm/unaligned.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_ARM_UNALIGNED_H -#define _ASM_ARM_UNALIGNED_H - -#include <linux/unaligned/le_byteshift.h> -#include <linux/unaligned/be_byteshift.h> -#include <linux/unaligned/generic.h> - -/* - * Select endianness - */ -#ifndef __ARMEB__ -#define get_unaligned	__get_unaligned_le -#define put_unaligned	__put_unaligned_le -#else -#define get_unaligned	__get_unaligned_be -#define put_unaligned	__put_unaligned_be -#endif - -#endif /* _ASM_ARM_UNALIGNED_H */ diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h index bc631161e9c..b88beaba6b4 100644 --- a/arch/arm/include/asm/unified.h +++ b/arch/arm/include/asm/unified.h @@ -37,8 +37,10 @@  #define THUMB(x...)	x  #ifdef __ASSEMBLY__  #define W(instr)	instr.w -#endif  #define BSYM(sym)	sym + 1 +#else +#define WASM(instr)	#instr ".w" +#endif  #else	/* !CONFIG_THUMB2_KERNEL */ @@ -49,8 +51,10 @@  #define THUMB(x...)  #ifdef __ASSEMBLY__  #define W(instr)	instr -#endif  #define BSYM(sym)	sym +#else +#define WASM(instr)	#instr +#endif  #endif	/* CONFIG_THUMB2_KERNEL */ diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index c891eb76c0e..43876245fc5 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -13,430 +13,11 @@  #ifndef __ASM_ARM_UNISTD_H  #define __ASM_ARM_UNISTD_H -#define __NR_OABI_SYSCALL_BASE	0x900000 +#include <uapi/asm/unistd.h> -#if defined(__thumb__) || defined(__ARM_EABI__) -#define __NR_SYSCALL_BASE	0 -#else -#define __NR_SYSCALL_BASE	__NR_OABI_SYSCALL_BASE -#endif - -/* - * This file contains the system call numbers. 
- */ - -#define __NR_restart_syscall		(__NR_SYSCALL_BASE+  0) -#define __NR_exit			(__NR_SYSCALL_BASE+  1) -#define __NR_fork			(__NR_SYSCALL_BASE+  2) -#define __NR_read			(__NR_SYSCALL_BASE+  3) -#define __NR_write			(__NR_SYSCALL_BASE+  4) -#define __NR_open			(__NR_SYSCALL_BASE+  5) -#define __NR_close			(__NR_SYSCALL_BASE+  6) -					/* 7 was sys_waitpid */ -#define __NR_creat			(__NR_SYSCALL_BASE+  8) -#define __NR_link			(__NR_SYSCALL_BASE+  9) -#define __NR_unlink			(__NR_SYSCALL_BASE+ 10) -#define __NR_execve			(__NR_SYSCALL_BASE+ 11) -#define __NR_chdir			(__NR_SYSCALL_BASE+ 12) -#define __NR_time			(__NR_SYSCALL_BASE+ 13) -#define __NR_mknod			(__NR_SYSCALL_BASE+ 14) -#define __NR_chmod			(__NR_SYSCALL_BASE+ 15) -#define __NR_lchown			(__NR_SYSCALL_BASE+ 16) -					/* 17 was sys_break */ -					/* 18 was sys_stat */ -#define __NR_lseek			(__NR_SYSCALL_BASE+ 19) -#define __NR_getpid			(__NR_SYSCALL_BASE+ 20) -#define __NR_mount			(__NR_SYSCALL_BASE+ 21) -#define __NR_umount			(__NR_SYSCALL_BASE+ 22) -#define __NR_setuid			(__NR_SYSCALL_BASE+ 23) -#define __NR_getuid			(__NR_SYSCALL_BASE+ 24) -#define __NR_stime			(__NR_SYSCALL_BASE+ 25) -#define __NR_ptrace			(__NR_SYSCALL_BASE+ 26) -#define __NR_alarm			(__NR_SYSCALL_BASE+ 27) -					/* 28 was sys_fstat */ -#define __NR_pause			(__NR_SYSCALL_BASE+ 29) -#define __NR_utime			(__NR_SYSCALL_BASE+ 30) -					/* 31 was sys_stty */ -					/* 32 was sys_gtty */ -#define __NR_access			(__NR_SYSCALL_BASE+ 33) -#define __NR_nice			(__NR_SYSCALL_BASE+ 34) -					/* 35 was sys_ftime */ -#define __NR_sync			(__NR_SYSCALL_BASE+ 36) -#define __NR_kill			(__NR_SYSCALL_BASE+ 37) -#define __NR_rename			(__NR_SYSCALL_BASE+ 38) -#define __NR_mkdir			(__NR_SYSCALL_BASE+ 39) -#define __NR_rmdir			(__NR_SYSCALL_BASE+ 40) -#define __NR_dup			(__NR_SYSCALL_BASE+ 41) -#define __NR_pipe			(__NR_SYSCALL_BASE+ 42) -#define __NR_times			(__NR_SYSCALL_BASE+ 43) -					/* 44 was sys_prof */ -#define __NR_brk			(__NR_SYSCALL_BASE+ 45) -#define __NR_setgid			(__NR_SYSCALL_BASE+ 46) -#define __NR_getgid			(__NR_SYSCALL_BASE+ 47) -					/* 48 was sys_signal */ -#define __NR_geteuid			(__NR_SYSCALL_BASE+ 49) -#define __NR_getegid			(__NR_SYSCALL_BASE+ 50) -#define __NR_acct			(__NR_SYSCALL_BASE+ 51) -#define __NR_umount2			(__NR_SYSCALL_BASE+ 52) -					/* 53 was sys_lock */ -#define __NR_ioctl			(__NR_SYSCALL_BASE+ 54) -#define __NR_fcntl			(__NR_SYSCALL_BASE+ 55) -					/* 56 was sys_mpx */ -#define __NR_setpgid			(__NR_SYSCALL_BASE+ 57) -					/* 58 was sys_ulimit */ -					/* 59 was sys_olduname */ -#define __NR_umask			(__NR_SYSCALL_BASE+ 60) -#define __NR_chroot			(__NR_SYSCALL_BASE+ 61) -#define __NR_ustat			(__NR_SYSCALL_BASE+ 62) -#define __NR_dup2			(__NR_SYSCALL_BASE+ 63) -#define __NR_getppid			(__NR_SYSCALL_BASE+ 64) -#define __NR_getpgrp			(__NR_SYSCALL_BASE+ 65) -#define __NR_setsid			(__NR_SYSCALL_BASE+ 66) -#define __NR_sigaction			(__NR_SYSCALL_BASE+ 67) -					/* 68 was sys_sgetmask */ -					/* 69 was sys_ssetmask */ -#define __NR_setreuid			(__NR_SYSCALL_BASE+ 70) -#define __NR_setregid			(__NR_SYSCALL_BASE+ 71) -#define __NR_sigsuspend			(__NR_SYSCALL_BASE+ 72) -#define __NR_sigpending			(__NR_SYSCALL_BASE+ 73) -#define __NR_sethostname		(__NR_SYSCALL_BASE+ 74) -#define __NR_setrlimit			(__NR_SYSCALL_BASE+ 75) -#define __NR_getrlimit			(__NR_SYSCALL_BASE+ 76)	/* Back compat 2GB limited rlimit */ -#define __NR_getrusage			(__NR_SYSCALL_BASE+ 77) -#define __NR_gettimeofday		(__NR_SYSCALL_BASE+ 78) -#define __NR_settimeofday		(__NR_SYSCALL_BASE+ 79) -#define 
__NR_getgroups			(__NR_SYSCALL_BASE+ 80) -#define __NR_setgroups			(__NR_SYSCALL_BASE+ 81) -#define __NR_select			(__NR_SYSCALL_BASE+ 82) -#define __NR_symlink			(__NR_SYSCALL_BASE+ 83) -					/* 84 was sys_lstat */ -#define __NR_readlink			(__NR_SYSCALL_BASE+ 85) -#define __NR_uselib			(__NR_SYSCALL_BASE+ 86) -#define __NR_swapon			(__NR_SYSCALL_BASE+ 87) -#define __NR_reboot			(__NR_SYSCALL_BASE+ 88) -#define __NR_readdir			(__NR_SYSCALL_BASE+ 89) -#define __NR_mmap			(__NR_SYSCALL_BASE+ 90) -#define __NR_munmap			(__NR_SYSCALL_BASE+ 91) -#define __NR_truncate			(__NR_SYSCALL_BASE+ 92) -#define __NR_ftruncate			(__NR_SYSCALL_BASE+ 93) -#define __NR_fchmod			(__NR_SYSCALL_BASE+ 94) -#define __NR_fchown			(__NR_SYSCALL_BASE+ 95) -#define __NR_getpriority		(__NR_SYSCALL_BASE+ 96) -#define __NR_setpriority		(__NR_SYSCALL_BASE+ 97) -					/* 98 was sys_profil */ -#define __NR_statfs			(__NR_SYSCALL_BASE+ 99) -#define __NR_fstatfs			(__NR_SYSCALL_BASE+100) -					/* 101 was sys_ioperm */ -#define __NR_socketcall			(__NR_SYSCALL_BASE+102) -#define __NR_syslog			(__NR_SYSCALL_BASE+103) -#define __NR_setitimer			(__NR_SYSCALL_BASE+104) -#define __NR_getitimer			(__NR_SYSCALL_BASE+105) -#define __NR_stat			(__NR_SYSCALL_BASE+106) -#define __NR_lstat			(__NR_SYSCALL_BASE+107) -#define __NR_fstat			(__NR_SYSCALL_BASE+108) -					/* 109 was sys_uname */ -					/* 110 was sys_iopl */ -#define __NR_vhangup			(__NR_SYSCALL_BASE+111) -					/* 112 was sys_idle */ -#define __NR_syscall			(__NR_SYSCALL_BASE+113) /* syscall to call a syscall! */ -#define __NR_wait4			(__NR_SYSCALL_BASE+114) -#define __NR_swapoff			(__NR_SYSCALL_BASE+115) -#define __NR_sysinfo			(__NR_SYSCALL_BASE+116) -#define __NR_ipc			(__NR_SYSCALL_BASE+117) -#define __NR_fsync			(__NR_SYSCALL_BASE+118) -#define __NR_sigreturn			(__NR_SYSCALL_BASE+119) -#define __NR_clone			(__NR_SYSCALL_BASE+120) -#define __NR_setdomainname		(__NR_SYSCALL_BASE+121) -#define __NR_uname			(__NR_SYSCALL_BASE+122) -					/* 123 was sys_modify_ldt */ -#define __NR_adjtimex			(__NR_SYSCALL_BASE+124) -#define __NR_mprotect			(__NR_SYSCALL_BASE+125) -#define __NR_sigprocmask		(__NR_SYSCALL_BASE+126) -					/* 127 was sys_create_module */ -#define __NR_init_module		(__NR_SYSCALL_BASE+128) -#define __NR_delete_module		(__NR_SYSCALL_BASE+129) -					/* 130 was sys_get_kernel_syms */ -#define __NR_quotactl			(__NR_SYSCALL_BASE+131) -#define __NR_getpgid			(__NR_SYSCALL_BASE+132) -#define __NR_fchdir			(__NR_SYSCALL_BASE+133) -#define __NR_bdflush			(__NR_SYSCALL_BASE+134) -#define __NR_sysfs			(__NR_SYSCALL_BASE+135) -#define __NR_personality		(__NR_SYSCALL_BASE+136) -					/* 137 was sys_afs_syscall */ -#define __NR_setfsuid			(__NR_SYSCALL_BASE+138) -#define __NR_setfsgid			(__NR_SYSCALL_BASE+139) -#define __NR__llseek			(__NR_SYSCALL_BASE+140) -#define __NR_getdents			(__NR_SYSCALL_BASE+141) -#define __NR__newselect			(__NR_SYSCALL_BASE+142) -#define __NR_flock			(__NR_SYSCALL_BASE+143) -#define __NR_msync			(__NR_SYSCALL_BASE+144) -#define __NR_readv			(__NR_SYSCALL_BASE+145) -#define __NR_writev			(__NR_SYSCALL_BASE+146) -#define __NR_getsid			(__NR_SYSCALL_BASE+147) -#define __NR_fdatasync			(__NR_SYSCALL_BASE+148) -#define __NR__sysctl			(__NR_SYSCALL_BASE+149) -#define __NR_mlock			(__NR_SYSCALL_BASE+150) -#define __NR_munlock			(__NR_SYSCALL_BASE+151) -#define __NR_mlockall			(__NR_SYSCALL_BASE+152) -#define __NR_munlockall			(__NR_SYSCALL_BASE+153) -#define __NR_sched_setparam		(__NR_SYSCALL_BASE+154) -#define __NR_sched_getparam		(__NR_SYSCALL_BASE+155) -#define 
__NR_sched_setscheduler		(__NR_SYSCALL_BASE+156) -#define __NR_sched_getscheduler		(__NR_SYSCALL_BASE+157) -#define __NR_sched_yield		(__NR_SYSCALL_BASE+158) -#define __NR_sched_get_priority_max	(__NR_SYSCALL_BASE+159) -#define __NR_sched_get_priority_min	(__NR_SYSCALL_BASE+160) -#define __NR_sched_rr_get_interval	(__NR_SYSCALL_BASE+161) -#define __NR_nanosleep			(__NR_SYSCALL_BASE+162) -#define __NR_mremap			(__NR_SYSCALL_BASE+163) -#define __NR_setresuid			(__NR_SYSCALL_BASE+164) -#define __NR_getresuid			(__NR_SYSCALL_BASE+165) -					/* 166 was sys_vm86 */ -					/* 167 was sys_query_module */ -#define __NR_poll			(__NR_SYSCALL_BASE+168) -#define __NR_nfsservctl			(__NR_SYSCALL_BASE+169) -#define __NR_setresgid			(__NR_SYSCALL_BASE+170) -#define __NR_getresgid			(__NR_SYSCALL_BASE+171) -#define __NR_prctl			(__NR_SYSCALL_BASE+172) -#define __NR_rt_sigreturn		(__NR_SYSCALL_BASE+173) -#define __NR_rt_sigaction		(__NR_SYSCALL_BASE+174) -#define __NR_rt_sigprocmask		(__NR_SYSCALL_BASE+175) -#define __NR_rt_sigpending		(__NR_SYSCALL_BASE+176) -#define __NR_rt_sigtimedwait		(__NR_SYSCALL_BASE+177) -#define __NR_rt_sigqueueinfo		(__NR_SYSCALL_BASE+178) -#define __NR_rt_sigsuspend		(__NR_SYSCALL_BASE+179) -#define __NR_pread64			(__NR_SYSCALL_BASE+180) -#define __NR_pwrite64			(__NR_SYSCALL_BASE+181) -#define __NR_chown			(__NR_SYSCALL_BASE+182) -#define __NR_getcwd			(__NR_SYSCALL_BASE+183) -#define __NR_capget			(__NR_SYSCALL_BASE+184) -#define __NR_capset			(__NR_SYSCALL_BASE+185) -#define __NR_sigaltstack		(__NR_SYSCALL_BASE+186) -#define __NR_sendfile			(__NR_SYSCALL_BASE+187) -					/* 188 reserved */ -					/* 189 reserved */ -#define __NR_vfork			(__NR_SYSCALL_BASE+190) -#define __NR_ugetrlimit			(__NR_SYSCALL_BASE+191)	/* SuS compliant getrlimit */ -#define __NR_mmap2			(__NR_SYSCALL_BASE+192) -#define __NR_truncate64			(__NR_SYSCALL_BASE+193) -#define __NR_ftruncate64		(__NR_SYSCALL_BASE+194) -#define __NR_stat64			(__NR_SYSCALL_BASE+195) -#define __NR_lstat64			(__NR_SYSCALL_BASE+196) -#define __NR_fstat64			(__NR_SYSCALL_BASE+197) -#define __NR_lchown32			(__NR_SYSCALL_BASE+198) -#define __NR_getuid32			(__NR_SYSCALL_BASE+199) -#define __NR_getgid32			(__NR_SYSCALL_BASE+200) -#define __NR_geteuid32			(__NR_SYSCALL_BASE+201) -#define __NR_getegid32			(__NR_SYSCALL_BASE+202) -#define __NR_setreuid32			(__NR_SYSCALL_BASE+203) -#define __NR_setregid32			(__NR_SYSCALL_BASE+204) -#define __NR_getgroups32		(__NR_SYSCALL_BASE+205) -#define __NR_setgroups32		(__NR_SYSCALL_BASE+206) -#define __NR_fchown32			(__NR_SYSCALL_BASE+207) -#define __NR_setresuid32		(__NR_SYSCALL_BASE+208) -#define __NR_getresuid32		(__NR_SYSCALL_BASE+209) -#define __NR_setresgid32		(__NR_SYSCALL_BASE+210) -#define __NR_getresgid32		(__NR_SYSCALL_BASE+211) -#define __NR_chown32			(__NR_SYSCALL_BASE+212) -#define __NR_setuid32			(__NR_SYSCALL_BASE+213) -#define __NR_setgid32			(__NR_SYSCALL_BASE+214) -#define __NR_setfsuid32			(__NR_SYSCALL_BASE+215) -#define __NR_setfsgid32			(__NR_SYSCALL_BASE+216) -#define __NR_getdents64			(__NR_SYSCALL_BASE+217) -#define __NR_pivot_root			(__NR_SYSCALL_BASE+218) -#define __NR_mincore			(__NR_SYSCALL_BASE+219) -#define __NR_madvise			(__NR_SYSCALL_BASE+220) -#define __NR_fcntl64			(__NR_SYSCALL_BASE+221) -					/* 222 for tux */ -					/* 223 is unused */ -#define __NR_gettid			(__NR_SYSCALL_BASE+224) -#define __NR_readahead			(__NR_SYSCALL_BASE+225) -#define __NR_setxattr			(__NR_SYSCALL_BASE+226) -#define __NR_lsetxattr			(__NR_SYSCALL_BASE+227) -#define __NR_fsetxattr			
(__NR_SYSCALL_BASE+228) -#define __NR_getxattr			(__NR_SYSCALL_BASE+229) -#define __NR_lgetxattr			(__NR_SYSCALL_BASE+230) -#define __NR_fgetxattr			(__NR_SYSCALL_BASE+231) -#define __NR_listxattr			(__NR_SYSCALL_BASE+232) -#define __NR_llistxattr			(__NR_SYSCALL_BASE+233) -#define __NR_flistxattr			(__NR_SYSCALL_BASE+234) -#define __NR_removexattr		(__NR_SYSCALL_BASE+235) -#define __NR_lremovexattr		(__NR_SYSCALL_BASE+236) -#define __NR_fremovexattr		(__NR_SYSCALL_BASE+237) -#define __NR_tkill			(__NR_SYSCALL_BASE+238) -#define __NR_sendfile64			(__NR_SYSCALL_BASE+239) -#define __NR_futex			(__NR_SYSCALL_BASE+240) -#define __NR_sched_setaffinity		(__NR_SYSCALL_BASE+241) -#define __NR_sched_getaffinity		(__NR_SYSCALL_BASE+242) -#define __NR_io_setup			(__NR_SYSCALL_BASE+243) -#define __NR_io_destroy			(__NR_SYSCALL_BASE+244) -#define __NR_io_getevents		(__NR_SYSCALL_BASE+245) -#define __NR_io_submit			(__NR_SYSCALL_BASE+246) -#define __NR_io_cancel			(__NR_SYSCALL_BASE+247) -#define __NR_exit_group			(__NR_SYSCALL_BASE+248) -#define __NR_lookup_dcookie		(__NR_SYSCALL_BASE+249) -#define __NR_epoll_create		(__NR_SYSCALL_BASE+250) -#define __NR_epoll_ctl			(__NR_SYSCALL_BASE+251) -#define __NR_epoll_wait			(__NR_SYSCALL_BASE+252) -#define __NR_remap_file_pages		(__NR_SYSCALL_BASE+253) -					/* 254 for set_thread_area */ -					/* 255 for get_thread_area */ -#define __NR_set_tid_address		(__NR_SYSCALL_BASE+256) -#define __NR_timer_create		(__NR_SYSCALL_BASE+257) -#define __NR_timer_settime		(__NR_SYSCALL_BASE+258) -#define __NR_timer_gettime		(__NR_SYSCALL_BASE+259) -#define __NR_timer_getoverrun		(__NR_SYSCALL_BASE+260) -#define __NR_timer_delete		(__NR_SYSCALL_BASE+261) -#define __NR_clock_settime		(__NR_SYSCALL_BASE+262) -#define __NR_clock_gettime		(__NR_SYSCALL_BASE+263) -#define __NR_clock_getres		(__NR_SYSCALL_BASE+264) -#define __NR_clock_nanosleep		(__NR_SYSCALL_BASE+265) -#define __NR_statfs64			(__NR_SYSCALL_BASE+266) -#define __NR_fstatfs64			(__NR_SYSCALL_BASE+267) -#define __NR_tgkill			(__NR_SYSCALL_BASE+268) -#define __NR_utimes			(__NR_SYSCALL_BASE+269) -#define __NR_arm_fadvise64_64		(__NR_SYSCALL_BASE+270) -#define __NR_pciconfig_iobase		(__NR_SYSCALL_BASE+271) -#define __NR_pciconfig_read		(__NR_SYSCALL_BASE+272) -#define __NR_pciconfig_write		(__NR_SYSCALL_BASE+273) -#define __NR_mq_open			(__NR_SYSCALL_BASE+274) -#define __NR_mq_unlink			(__NR_SYSCALL_BASE+275) -#define __NR_mq_timedsend		(__NR_SYSCALL_BASE+276) -#define __NR_mq_timedreceive		(__NR_SYSCALL_BASE+277) -#define __NR_mq_notify			(__NR_SYSCALL_BASE+278) -#define __NR_mq_getsetattr		(__NR_SYSCALL_BASE+279) -#define __NR_waitid			(__NR_SYSCALL_BASE+280) -#define __NR_socket			(__NR_SYSCALL_BASE+281) -#define __NR_bind			(__NR_SYSCALL_BASE+282) -#define __NR_connect			(__NR_SYSCALL_BASE+283) -#define __NR_listen			(__NR_SYSCALL_BASE+284) -#define __NR_accept			(__NR_SYSCALL_BASE+285) -#define __NR_getsockname		(__NR_SYSCALL_BASE+286) -#define __NR_getpeername		(__NR_SYSCALL_BASE+287) -#define __NR_socketpair			(__NR_SYSCALL_BASE+288) -#define __NR_send			(__NR_SYSCALL_BASE+289) -#define __NR_sendto			(__NR_SYSCALL_BASE+290) -#define __NR_recv			(__NR_SYSCALL_BASE+291) -#define __NR_recvfrom			(__NR_SYSCALL_BASE+292) -#define __NR_shutdown			(__NR_SYSCALL_BASE+293) -#define __NR_setsockopt			(__NR_SYSCALL_BASE+294) -#define __NR_getsockopt			(__NR_SYSCALL_BASE+295) -#define __NR_sendmsg			(__NR_SYSCALL_BASE+296) -#define __NR_recvmsg			(__NR_SYSCALL_BASE+297) -#define __NR_semop			(__NR_SYSCALL_BASE+298) -#define 
__NR_semget			(__NR_SYSCALL_BASE+299) -#define __NR_semctl			(__NR_SYSCALL_BASE+300) -#define __NR_msgsnd			(__NR_SYSCALL_BASE+301) -#define __NR_msgrcv			(__NR_SYSCALL_BASE+302) -#define __NR_msgget			(__NR_SYSCALL_BASE+303) -#define __NR_msgctl			(__NR_SYSCALL_BASE+304) -#define __NR_shmat			(__NR_SYSCALL_BASE+305) -#define __NR_shmdt			(__NR_SYSCALL_BASE+306) -#define __NR_shmget			(__NR_SYSCALL_BASE+307) -#define __NR_shmctl			(__NR_SYSCALL_BASE+308) -#define __NR_add_key			(__NR_SYSCALL_BASE+309) -#define __NR_request_key		(__NR_SYSCALL_BASE+310) -#define __NR_keyctl			(__NR_SYSCALL_BASE+311) -#define __NR_semtimedop			(__NR_SYSCALL_BASE+312) -#define __NR_vserver			(__NR_SYSCALL_BASE+313) -#define __NR_ioprio_set			(__NR_SYSCALL_BASE+314) -#define __NR_ioprio_get			(__NR_SYSCALL_BASE+315) -#define __NR_inotify_init		(__NR_SYSCALL_BASE+316) -#define __NR_inotify_add_watch		(__NR_SYSCALL_BASE+317) -#define __NR_inotify_rm_watch		(__NR_SYSCALL_BASE+318) -#define __NR_mbind			(__NR_SYSCALL_BASE+319) -#define __NR_get_mempolicy		(__NR_SYSCALL_BASE+320) -#define __NR_set_mempolicy		(__NR_SYSCALL_BASE+321) -#define __NR_openat			(__NR_SYSCALL_BASE+322) -#define __NR_mkdirat			(__NR_SYSCALL_BASE+323) -#define __NR_mknodat			(__NR_SYSCALL_BASE+324) -#define __NR_fchownat			(__NR_SYSCALL_BASE+325) -#define __NR_futimesat			(__NR_SYSCALL_BASE+326) -#define __NR_fstatat64			(__NR_SYSCALL_BASE+327) -#define __NR_unlinkat			(__NR_SYSCALL_BASE+328) -#define __NR_renameat			(__NR_SYSCALL_BASE+329) -#define __NR_linkat			(__NR_SYSCALL_BASE+330) -#define __NR_symlinkat			(__NR_SYSCALL_BASE+331) -#define __NR_readlinkat			(__NR_SYSCALL_BASE+332) -#define __NR_fchmodat			(__NR_SYSCALL_BASE+333) -#define __NR_faccessat			(__NR_SYSCALL_BASE+334) -#define __NR_pselect6			(__NR_SYSCALL_BASE+335) -#define __NR_ppoll			(__NR_SYSCALL_BASE+336) -#define __NR_unshare			(__NR_SYSCALL_BASE+337) -#define __NR_set_robust_list		(__NR_SYSCALL_BASE+338) -#define __NR_get_robust_list		(__NR_SYSCALL_BASE+339) -#define __NR_splice			(__NR_SYSCALL_BASE+340) -#define __NR_arm_sync_file_range	(__NR_SYSCALL_BASE+341) -#define __NR_sync_file_range2		__NR_arm_sync_file_range -#define __NR_tee			(__NR_SYSCALL_BASE+342) -#define __NR_vmsplice			(__NR_SYSCALL_BASE+343) -#define __NR_move_pages			(__NR_SYSCALL_BASE+344) -#define __NR_getcpu			(__NR_SYSCALL_BASE+345) -#define __NR_epoll_pwait		(__NR_SYSCALL_BASE+346) -#define __NR_kexec_load			(__NR_SYSCALL_BASE+347) -#define __NR_utimensat			(__NR_SYSCALL_BASE+348) -#define __NR_signalfd			(__NR_SYSCALL_BASE+349) -#define __NR_timerfd_create		(__NR_SYSCALL_BASE+350) -#define __NR_eventfd			(__NR_SYSCALL_BASE+351) -#define __NR_fallocate			(__NR_SYSCALL_BASE+352) -#define __NR_timerfd_settime		(__NR_SYSCALL_BASE+353) -#define __NR_timerfd_gettime		(__NR_SYSCALL_BASE+354) -#define __NR_signalfd4			(__NR_SYSCALL_BASE+355) -#define __NR_eventfd2			(__NR_SYSCALL_BASE+356) -#define __NR_epoll_create1		(__NR_SYSCALL_BASE+357) -#define __NR_dup3			(__NR_SYSCALL_BASE+358) -#define __NR_pipe2			(__NR_SYSCALL_BASE+359) -#define __NR_inotify_init1		(__NR_SYSCALL_BASE+360) -#define __NR_preadv			(__NR_SYSCALL_BASE+361) -#define __NR_pwritev			(__NR_SYSCALL_BASE+362) -#define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363) -#define __NR_perf_event_open		(__NR_SYSCALL_BASE+364) -#define __NR_recvmmsg			(__NR_SYSCALL_BASE+365) -#define __NR_accept4			(__NR_SYSCALL_BASE+366) -#define __NR_fanotify_init		(__NR_SYSCALL_BASE+367) -#define __NR_fanotify_mark		(__NR_SYSCALL_BASE+368) -#define 
__NR_prlimit64			(__NR_SYSCALL_BASE+369) - -/* - * The following SWIs are ARM private. - */ -#define __ARM_NR_BASE			(__NR_SYSCALL_BASE+0x0f0000) -#define __ARM_NR_breakpoint		(__ARM_NR_BASE+1) -#define __ARM_NR_cacheflush		(__ARM_NR_BASE+2) -#define __ARM_NR_usr26			(__ARM_NR_BASE+3) -#define __ARM_NR_usr32			(__ARM_NR_BASE+4) -#define __ARM_NR_set_tls		(__ARM_NR_BASE+5) - -/* - * *NOTE*: This is a ghost syscall private to the kernel.  Only the - * __kuser_cmpxchg code in entry-armv.S should be aware of its - * existence.  Don't ever use this from user code. - */ -#ifdef __KERNEL__ +#define __NR_syscalls  (384)  #define __ARM_NR_cmpxchg		(__ARM_NR_BASE+0x00fff0) -#endif - -/* - * The following syscalls are obsolete and no longer available for EABI. - */ -#if defined(__ARM_EABI__) && !defined(__KERNEL__) -#undef __NR_time -#undef __NR_umount -#undef __NR_stime -#undef __NR_alarm -#undef __NR_utime -#undef __NR_getrlimit -#undef __NR_select -#undef __NR_readdir -#undef __NR_mmap -#undef __NR_socketcall -#undef __NR_syscall -#undef __NR_ipc -#endif -#ifdef __KERNEL__ - -#define __ARCH_WANT_IPC_PARSE_VERSION  #define __ARCH_WANT_STAT64  #define __ARCH_WANT_SYS_GETHOSTNAME  #define __ARCH_WANT_SYS_PAUSE @@ -445,8 +26,6 @@  #define __ARCH_WANT_SYS_NICE  #define __ARCH_WANT_SYS_SIGPENDING  #define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND  #define __ARCH_WANT_SYS_OLD_MMAP  #define __ARCH_WANT_SYS_OLD_SELECT @@ -460,20 +39,14 @@  #define __ARCH_WANT_OLD_READDIR  #define __ARCH_WANT_SYS_SOCKETCALL  #endif - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE  /*   * Unimplemented (or alternatively implemented) syscalls   */ -#define __IGNORE_fadvise64_64		1 -#define __IGNORE_migrate_pages		1 +#define __IGNORE_fadvise64_64 +#define __IGNORE_migrate_pages -#endif /* __KERNEL__ */  #endif /* __ASM_ARM_UNISTD_H */ diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h index a5edf421005..d1c3f3a71c9 100644 --- a/arch/arm/include/asm/unwind.h +++ b/arch/arm/include/asm/unwind.h @@ -30,14 +30,15 @@ enum unwind_reason_code {  };  struct unwind_idx { -	unsigned long addr; +	unsigned long addr_offset;  	unsigned long insn;  };  struct unwind_table {  	struct list_head list; -	struct unwind_idx *start; -	struct unwind_idx *stop; +	const struct unwind_idx *start; +	const struct unwind_idx *origin; +	const struct unwind_idx *stop;  	unsigned long begin_addr;  	unsigned long end_addr;  }; @@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,  extern void unwind_table_del(struct unwind_table *tab);  extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk); -#ifdef CONFIG_ARM_UNWIND -extern int __init unwind_init(void); -#else -static inline int __init unwind_init(void) -{ -	return 0; -} -#endif -  #endif	/* !__ASSEMBLY__ */  #ifdef CONFIG_ARM_UNWIND diff --git a/arch/arm/include/asm/uprobes.h b/arch/arm/include/asm/uprobes.h new file mode 100644 index 00000000000..9472c20b7d4 --- /dev/null +++ b/arch/arm/include/asm/uprobes.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2012 Rabin Vincent <rabin at rab.in> + * + * This program is free software; you can redistribute it and/or modify + * it under the 
terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ASM_UPROBES_H +#define _ASM_UPROBES_H + +#include <asm/probes.h> +#include <asm/opcodes.h> + +typedef u32 uprobe_opcode_t; + +#define MAX_UINSN_BYTES		4 +#define UPROBE_XOL_SLOT_BYTES	64 + +#define UPROBE_SWBP_ARM_INSN	0xe7f001f9 +#define UPROBE_SS_ARM_INSN	0xe7f001fa +#define UPROBE_SWBP_INSN	__opcode_to_mem_arm(UPROBE_SWBP_ARM_INSN) +#define UPROBE_SWBP_INSN_SIZE	4 + +struct arch_uprobe_task { +	u32 backup; +	unsigned long	saved_trap_no; +}; + +struct arch_uprobe { +	u8 insn[MAX_UINSN_BYTES]; +	unsigned long ixol[2]; +	uprobe_opcode_t bpinsn; +	bool simulate; +	u32 pcreg; +	void (*prehandler)(struct arch_uprobe *auprobe, +			   struct arch_uprobe_task *autask, +			   struct pt_regs *regs); +	void (*posthandler)(struct arch_uprobe *auprobe, +			    struct arch_uprobe_task *autask, +			    struct pt_regs *regs); +	struct arch_probes_insn asi; +}; + +#endif diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index 05ac4b06876..35917b3a97f 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -71,7 +71,7 @@ struct user{  				/* the registers. */    unsigned long magic;		/* To uniquely identify a core file */    char u_comm[32];		/* User command that was responsible */ -  int u_debugreg[8]; +  int u_debugreg[8];		/* No longer used */    struct user_fp u_fp;		/* FP state */    struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */    				/* the FP registers. */ diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h new file mode 100644 index 00000000000..615781c6162 --- /dev/null +++ b/arch/arm/include/asm/v7m.h @@ -0,0 +1,56 @@ +/* + * Common defines for v7m cpus + */ +#define V7M_SCS_ICTR			IOMEM(0xe000e004) +#define V7M_SCS_ICTR_INTLINESNUM_MASK		0x0000000f + +#define BASEADDR_V7M_SCB		IOMEM(0xe000ed00) + +#define V7M_SCB_CPUID			0x00 + +#define V7M_SCB_ICSR			0x04 +#define V7M_SCB_ICSR_PENDSVSET			(1 << 28) +#define V7M_SCB_ICSR_PENDSVCLR			(1 << 27) +#define V7M_SCB_ICSR_RETTOBASE			(1 << 11) + +#define V7M_SCB_VTOR			0x08 + +#define V7M_SCB_AIRCR			0x0c +#define V7M_SCB_AIRCR_VECTKEY			(0x05fa << 16) +#define V7M_SCB_AIRCR_SYSRESETREQ		(1 << 2) + +#define V7M_SCB_SCR			0x10 +#define V7M_SCB_SCR_SLEEPDEEP			(1 << 2) + +#define V7M_SCB_CCR			0x14 +#define V7M_SCB_CCR_STKALIGN			(1 << 9) + +#define V7M_SCB_SHPR2			0x1c +#define V7M_SCB_SHPR3			0x20 + +#define V7M_SCB_SHCSR			0x24 +#define V7M_SCB_SHCSR_USGFAULTENA		(1 << 18) +#define V7M_SCB_SHCSR_BUSFAULTENA		(1 << 17) +#define V7M_SCB_SHCSR_MEMFAULTENA		(1 << 16) + +#define V7M_xPSR_FRAMEPTRALIGN			0x00000200 +#define V7M_xPSR_EXCEPTIONNO			0x000001ff + +/* + * When branching to an address that has bits [31:28] == 0xf an exception return + * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP + * extension Bit [4] defines if the exception frame has space allocated for FP + * state information, SBOP otherwise. Bit [3] defines the mode that is returned + * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used + * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01. 
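In practice, decoding an EXC_RETURN value reduces to bit tests against the masks defined just below; a minimal sketch (helper names are invented, not part of this patch):

#include <linux/types.h>
#include <asm/v7m.h>

/* Sketch only: decode an EXC_RETURN value per the layout described above. */
static inline bool sketch_exc_ret_uses_psp(u32 exc_ret)
{
	/* bit [2]: 0 -> MSP, 1 -> PSP */
	return exc_ret & EXC_RET_STACK_MASK;
}

static inline bool sketch_exc_ret_to_thread_mode(u32 exc_ret)
{
	/* bit [3]: 0 -> handler mode, 1 -> thread mode */
	return exc_ret & (1 << 3);
}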
+ */ +#define EXC_RET_STACK_MASK			0x00000004 +#define EXC_RET_THREADMODE_PROCESSSTACK		0xfffffffd + +#ifndef __ASSEMBLY__ + +enum reboot_mode; + +void armv7m_restart(enum reboot_mode mode, const char *cmd); + +#endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 3d5fc41ae8d..301c1db3e99 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -5,7 +5,7 @@   */  #include <asm/hwcap.h> -#include "vfp.h" +#include <asm/vfp.h>  @ Macros to allow building with old toolkits (with no VFP support)  	.macro	VFPFMRX, rd, sysreg, cond @@ -27,9 +27,9 @@  #if __LINUX_ARM_ARCH__ <= 6  	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs  	ldr	\tmp, [\tmp, #0] -	tst	\tmp, #HWCAP_VFPv3D16 -	ldceq	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31} -	addne	\base, \base, #32*4		    @ step over unused register space +	tst	\tmp, #HWCAP_VFPD32 +	ldcnel	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31} +	addeq	\base, \base, #32*4		    @ step over unused register space  #else  	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0  	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field @@ -51,9 +51,9 @@  #if __LINUX_ARM_ARCH__ <= 6  	ldr	\tmp, =elf_hwcap		    @ may not have MVFR regs  	ldr	\tmp, [\tmp, #0] -	tst	\tmp, #HWCAP_VFPv3D16 -	stceq	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31} -	addne	\base, \base, #32*4		    @ step over unused register space +	tst	\tmp, #HWCAP_VFPD32 +	stcnel	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31} +	addeq	\base, \base, #32*4		    @ step over unused register space  #else  	VFPFMRX	\tmp, MVFR0			    @ Media and VFP Feature Register 0  	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	    @ A_SIMD field diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h index 250a4dd0063..91f40217bfa 100644 --- a/arch/arm/include/asm/vga.h +++ b/arch/arm/include/asm/vga.h @@ -2,9 +2,10 @@  #define ASMARM_VGA_H  #include <linux/io.h> -#include <mach/hardware.h> -#define VGA_MAP_MEM(x,s)	(PCIMEM_BASE + (x)) +extern unsigned long vga_base; + +#define VGA_MAP_MEM(x,s)	(vga_base + (x))  #define vga_readb(x)	(*((volatile unsigned char *)x))  #define vga_writeb(x,y)	(*((volatile unsigned char *)y) = (x)) diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h new file mode 100644 index 00000000000..4371f45c578 --- /dev/null +++ b/arch/arm/include/asm/virt.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2012 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef VIRT_H +#define VIRT_H + +#include <asm/ptrace.h> + +/* + * Flag indicating that the kernel was not entered in the same mode on every + * CPU.  The zImage loader stashes this value in an SPSR, so we need an + * architecturally defined flag bit here. 
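The predicates this header adds below wrap that bookkeeping; a sketch of how early setup code might consume them (example_hyp_init() is an invented name, and CONFIG_ARM_VIRT_EXT is assumed):

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/errno.h>
#include <asm/virt.h>

/* Sketch only: an invented init hook gating on the recorded boot mode. */
static int __init example_hyp_init(void)
{
	sync_boot_mode();	/* pick up writes the secondaries did with caches off */

	if (!is_hyp_mode_available())
		return -ENODEV;	/* primary CPU was not entered in HYP mode */

	if (is_hyp_mode_mismatched())
		pr_warn("CPUs were booted in different modes\n");

	return 0;
}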
+ */ +#define BOOT_CPU_MODE_MISMATCH	PSR_N_BIT + +#ifndef __ASSEMBLY__ +#include <asm/cacheflush.h> + +#ifdef CONFIG_ARM_VIRT_EXT +/* + * __boot_cpu_mode records what mode the primary CPU was booted in. + * A correctly-implemented bootloader must start all CPUs in the same mode: + * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate + * that some CPU(s) were booted in a different mode. + * + * This allows the kernel to flag an error when the secondaries have come up. + */ +extern int __boot_cpu_mode; + +static inline void sync_boot_mode(void) +{ +	/* +	 * As secondaries write to __boot_cpu_mode with caches disabled, we +	 * must flush the corresponding cache entries to ensure the visibility +	 * of their writes. +	 */ +	sync_cache_r(&__boot_cpu_mode); +} + +void __hyp_set_vectors(unsigned long phys_vector_base); +unsigned long __hyp_get_vectors(void); +#else +#define __boot_cpu_mode	(SVC_MODE) +#define sync_boot_mode() +#endif + +#ifndef ZIMAGE +void hyp_mode_check(void); + +/* Reports the availability of HYP mode */ +static inline bool is_hyp_mode_available(void) +{ +	return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE && +		!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH)); +} + +/* Check if the bootloader has booted CPUs in different modes */ +static inline bool is_hyp_mode_mismatched(void) +{ +	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); +} +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* ! VIRT_H */ diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h new file mode 100644 index 00000000000..a6d0a29861e --- /dev/null +++ b/arch/arm/include/asm/word-at-a-time.h @@ -0,0 +1,98 @@ +#ifndef __ASM_ARM_WORD_AT_A_TIME_H +#define __ASM_ARM_WORD_AT_A_TIME_H + +#ifndef __ARMEB__ + +/* + * Little-endian word-at-a-time zero byte handling. + * Heavily based on the x86 algorithm. + */ +#include <linux/kernel.h> + +struct word_at_a_time { +	const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, +				     const struct word_at_a_time *c) +{ +	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; +	*bits = mask; +	return mask; +} + +#define prep_zero_mask(a, bits, c) (bits) + +static inline unsigned long create_zero_mask(unsigned long bits) +{ +	bits = (bits - 1) & ~bits; +	return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ +	unsigned long ret; + +#if __LINUX_ARM_ARCH__ >= 5 +	/* We have clz available. */ +	ret = fls(mask) >> 3; +#else +	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ +	ret = (0x0ff0001 + mask) >> 23; +	/* Fix the 1 for 00 case */ +	ret &= mask; +#endif + +	return ret; +} + +#define zero_bytemask(mask) (mask) + +#else	/* __ARMEB__ */ +#include <asm-generic/word-at-a-time.h> +#endif + +#ifdef CONFIG_DCACHE_WORD_ACCESS + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. 
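Taken together, the helpers above follow the usual word-at-a-time pattern: load a word, cheaply test for a zero byte, and only on a hit pay for the mask post-processing. A minimal sketch of a strlen-style scan, assuming a word-aligned string whose trailing padding is readable (as callers such as the dentry-name hashing arrange):

#include <linux/types.h>
#include <asm/word-at-a-time.h>

/* Sketch only: count bytes up to the first NUL, one word per iteration. */
static inline size_t sketch_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;
	unsigned long val, bits;
	size_t len = 0;

	for (;;) {
		val = *p++;
		if (has_zero(val, &bits, &constants)) {
			bits = prep_zero_mask(val, bits, &constants);
			bits = create_zero_mask(bits);
			return len + find_zero(bits);
		}
		len += sizeof(unsigned long);
	}
}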
+ */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ +	unsigned long ret, offset; + +	/* Load word from unaligned pointer addr */ +	asm( +	"1:	ldr	%0, [%2]\n" +	"2:\n" +	"	.pushsection .fixup,\"ax\"\n" +	"	.align 2\n" +	"3:	and	%1, %2, #0x3\n" +	"	bic	%2, %2, #0x3\n" +	"	ldr	%0, [%2]\n" +	"	lsl	%1, %1, #0x3\n" +#ifndef __ARMEB__ +	"	lsr	%0, %0, %1\n" +#else +	"	lsl	%0, %0, %1\n" +#endif +	"	b	2b\n" +	"	.popsection\n" +	"	.pushsection __ex_table,\"a\"\n" +	"	.align	3\n" +	"	.long	1b, 3b\n" +	"	.popsection" +	: "=&r" (ret), "=&r" (offset) +	: "r" (addr), "Qo" (*(unsigned long *)addr)); + +	return ret; +} + +#endif	/* DCACHE_WORD_ACCESS */ +#endif /* __ASM_ARM_WORD_AT_A_TIME_H */ diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h new file mode 100644 index 00000000000..8b1f37bfeee --- /dev/null +++ b/arch/arm/include/asm/xen/events.h @@ -0,0 +1,23 @@ +#ifndef _ASM_ARM_XEN_EVENTS_H +#define _ASM_ARM_XEN_EVENTS_H + +#include <asm/ptrace.h> +#include <asm/atomic.h> + +enum ipi_vector { +	XEN_PLACEHOLDER_VECTOR, + +	/* Xen IPIs go here */ +	XEN_NR_IPIS, +}; + +static inline int xen_irqs_disabled(struct pt_regs *regs) +{ +	return raw_irqs_disabled_flags(regs->ARM_cpsr); +} + +#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr),	\ +							    atomic64_t,	\ +							    counter), (val)) + +#endif /* _ASM_ARM_XEN_EVENTS_H */ diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h new file mode 100644 index 00000000000..712b50e0a6d --- /dev/null +++ b/arch/arm/include/asm/xen/hypercall.h @@ -0,0 +1,77 @@ +/****************************************************************************** + * hypercall.h + * + * Linux-specific hypervisor handling. + * + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef _ASM_ARM_XEN_HYPERCALL_H +#define _ASM_ARM_XEN_HYPERCALL_H + +#include <xen/interface/xen.h> +#include <xen/interface/sched.h> + +long privcmd_call(unsigned call, unsigned long a1, +		unsigned long a2, unsigned long a3, +		unsigned long a4, unsigned long a5); +int HYPERVISOR_xen_version(int cmd, void *arg); +int HYPERVISOR_console_io(int cmd, int count, char *str); +int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); +int HYPERVISOR_sched_op(int cmd, void *arg); +int HYPERVISOR_event_channel_op(int cmd, void *arg); +unsigned long HYPERVISOR_hvm_op(int op, void *arg); +int HYPERVISOR_memory_op(unsigned int cmd, void *arg); +int HYPERVISOR_physdev_op(int cmd, void *arg); +int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); +int HYPERVISOR_tmem_op(void *arg); +int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); + +static inline int +HYPERVISOR_suspend(unsigned long start_info_mfn) +{ +	struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; + +	/* start_info_mfn is unused on ARM */ +	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); +} + +static inline void +MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, +			unsigned int new_val, unsigned long flags) +{ +	BUG(); +} + +static inline void +MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, +		 int count, int *success_count, domid_t domid) +{ +	BUG(); +} + +#endif /* _ASM_ARM_XEN_HYPERCALL_H */ diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h new file mode 100644 index 00000000000..1317ee40f4d --- /dev/null +++ b/arch/arm/include/asm/xen/hypervisor.h @@ -0,0 +1,21 @@ +#ifndef _ASM_ARM_XEN_HYPERVISOR_H +#define _ASM_ARM_XEN_HYPERVISOR_H + +extern struct shared_info *HYPERVISOR_shared_info; +extern struct start_info *xen_start_info; + +/* Lazy mode for batching updates / context switch */ +enum paravirt_lazy_mode { +	PARAVIRT_LAZY_NONE, +	PARAVIRT_LAZY_MMU, +	PARAVIRT_LAZY_CPU, +}; + +static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) +{ +	return PARAVIRT_LAZY_NONE; +} + +extern struct dma_map_ops *xen_dma_ops; + +#endif /* _ASM_ARM_XEN_HYPERVISOR_H */ diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h new file mode 100644 index 00000000000..50066006e6b --- /dev/null +++ b/arch/arm/include/asm/xen/interface.h @@ -0,0 +1,82 @@ +/****************************************************************************** + * Guest OS interface to ARM Xen. + * + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 + */ + +#ifndef _ASM_ARM_XEN_INTERFACE_H +#define _ASM_ARM_XEN_INTERFACE_H + +#include <linux/types.h> + +#define uint64_aligned_t uint64_t __attribute__((aligned(8))) + +#define __DEFINE_GUEST_HANDLE(name, type) \ +	typedef struct { union { type *p; uint64_aligned_t q; }; }  \ +        __guest_handle_ ## name + +#define DEFINE_GUEST_HANDLE_STRUCT(name) \ +	__DEFINE_GUEST_HANDLE(name, struct name) +#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) +#define GUEST_HANDLE(name)        __guest_handle_ ## name + +#define set_xen_guest_handle(hnd, val)			\ +	do {						\ +		if (sizeof(hnd) == 8)			\ +			*(uint64_t *)&(hnd) = 0;	\ +		(hnd).p = val;				\ +	} while (0) + +#ifndef __ASSEMBLY__ +/* Explicitly size integers that represent pfns in the interface with + * Xen so that we can have one ABI that works for 32 and 64 bit guests. 
+ * Note that this means that the xen_pfn_t type may be capable of + * representing pfns which the guest cannot represent in its own pfn + * type. However since pfn space is controlled by the guest this is + * fine since it simply wouldn't be able to create any such pfns in + * the first place. + */ +typedef uint64_t xen_pfn_t; +#define PRI_xen_pfn "llx" +typedef uint64_t xen_ulong_t; +#define PRI_xen_ulong "llx" +typedef int64_t xen_long_t; +#define PRI_xen_long "llx" +/* Guest handles for primitive C types. */ +__DEFINE_GUEST_HANDLE(uchar, unsigned char); +__DEFINE_GUEST_HANDLE(uint,  unsigned int); +DEFINE_GUEST_HANDLE(char); +DEFINE_GUEST_HANDLE(int); +DEFINE_GUEST_HANDLE(void); +DEFINE_GUEST_HANDLE(uint64_t); +DEFINE_GUEST_HANDLE(uint32_t); +DEFINE_GUEST_HANDLE(xen_pfn_t); +DEFINE_GUEST_HANDLE(xen_ulong_t); + +/* Maximum number of virtual CPUs in multi-processor guests. */ +#define MAX_VIRT_CPUS 1 + +struct arch_vcpu_info { }; +struct arch_shared_info { }; + +/* TODO: Move pvclock definitions some place arch independent */ +struct pvclock_vcpu_time_info { +	u32   version; +	u32   pad0; +	u64   tsc_timestamp; +	u64   system_time; +	u32   tsc_to_system_mul; +	s8    tsc_shift; +	u8    flags; +	u8    pad[2]; +} __attribute__((__packed__)); /* 32 bytes */ + +/* It is OK to have a 12-byte struct with no padding because it is packed */ +struct pvclock_wall_clock { +	u32   version; +	u32   sec; +	u32   nsec; +} __attribute__((__packed__)); +#endif + +#endif /* _ASM_ARM_XEN_INTERFACE_H */ diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h new file mode 100644 index 00000000000..1109017499e --- /dev/null +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -0,0 +1,50 @@ +#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H +#define _ASM_ARM_XEN_PAGE_COHERENT_H + +#include <asm/page.h> +#include <linux/dma-attrs.h> +#include <linux/dma-mapping.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, +		dma_addr_t *dma_handle, gfp_t flags, +		struct dma_attrs *attrs) +{ +	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, +		void *cpu_addr, dma_addr_t dma_handle, +		struct dma_attrs *attrs) +{ +	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, +	     unsigned long offset, size_t size, enum dma_data_direction dir, +	     struct dma_attrs *attrs) +{ +	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); +} + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, +		size_t size, enum dma_data_direction dir, +		struct dma_attrs *attrs) +{ +	if (__generic_dma_ops(hwdev)->unmap_page) +		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); +} + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, +		dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ +	if (__generic_dma_ops(hwdev)->sync_single_for_cpu) +		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); +} + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, +		dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ +	if (__generic_dma_ops(hwdev)->sync_single_for_device) +		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); +} +#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ diff --git 
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
new file mode 100644
index 00000000000..1109017499e
--- /dev/null
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags,
+		struct dma_attrs *attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle,
+		struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (__generic_dma_ops(hwdev)->unmap_page)
+		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+		__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (__generic_dma_ops(hwdev)->sync_single_for_device)
+		__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
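These wrappers simply forward to the device's generic dma_map_ops, guarding the optional hooks with NULL checks. A minimal round-trip sketch, assuming 'dev' is a fully initialised struct device and the usual kernel headers are in scope:

/* Sketch: allocate, use and free a DMA-coherent buffer through the Xen
 * wrappers above; on ARM these delegate to the device's dma_map_ops. */
static int example_dma_roundtrip(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = xen_alloc_coherent_pages(dev, PAGE_SIZE, &handle,
				       GFP_KERNEL, NULL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with 'handle' and run the transfer ... */

	xen_free_coherent_pages(dev, PAGE_SIZE, buf, handle, NULL);
	return 0;
}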
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
new file mode 100644
index 00000000000..ded062f9b35
--- /dev/null
+++ b/arch/arm/include/asm/xen/page.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_ARM_XEN_PAGE_H
+#define _ASM_ARM_XEN_PAGE_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <xen/xen.h>
+#include <xen/interface/grant_table.h>
+
+#define phys_to_machine_mapping_valid(pfn) (1)
+
+#define pte_mfn	    pte_pfn
+#define mfn_pte	    pfn_pte
+
+/* Xen machine address */
+typedef struct xmaddr {
+	phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+	phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
+
+#define INVALID_P2M_ENTRY      (~0UL)
+
+unsigned long __pfn_to_mfn(unsigned long pfn);
+unsigned long __mfn_to_pfn(unsigned long mfn);
+extern struct rb_root phys_to_mach;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	unsigned long mfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		mfn = __pfn_to_mfn(pfn);
+		if (mfn != INVALID_P2M_ENTRY)
+			return mfn;
+	}
+
+	return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		pfn = __mfn_to_pfn(mfn);
+		if (pfn != INVALID_P2M_ENTRY)
+			return pfn;
+	}
+
+	return mfn;
+}
+
+#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+
+static inline xmaddr_t phys_to_machine(xpaddr_t phys)
+{
+	unsigned offset = phys.paddr & ~PAGE_MASK;
+	return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+}
+
+static inline xpaddr_t machine_to_phys(xmaddr_t machine)
+{
+	unsigned offset = machine.maddr & ~PAGE_MASK;
+	return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+}
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
+#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
+static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
+{
+	/* TODO: assuming it is mapped in the kernel 1:1 */
+	return virt_to_machine(vaddr);
+}
+
+/* TODO: this shouldn't be here, but it is because the frontend drivers
+ * are using it (it's rolled into headers) even though we won't hit the
+ * code path. So for right now just punt with this.
+ */
+static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+	BUG();
+	return NULL;
+}
+
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+				   struct gnttab_map_grant_ref *kmap_ops,
+				   struct page **pages, unsigned int count);
+
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+				     struct gnttab_map_grant_ref *kmap_ops,
+				     struct page **pages, unsigned int count);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+		unsigned long nr_pages);
+
+static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	return __set_phys_to_machine(pfn, mfn);
+}
+
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
+
+#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 7604673dc42..4ffb26d4cad 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -7,7 +7,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/hardirq.h>
 #include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
 
 #define __XOR(a1, a2) a1 ^= a2
 
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
 		xor_speed(&xor_block_arm4regs);	\
 		xor_speed(&xor_block_8regs);	\
 		xor_speed(&xor_block_32regs);	\
+		NEON_TEMPLATES;			\
 	} while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_2(bytes, p1, p2);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_2(bytes, p1, p2);
+		kernel_neon_end();
+	}
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		unsigned long *p3)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_3(bytes, p1, p2, p3);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+		kernel_neon_end();
+	}
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		unsigned long *p3, unsigned long *p4)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_4(bytes, p1, p2, p3, p4);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+		kernel_neon_end();
+	}
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+		unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+	if (in_interrupt()) {
+		xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+	} else {
+		kernel_neon_begin();
+		xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+		kernel_neon_end();
+	}
+}
+
+static struct xor_block_template xor_block_neon = {
+	.name	= "neon",
+	.do_2	= xor_neon_2,
+	.do_3	= xor_neon_3,
+	.do_4	= xor_neon_4,
+	.do_5	= xor_neon_5
+};
+
+#define NEON_TEMPLATES	\
+	do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif
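Two notes on the xor.h hunk: NEON registers cannot be touched in interrupt context, so each wrapper falls back to the scalar 4-register routine there; and NEON_TEMPLATES only offers the NEON candidate to the xor_speed() benchmark when cpu_has_neon() reports NEON support. In a real kernel the fastest template is selected by crypto/xor.c and invoked through xor_blocks(); the sketch below (illustrative only, and it would have to sit under CONFIG_KERNEL_MODE_NEON) just shows a template being called directly:

/* Illustrative only: XOR one page of 'src' into 'dst' using the
 * templates defined above. Real callers go through xor_blocks(). */
static void xor_one_page(unsigned long *dst, unsigned long *src)
{
	if (cpu_has_neon())
		xor_block_neon.do_2(PAGE_SIZE, dst, src);	/* NEON, or scalar if in_interrupt() */
	else
		xor_block_arm4regs.do_2(PAGE_SIZE, dst, src);	/* plain ARM fallback */
}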
