Diffstat (limited to 'arch/xtensa/include')
57 files changed, 1881 insertions(+), 534 deletions(-)
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 6d130278999..c3d20ba6eb8 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -8,16 +8,19 @@ generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h generic-y += fcntl.h -generic-y += futex.h generic-y += hardirq.h +generic-y += hash.h generic-y += ioctl.h generic-y += irq_regs.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kvm_para.h +generic-y += linkage.h generic-y += local.h generic-y += local64.h +generic-y += mcs_spinlock.h generic-y += percpu.h +generic-y += preempt.h generic-y += resource.h generic-y += scatterlist.h generic-y += sections.h @@ -25,4 +28,5 @@ generic-y += siginfo.h generic-y += statfs.h generic-y += termios.h generic-y += topology.h +generic-y += trace_clock.h generic-y += xor.h diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 24f50cada70..e5103b47a8c 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -7,7 +7,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2008 Tensilica Inc. */ #ifndef _XTENSA_ATOMIC_H @@ -19,16 +19,17 @@ #ifdef __KERNEL__ #include <asm/processor.h> #include <asm/cmpxchg.h> +#include <asm/barrier.h> #define ATOMIC_INIT(i) { (i) } /* * This Xtensa implementation assumes that the right mechanism - * for exclusion is for locking interrupts to level 1. + * for exclusion is for locking interrupts to level EXCM_LEVEL. * * Locking interrupts looks like this: * - * rsil a15, 1 + * rsil a15, LOCKLEVEL * <code> * wsr a15, PS * rsync @@ -66,19 +67,35 @@ */ static inline void atomic_add(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "add %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " add %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " add %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +#endif } /** @@ -90,19 +107,35 @@ static inline void atomic_add(int i, atomic_t * v) */ static inline void atomic_sub(int i, atomic_t *v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "sub %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " sub %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " sub %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +#endif } /* @@ -111,40 
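The s32c1i sequences introduced above (and reused by the bitops and cmpxchg changes further down) all share one pattern: load the current value, stage it in the SCOMPARE1 special register, compute the new value, and let s32c1i store it only if memory still matches SCOMPARE1; otherwise the bne branches back to retry. A portable model of that retry loop, written with GCC's __atomic builtins purely for illustration (names prefixed model_ are not kernel API):

static inline void model_atomic_add(int i, int *v)
{
        int old, new;

        do {
                old = __atomic_load_n(v, __ATOMIC_RELAXED);     /* l32i */
                new = old + i;                                  /* add  */
                /* s32c1i: store 'new' only if *v still equals 'old' */
        } while (!__atomic_compare_exchange_n(v, &old, new, 1,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
}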
+144,78 @@ static inline void atomic_sub(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "add %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); - - return vval; +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " add %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + " add %0, %0, %2\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); + + return result; +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " add %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +#endif } static inline int atomic_sub_return(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "sub %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); - - return vval; +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " sub %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + " sub %0, %0, %2\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); + + return result; +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " sub %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +#endif } /** @@ -251,47 +322,72 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { - unsigned int all_f = -1; - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "xor %1, %4, %3 \n\t" - "and %0, %0, %4 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval), "=a" (mask) - : "a" (v), "a" (all_f), "1" (mask) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (~mask), "a" (v) + : "memory" + ); +#else + unsigned int all_f = -1; + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " xor %1, %4, %3\n" + " and %0, %0, %4\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval), "=a" (mask) + : "a" (v), "a" (all_f), "1" (mask) + : "a15", "memory" + ); +#endif } static inline void atomic_set_mask(unsigned int mask, atomic_t *v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "or %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (mask), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + 
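One subtlety in the *_return variants above: on a successful s32c1i the destination register receives the value just read from memory, i.e. the old value, which is why atomic_add_return() and atomic_sub_return() execute one extra add/sub after the loop to reconstruct the value they actually stored. Modeled with a builtin, for illustration only:

static inline int model_atomic_add_return(int i, int *v)
{
        int old = __atomic_fetch_add(v, i, __ATOMIC_SEQ_CST); /* CAS loop */

        return old + i;         /* the trailing "add %0, %0, %2" */
}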
" bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (mask), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " or %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (mask), "a" (v) + : "a15", "memory" + ); +#endif } -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ - diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index 55707a8009d..5b88774c75a 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -3,27 +3,19 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2012 Tensilica Inc. */ #ifndef _XTENSA_SYSTEM_H #define _XTENSA_SYSTEM_H -#define smp_read_barrier_depends() do { } while(0) -#define read_barrier_depends() do { } while(0) - -#define mb() barrier() -#define rmb() mb() +#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) +#define rmb() barrier() #define wmb() mb() -#ifdef CONFIG_SMP -#error smp_* not defined -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#endif +#define smp_mb__before_atomic() barrier() +#define smp_mb__after_atomic() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) +#include <asm-generic/barrier.h> #endif /* _XTENSA_SYSTEM_H */ diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 5270197ddd3..3f44fa2a53e 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -21,15 +21,8 @@ #include <asm/processor.h> #include <asm/byteorder.h> +#include <asm/barrier.h> -#ifdef CONFIG_SMP -# error SMP not supported on this architecture -#endif - -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - -#include <asm-generic/bitops/atomic.h> #include <asm-generic/bitops/non-atomic.h> #if XCHAL_HAVE_NSA @@ -104,6 +97,132 @@ static inline unsigned long __fls(unsigned long word) #endif #include <asm-generic/bitops/fls64.h> + +#if XCHAL_HAVE_S32C1I + +static inline void set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); +} + +static inline void clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (~mask), "a" (p) + : "memory"); +} + +static inline void change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " xor %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : 
"a" (mask), "a" (p) + : "memory"); +} + +static inline int +test_and_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +static inline int +test_and_clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (~mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +static inline int +test_and_change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " xor %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +#else + +#include <asm-generic/bitops/atomic.h> + +#endif /* XCHAL_HAVE_S32C1I */ + #include <asm-generic/bitops/find.h> #include <asm-generic/bitops/le.h> diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h index 9983f2c1b7e..892aab399ac 100644 --- a/arch/xtensa/include/asm/bootparam.h +++ b/arch/xtensa/include/asm/bootparam.h @@ -20,8 +20,9 @@ #define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/ #define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */ #define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ -#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */ +#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. 
*/ #define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ +#define BP_TAG_FDT 0x1006 /* flat device tree addr */ #define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ #define BP_TAG_LAST 0x7E0B /* last tag */ @@ -31,31 +32,19 @@ /* All records are aligned to 4 bytes */ typedef struct bp_tag { - unsigned short id; /* tag id */ - unsigned short size; /* size of this record excluding the structure*/ - unsigned long data[0]; /* data */ + unsigned short id; /* tag id */ + unsigned short size; /* size of this record excluding the structure*/ + unsigned long data[0]; /* data */ } bp_tag_t; -typedef struct meminfo { - unsigned long type; - unsigned long start; - unsigned long end; -} meminfo_t; - -#define SYSMEM_BANKS_MAX 5 +struct bp_meminfo { + unsigned long type; + unsigned long start; + unsigned long end; +}; #define MEMORY_TYPE_CONVENTIONAL 0x1000 #define MEMORY_TYPE_NONE 0x2000 -typedef struct sysmem_info { - int nr_banks; - meminfo_t bank[SYSMEM_BANKS_MAX]; -} sysmem_info_t; - -extern sysmem_info_t sysmem; - #endif #endif - - - diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h index 2c20a58f94c..60e18773ecb 100644 --- a/arch/xtensa/include/asm/cacheasm.h +++ b/arch/xtensa/include/asm/cacheasm.h @@ -174,4 +174,3 @@ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH .endm - diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 569fec4f9a2..555a98a1845 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -1,18 +1,14 @@ /* - * include/asm-xtensa/cacheflush.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * - * (C) 2001 - 2007 Tensilica Inc. + * (C) 2001 - 2013 Tensilica Inc. 
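The bp_tag records above form a list linked by size: each record is 4-byte aligned, size counts only the payload, and the list ends at BP_TAG_LAST. A hypothetical walker, where handle_tag() is a made-up callback and the alignment/termination details are assumptions for illustration:

extern void handle_tag(unsigned short id, unsigned long *data,
                       unsigned short size);   /* hypothetical */

static void walk_bp_tags(struct bp_tag *tag)
{
        while (tag->id != BP_TAG_LAST) {
                handle_tag(tag->id, tag->data, tag->size);
                /* skip header + payload, keeping 4-byte alignment */
                tag = (struct bp_tag *)((unsigned long)(tag + 1) +
                                        ((tag->size + 3) & ~3UL));
        }
}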
*/ #ifndef _XTENSA_CACHEFLUSH_H #define _XTENSA_CACHEFLUSH_H -#ifdef __KERNEL__ - #include <linux/mm.h> #include <asm/processor.h> #include <asm/page.h> @@ -51,7 +47,6 @@ extern void __invalidate_icache_page(unsigned long); extern void __invalidate_icache_range(unsigned long, unsigned long); extern void __invalidate_dcache_range(unsigned long, unsigned long); - #if XCHAL_DCACHE_IS_WRITEBACK extern void __flush_invalidate_dcache_all(void); extern void __flush_dcache_page(unsigned long); @@ -87,9 +82,22 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, * (see also Documentation/cachetlb.txt) */ -#if (DCACHE_WAY_SIZE > PAGE_SIZE) +#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP) -#define flush_cache_all() \ +#ifdef CONFIG_SMP +void flush_cache_all(void); +void flush_cache_range(struct vm_area_struct*, ulong, ulong); +void flush_icache_range(unsigned long start, unsigned long end); +void flush_cache_page(struct vm_area_struct*, + unsigned long, unsigned long); +#else +#define flush_cache_all local_flush_cache_all +#define flush_cache_range local_flush_cache_range +#define flush_icache_range local_flush_icache_range +#define flush_cache_page local_flush_cache_page +#endif + +#define local_flush_cache_all() \ do { \ __flush_invalidate_dcache_all(); \ __invalidate_icache_all(); \ @@ -103,8 +111,11 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page*); -extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); -extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); + +void local_flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +void local_flush_cache_page(struct vm_area_struct *vma, + unsigned long address, unsigned long pfn); #else @@ -118,13 +129,14 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned lon #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) -#define flush_cache_page(vma,addr,pfn) do { } while (0) -#define flush_cache_range(vma,start,end) do { } while (0) +#define flush_icache_range local_flush_icache_range +#define flush_cache_page(vma, addr, pfn) do { } while (0) +#define flush_cache_range(vma, start, end) do { } while (0) #endif /* Ensure consistency between data and instruction cache. 
*/ -#define flush_icache_range(start,end) \ +#define local_flush_icache_range(start, end) \ do { \ __flush_dcache_range(start, (end) - (start)); \ __invalidate_icache_range(start,(end) - (start)); \ @@ -252,5 +264,4 @@ static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size) } } -#endif /* __KERNEL__ */ #endif /* _XTENSA_CACHEFLUSH_H */ diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index e4d831a3077..0593de689b5 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -12,6 +12,7 @@ #define _XTENSA_CHECKSUM_H #include <linux/in6.h> +#include <asm/uaccess.h> #include <variant/core.h> /* @@ -36,8 +37,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); * better 64-bit) boundary */ -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone @@ -54,7 +56,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, static inline __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) + int len, __wsum sum, int *err_ptr) { return csum_partial_copy_generic((__force const void *)src, dst, len, sum, err_ptr, NULL); @@ -112,7 +114,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. */ - : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr) + : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), + "=&r" (endaddr) : "1" (iph), "2" (ihl) : "memory"); @@ -168,7 +171,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, static __inline__ __sum16 ip_compute_csum(const void *buff, int len) { - return csum_fold (csum_partial(buff, len, 0)); + return csum_fold (csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM @@ -238,11 +241,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum sum, int *err_ptr) +static __inline__ __wsum csum_and_copy_to_user(const void *src, + void __user *dst, int len, + __wsum sum, int *err_ptr) { if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); + return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr); if (len) *err_ptr = -EFAULT; diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 64dad04a9d2..370b26f3841 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -22,17 +22,30 @@ static inline unsigned long __cmpxchg_u32(volatile int *p, int old, int new) { - __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %1, 0 \n\t" - "bne %0, %2, 1f \n\t" - "s32i %3, %1, 0 \n\t" - "1: \n\t" - "wsr a15, ps \n\t" - "rsync \n\t" - : "=&a" (old) - : "a" (p), "a" (old), "r" (new) - : "a15", "memory"); - return old; +#if XCHAL_HAVE_S32C1I + __asm__ __volatile__( + " wsr %2, scompare1\n" + " s32c1i %0, %1, 0\n" + : "+a" (new) + : "a" (p), "a" (old) + : "memory" + ); + + return new; +#else 
+ __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %1, 0\n" + " bne %0, %2, 1f\n" + " s32i %3, %1, 0\n" + "1:\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (old) + : "a" (p), "a" (old), "r" (new) + : "a15", "memory"); + return old; +#endif } /* This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). */ @@ -80,6 +93,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ (unsigned long)(n), sizeof(*(ptr)))) #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) +#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) /* * xchg_u32 @@ -93,19 +107,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, static inline unsigned long xchg_u32(volatile int * m, unsigned long val) { - unsigned long tmp; - __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %1, 0 \n\t" - "s32i %2, %1, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n\t" - : "=&a" (tmp) - : "a" (m), "a" (val) - : "a15", "memory"); - return tmp; +#if XCHAL_HAVE_S32C1I + unsigned long tmp, result; + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " mov %0, %3\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (m), "a" (val) + : "memory" + ); + return result; +#else + unsigned long tmp; + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %1, 0\n" + " s32i %2, %1, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (tmp) + : "a" (m), "a" (val) + : "a15", "memory"); + return tmp; +#endif } -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) /* * This only works if the compiler isn't horribly bad at optimizing. diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h index 8d1eb5d7864..47e46dcf5d4 100644 --- a/arch/xtensa/include/asm/current.h +++ b/arch/xtensa/include/asm/current.h @@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void) #define GET_CURRENT(reg,sp) \ GET_THREAD_INFO(reg,sp); \ - l32i reg, reg, TI_TASK \ + l32i reg, reg, TI_TASK \ #endif diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h index 58c0a4fd400..24304b39a5c 100644 --- a/arch/xtensa/include/asm/delay.h +++ b/arch/xtensa/include/asm/delay.h @@ -12,38 +12,64 @@ #ifndef _XTENSA_DELAY_H #define _XTENSA_DELAY_H -#include <asm/processor.h> +#include <asm/timex.h> #include <asm/param.h> extern unsigned long loops_per_jiffy; static inline void __delay(unsigned long loops) { - /* 2 cycles per loop. */ - __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" - : "=r" (loops) : "0" (loops)); + if (__builtin_constant_p(loops) && loops < 2) + __asm__ __volatile__ ("nop"); + else if (loops >= 2) + /* 2 cycles per loop. 
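With S32C1I, __cmpxchg_u32() above collapses to two instructions: stage old in SCOMPARE1, then s32c1i both attempts the store of new and returns the observed memory value in the same register, so the caller can compare it against old. xchg_u32() has no unconditional-store form on these cores and therefore needs the retry loop. Portable models of both, as illustration only, not kernel API:

static inline int model_cmpxchg_u32(volatile int *p, int old, int new)
{
        int observed = old;

        __atomic_compare_exchange_n(p, &observed, new, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return observed;        /* == old exactly when the swap happened */
}

static inline unsigned long model_xchg_u32(volatile int *m, unsigned long val)
{
        return __atomic_exchange_n(m, val, __ATOMIC_SEQ_CST);
}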
*/ + __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" + : "+r" (loops)); } -static __inline__ u32 xtensa_get_ccount(void) +/* Undefined function to get compile-time error */ +void __bad_udelay(void); +void __bad_ndelay(void); + +#define __MAX_UDELAY 30000 +#define __MAX_NDELAY 30000 + +static inline void __udelay(unsigned long usecs) { - u32 ccount; - asm volatile ("rsr %0, ccount\n" : "=r" (ccount)); - return ccount; + unsigned long start = get_ccount(); + unsigned long cycles = (usecs * (ccount_freq >> 15)) >> 5; + + /* Note: all variables are unsigned (can wrap around)! */ + while (((unsigned long)get_ccount()) - start < cycles) + cpu_relax(); } -/* For SMP/NUMA systems, change boot_cpu_data to something like - * local_cpu_data->... where local_cpu_data points to the current - * cpu. */ +static inline void udelay(unsigned long usec) +{ + if (__builtin_constant_p(usec) && usec >= __MAX_UDELAY) + __bad_udelay(); + else + __udelay(usec); +} -static __inline__ void udelay (unsigned long usecs) +static inline void __ndelay(unsigned long nsec) { - unsigned long start = xtensa_get_ccount(); - unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ)); + /* + * Inner shift makes sure multiplication doesn't overflow + * for legitimate nsec values + */ + unsigned long cycles = (nsec * (ccount_freq >> 15)) >> 15; + __delay(cycles); +} - /* Note: all variables are unsigned (can wrap around)! */ - while (((unsigned long)xtensa_get_ccount()) - start < cycles) - ; +#define ndelay(n) ndelay(n) + +static inline void ndelay(unsigned long nsec) +{ + if (__builtin_constant_p(nsec) && nsec >= __MAX_NDELAY) + __bad_ndelay(); + else + __ndelay(nsec); } #endif - diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 492c95790ad..172a02a6ad1 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -16,6 +16,8 @@ #include <linux/mm.h> #include <linux/scatterlist.h> +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + /* * DMA-consistent mapping functions. 
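The fixed-point arithmetic in __udelay() above deserves a worked example. Splitting the division into a shift before the multiply and one after keeps the 32-bit intermediate product from overflowing for any delay up to __MAX_UDELAY, at the cost of approximating a divide by 10^6 with a divide by 2^20. With a hypothetical 500 MHz ccount_freq:

static unsigned long usecs_to_cycles(unsigned long usecs,
                                     unsigned long ccount_freq)
{
        /*
         * usecs * (freq >> 15) >> 5  ==  usecs * freq / 2^20
         * e.g. usecs_to_cycles(1000, 500000000)
         *        = (1000 * 15258) >> 5 = 476812 cycles
         * (exact value 500000; 2^20 > 10^6, so a slight underestimate)
         * Overflow bound: 30000 us * (1 GHz >> 15) ~= 9.2e8 < 2^32.
         */
        return (usecs * (ccount_freq >> 15)) >> 5;
}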
*/ @@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, } static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction direction) { consistent_sync((void *)bus_to_virt(dma_handle), size, direction); } @@ -168,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, consistent_sync(vaddr, size, direction); } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif /* _XTENSA_DMA_MAPPING_H */ diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h index 5293312bc6a..eacb25a4171 100644 --- a/arch/xtensa/include/asm/elf.h +++ b/arch/xtensa/include/asm/elf.h @@ -84,7 +84,8 @@ typedef struct { elf_greg_t sar; elf_greg_t windowstart; elf_greg_t windowbase; - elf_greg_t reserved[8+48]; + elf_greg_t threadptr; + elf_greg_t reserved[7+48]; elf_greg_t a[64]; } xtensa_gregset_t; @@ -168,11 +169,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); */ #define ELF_PLAT_INIT(_r, load_addr) \ - do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ - _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ - _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ - _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ - } while (0) + do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ + _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ + _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ + _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ + } while (0) typedef struct { xtregs_opt_t opt; diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h new file mode 100644 index 00000000000..9f6c33d0428 --- /dev/null +++ b/arch/xtensa/include/asm/fixmap.h @@ -0,0 +1,58 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 + */ + +#ifndef _ASM_FIXMAP_H +#define _ASM_FIXMAP_H + +#include <asm/pgtable.h> +#ifdef CONFIG_HIGHMEM +#include <linux/threads.h> +#include <asm/kmap_types.h> +#endif + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. We allocate these special addresses + * from the end of the consistent memory region backwards. + * Also this lets us do fail-safe vmalloc(), we + * can guarantee that these special addresses and + * vmalloc()-ed addresses never overlap. + * + * these 'compile-time allocated' memory buffers are + * fixed-size 4k pages. (or larger if used with an increment + * higher than 1) use fixmap_set(idx,phys) to associate + * physical memory with fixmap indices. 
+ */ +enum fixed_addresses { +#ifdef CONFIG_HIGHMEM + /* reserved pte's for temporary kernel mappings */ + FIX_KMAP_BEGIN, + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, +#endif + __end_of_fixed_addresses +}; + +#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE) +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) + +#include <asm-generic/fixmap.h> + +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset_kernel( \ + pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \ + (vaddr) \ + ) + +#endif diff --git a/arch/xtensa/include/asm/ftrace.h b/arch/xtensa/include/asm/ftrace.h index 40a8c178f10..6c6d9a9f185 100644 --- a/arch/xtensa/include/asm/ftrace.h +++ b/arch/xtensa/include/asm/ftrace.h @@ -1 +1,40 @@ -/* empty */ +/* + * arch/xtensa/include/asm/ftrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Tensilica Inc. + */ +#ifndef _XTENSA_FTRACE_H +#define _XTENSA_FTRACE_H + +#include <asm/processor.h> + +#ifndef __ASSEMBLY__ +#define ftrace_return_address0 ({ unsigned long a0, a1; \ + __asm__ __volatile__ ( \ + "mov %0, a0\n" \ + "mov %1, a1\n" \ + : "=r"(a0), "=r"(a1)); \ + MAKE_PC_FROM_RA(a0, a1); }) + +#ifdef CONFIG_FRAME_POINTER +extern unsigned long return_address(unsigned level); +#define ftrace_return_address(n) return_address(n) +#endif +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_FUNCTION_TRACER + +#define MCOUNT_ADDR ((unsigned long)(_mcount)) +#define MCOUNT_INSN_SIZE 3 + +#ifndef __ASSEMBLY__ +extern void _mcount(void); +#define mcount _mcount +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FUNCTION_TRACER */ + +#endif /* _XTENSA_FTRACE_H */ diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h new file mode 100644 index 00000000000..b39531babec --- /dev/null +++ b/arch/xtensa/include/asm/futex.h @@ -0,0 +1,147 @@ +/* + * Atomic futex routines + * + * Based on the PowerPC implementataion + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Copyright (C) 2013 TangoTec Ltd. 
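ftrace_return_address0 above reads the raw a0/a1 pair and converts it with MAKE_PC_FROM_RA(). On the windowed ABI the top two bits of a return address encode the caller's window increment rather than address bits, so the PC has to be reassembled from the low 30 bits of the return address plus the high bits of a nearby address (the stack pointer). A sketch, assuming the usual definition from asm/processor.h:

#define MODEL_PC_FROM_RA(ra, sp) \
        (((ra) & 0x3fffffff) |  /* low 30 bits: real address bits  */ \
         ((sp) & 0xc0000000))   /* high 2 bits: taken from the stack */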
+ * + * Baruch Siach <baruch@tkos.co.il> + */ + +#ifndef _ASM_XTENSA_FUTEX_H +#define _ASM_XTENSA_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <linux/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ + __asm__ __volatile( \ + "1: l32i %0, %2, 0\n" \ + insn "\n" \ + " wsr %0, scompare1\n" \ + "2: s32c1i %1, %2, 0\n" \ + " bne %1, %0, 1b\n" \ + " movi %1, 0\n" \ + "3:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 4\n" \ + "4: .long 3b\n" \ + "5: l32r %0, 4b\n" \ + " movi %1, %3\n" \ + " jx %0\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .long 1b,5b,2b,5b\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r" (ret) \ + : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \ + : "memory") + +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + +#if !XCHAL_HAVE_S32C1I + return -ENOSYS; +#endif + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr, + oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr, + oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr, + ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr, + oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (ret) + return ret; + + switch (cmp) { + case FUTEX_OP_CMP_EQ: return (oldval == cmparg); + case FUTEX_OP_CMP_NE: return (oldval != cmparg); + case FUTEX_OP_CMP_LT: return (oldval < cmparg); + case FUTEX_OP_CMP_GE: return (oldval >= cmparg); + case FUTEX_OP_CMP_LE: return (oldval <= cmparg); + case FUTEX_OP_CMP_GT: return (oldval > cmparg); + } + + return -ENOSYS; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 prev; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + +#if !XCHAL_HAVE_S32C1I + return -ENOSYS; +#endif + + __asm__ __volatile__ ( + " # futex_atomic_cmpxchg_inatomic\n" + "1: l32i %1, %3, 0\n" + " mov %0, %5\n" + " wsr %1, scompare1\n" + "2: s32c1i %0, %3, 0\n" + "3:\n" + " .section .fixup,\"ax\"\n" + " .align 4\n" + "4: .long 3b\n" + "5: l32r %1, 4b\n" + " movi %0, %6\n" + " jx %1\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .long 1b,5b,2b,5b\n" + " .previous\n" + : "+r" (ret), "=&r" (prev), "+m" (*uaddr) + : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT) + : "memory"); + + *uval = prev; + return ret; +} + +#endif /* __KERNEL__ */ +#endif /* _ASM_XTENSA_FUTEX_H */ diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 0a046ca5a68..2653ef5d55f 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -6,12 +6,54 @@ * this archive for more details. * * Copyright (C) 2003 - 2005 Tensilica Inc. + * Copyright (C) 2014 Cadence Design Systems Inc. 
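futex_atomic_op_inuser() above unpacks four fields from the single encoded_op word: op in bits 31-28 (bit 31 doubles as the FUTEX_OP_OPARG_SHIFT flag, tested separately), cmp in bits 27-24, and two signed 12-bit arguments below them, sign-extended by the shift-left/shift-right pairs. A standalone decoding sketch, with printf only for illustration:

#include <stdio.h>

static void decode_futex_op(int encoded_op)
{
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (encoded_op << 8) >> 20;   /* bits 23..12, signed */
        int cmparg = (encoded_op << 20) >> 20;  /* bits 11..0, signed  */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}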
*/ #ifndef _XTENSA_HIGHMEM_H #define _XTENSA_HIGHMEM_H -extern void flush_cache_kmaps(void); +#include <asm/cacheflush.h> +#include <asm/fixmap.h> +#include <asm/kmap_types.h> +#include <asm/pgtable.h> -#endif +#define PKMAP_BASE (FIXADDR_START - PMD_SIZE) +#define LAST_PKMAP PTRS_PER_PTE +#define LAST_PKMAP_MASK (LAST_PKMAP - 1) +#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) +#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) + +#define kmap_prot PAGE_KERNEL + +extern pte_t *pkmap_page_table; + +void *kmap_high(struct page *page); +void kunmap_high(struct page *page); + +static inline void *kmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return page_address(page); + return kmap_high(page); +} +static inline void kunmap(struct page *page) +{ + BUG_ON(in_interrupt()); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +static inline void flush_cache_kmaps(void) +{ + flush_cache_all(); +} + +void *kmap_atomic(struct page *page); +void __kunmap_atomic(void *kvaddr); + +void kmap_init(void); + +#endif diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h new file mode 100644 index 00000000000..600781edc8a --- /dev/null +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -0,0 +1,163 @@ +/* + * arch/xtensa/include/asm/initialize_mmu.h + * + * Initializes MMU: + * + * For the new V3 MMU we remap the TLB from virtual == physical + * to the standard Linux mapping used in earlier MMU's. + * + * The the MMU we also support a new configuration register that + * specifies how the S32C1I instruction operates with the cache + * controller. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2008 - 2012 Tensilica, Inc. + * + * Marc Gauthier <marc@tensilica.com> + * Pete Delaney <piet@tensilica.com> + */ + +#ifndef _XTENSA_INITIALIZE_MMU_H +#define _XTENSA_INITIALIZE_MMU_H + +#include <asm/pgtable.h> +#include <asm/vectors.h> + +#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) +#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) + +#ifdef __ASSEMBLY__ + +#define XTENSA_HWVERSION_RC_2009_0 230000 + + .macro initialize_mmu + +#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) +/* + * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it. + * For details see Documentation/xtensa/atomctl.txt + */ +#if XCHAL_DCACHE_IS_COHERENT + movi a3, 0x25 /* For SMP/MX -- internal for writeback, + * RCW otherwise + */ +#else + movi a3, 0x29 /* non-MX -- Most cores use Std Memory + * Controlers which usually can't use RCW + */ +#endif + wsr a3, atomctl +#endif /* XCHAL_HAVE_S32C1I && + * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) + */ + +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +/* + * Have MMU v3 + */ + +#if !XCHAL_HAVE_VECBASE +# error "MMU v3 requires reloc vectors" +#endif + + movi a1, 0 + _call0 1f + _j 2f + + .align 4 +1: movi a2, 0x10000000 + movi a3, 0x18000000 + add a2, a2, a0 +9: bgeu a2, a3, 9b /* PC is out of the expected range */ + + /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */ + + movi a2, 0x40000006 + idtlb a2 + iitlb a2 + isync + + /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code + * and jump to the new mapping. 
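The kmap()/kunmap() pair declared above is the sleeping, long-lived interface: low-memory pages short-circuit to page_address(), and only genuine highmem pages take a PKMAP slot. A hypothetical caller (in-kernel this would pull in linux/highmem.h and linux/string.h):

static void zero_page_contents(struct page *page)
{
        void *vaddr = kmap(page);       /* may sleep; not for interrupts */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);                   /* drop the PKMAP reference */
}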
+ */ + + srli a3, a0, 27 + slli a3, a3, 27 + addi a3, a3, CA_BYPASS + addi a7, a2, -1 + wdtlb a3, a7 + witlb a3, a7 + isync + + slli a4, a0, 5 + srli a4, a4, 5 + addi a5, a2, -6 + add a4, a4, a5 + jx a4 + + /* Step 3: unmap everything other than current area. + * Start at 0x60000000, wrap around, and end with 0x20000000 + */ +2: movi a4, 0x20000000 + add a5, a2, a4 +3: idtlb a5 + iitlb a5 + add a5, a5, a4 + bne a5, a2, 3b + + /* Step 4: Setup MMU with the old V2 mappings. */ + movi a6, 0x01000000 + wsr a6, ITLBCFG + wsr a6, DTLBCFG + isync + + movi a5, 0xd0000005 + movi a4, CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xd8000005 + movi a4, CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 + + movi a5, XCHAL_KIO_CACHED_VADDR + 6 + movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, XCHAL_KIO_BYPASS_VADDR + 6 + movi a4, XCHAL_KIO_DEFAULT_PADDR + CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 + + isync + + /* Jump to self, using MMU v2 mappings. */ + movi a4, 1f + jx a4 + +1: + movi a2, VECBASE_RESET_VADDR + wsr a2, vecbase + + /* Step 5: remove temporary mapping. */ + idtlb a7 + iitlb a7 + isync + + movi a0, 0 + wsr a0, ptevaddr + rsync + +#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && + XCHAL_HAVE_SPANNING_WAY */ + + .endm + +#endif /*__ASSEMBLY__*/ + +#endif /* _XTENSA_INITIALIZE_MMU_H */ diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 700c2e6f2d2..74944207167 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -14,20 +14,26 @@ #ifdef __KERNEL__ #include <asm/byteorder.h> #include <asm/page.h> +#include <asm/vectors.h> #include <linux/bug.h> #include <linux/kernel.h> #include <linux/types.h> -#define XCHAL_KIO_CACHED_VADDR 0xe0000000 -#define XCHAL_KIO_BYPASS_VADDR 0xf0000000 -#define XCHAL_KIO_PADDR 0xf0000000 -#define XCHAL_KIO_SIZE 0x10000000 - #define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x)) #define IO_SPACE_LIMIT ~0 #ifdef CONFIG_MMU + +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) +extern unsigned long xtensa_kio_paddr; + +static inline unsigned long xtensa_get_kio_paddr(void) +{ + return xtensa_kio_paddr; +} +#endif + /* * Return the virtual address for the specified bus memory. * Note that we currently don't support any address outside the KIO segment. 
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index 4c0ccc9c4f4..f71f88ea764 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h @@ -43,5 +43,14 @@ static __inline__ int irq_canonicalize(int irq) } struct irqaction; +struct irq_domain; + +void migrate_irqs(void); +int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, + unsigned long int_irq, unsigned long ext_irq, + unsigned long *out_hwirq, unsigned int *out_type); +int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw); +unsigned xtensa_map_ext_irq(unsigned ext_irq); +unsigned xtensa_get_ext_irq_no(unsigned irq); #endif /* _XTENSA_IRQ_H */ diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h index f865b1c1eae..ea36674c6ec 100644 --- a/arch/xtensa/include/asm/irqflags.h +++ b/arch/xtensa/include/asm/irqflags.h @@ -47,7 +47,10 @@ static inline void arch_local_irq_restore(unsigned long flags) static inline bool arch_irqs_disabled_flags(unsigned long flags) { - return (flags & 0xf) != 0; +#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL +#error "XCHAL_EXCM_LEVEL and 1<<PS_EXCM_BIT must be no less than LOCKLEVEL" +#endif + return (flags & (PS_INTLEVEL_MASK | (1 << PS_EXCM_BIT))) >= LOCKLEVEL; } static inline bool arch_irqs_disabled(void) diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h deleted file mode 100644 index bf2128a99d7..00000000000 --- a/arch/xtensa/include/asm/linkage.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * include/asm-xtensa/linkage.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - */ - -#ifndef _XTENSA_LINKAGE_H -#define _XTENSA_LINKAGE_H - -/* Nothing to do here ... */ - -#endif /* _XTENSA_LINKAGE_H */ diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h index 04890d6e233..71afe418d0e 100644 --- a/arch/xtensa/include/asm/mmu.h +++ b/arch/xtensa/include/asm/mmu.h @@ -1,22 +1,22 @@ /* - * include/asm-xtensa/mmu.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. */ #ifndef _XTENSA_MMU_H #define _XTENSA_MMU_H #ifndef CONFIG_MMU -#include <asm/nommu.h> +#include <asm-generic/mmu.h> #else -/* Default "unsigned long" context */ -typedef unsigned long mm_context_t; +typedef struct { + unsigned long asid[NR_CPUS]; + unsigned int cpu; +} mm_context_t; #endif /* CONFIG_MMU */ #endif /* _XTENSA_MMU_H */ diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index feb10af9651..d33c71a8c9e 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -1,13 +1,11 @@ /* - * include/asm-xtensa/mmu_context.h - * * Switch an MMU context. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. 
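The rewritten arch_irqs_disabled_flags() in the irqflags.h hunk above no longer tests INTLEVEL != 0 but compares against LOCKLEVEL, and it must also treat a set PS.EXCM bit as "disabled", since EXCM masks interrupts up to EXCM_LEVEL. A worked model, with the PS field positions (INTLEVEL in bits 3-0, EXCM in bit 4) assumed from the Xtensa PS layout:

#define MODEL_PS_INTLEVEL_MASK  0x0000000f
#define MODEL_PS_EXCM           0x00000010      /* 1 << PS_EXCM_BIT */

static int model_irqs_disabled(unsigned long flags, unsigned long locklevel)
{
        /* EXCM alone gives 0x10, >= locklevel for any locklevel <= 16 */
        return (flags & (MODEL_PS_INTLEVEL_MASK | MODEL_PS_EXCM)) >=
                locklevel;
}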
*/ #ifndef _XTENSA_MMU_CONTEXT_H @@ -20,22 +18,25 @@ #include <linux/stringify.h> #include <linux/sched.h> -#include <variant/core.h> +#include <asm/vectors.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm-generic/mm_hooks.h> +#include <asm-generic/percpu.h> #if (XCHAL_HAVE_TLBS != 1) # error "Linux must have an MMU!" #endif -extern unsigned long asid_cache; +DECLARE_PER_CPU(unsigned long, asid_cache); +#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu) /* * NO_CONTEXT is the invalid ASID value that we don't ever assign to - * any user or kernel context. + * any user or kernel context. We use the reserved values in the + * ASID_INSERT macro below. * * 0 invalid * 1 kernel @@ -49,6 +50,12 @@ extern unsigned long asid_cache; #define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1) #define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8)) +#ifdef CONFIG_MMU +void init_mmu(void); +#else +static inline void init_mmu(void) { } +#endif + static inline void set_rasid_register (unsigned long val) { __asm__ __volatile__ (" wsr %0, rasid\n\t" @@ -62,64 +69,77 @@ static inline unsigned long get_rasid_register (void) return tmp; } -static inline void -__get_new_mmu_context(struct mm_struct *mm) +static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu) { - extern void flush_tlb_all(void); - if (! (++asid_cache & ASID_MASK) ) { - flush_tlb_all(); /* start new asid cycle */ - asid_cache += ASID_USER_FIRST; + unsigned long asid = cpu_asid_cache(cpu); + if ((++asid & ASID_MASK) == 0) { + /* + * Start new asid cycle; continue counting with next + * incarnation bits; skipping over 0, 1, 2, 3. + */ + local_flush_tlb_all(); + asid += ASID_USER_FIRST; } - mm->context = asid_cache; + cpu_asid_cache(cpu) = asid; + mm->context.asid[cpu] = asid; + mm->context.cpu = cpu; } -static inline void -__load_mmu_context(struct mm_struct *mm) +static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) { - set_rasid_register(ASID_INSERT(mm->context)); + /* + * Check if our ASID is of an older version and thus invalid. + */ + + if (mm) { + unsigned long asid = mm->context.asid[cpu]; + + if (asid == NO_CONTEXT || + ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK)) + get_new_mmu_context(mm, cpu); + } +} + +static inline void activate_context(struct mm_struct *mm, unsigned int cpu) +{ + get_mmu_context(mm, cpu); + set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); invalidate_page_directory(); } /* * Initialize the context related info for a new mm_struct - * instance. + * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing + * to -1 says the process has never run on any core. */ -static inline int -init_new_context(struct task_struct *tsk, struct mm_struct *mm) +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) { - mm->context = NO_CONTEXT; + int cpu; + for_each_possible_cpu(cpu) { + mm->context.asid[cpu] = NO_CONTEXT; + } + mm->context.cpu = -1; return 0; } -/* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. - */ -static inline void -activate_mm(struct mm_struct *prev, struct mm_struct *next) -{ - /* Unconditionally get a new ASID. 
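The per-cpu ASID scheme above packs a generation counter into the bits above ASID_MASK: whenever the low bits wrap, the TLB is flushed and the generation advances, instantly invalidating every mm whose cached ASID carries an older generation. A sketch restating the check done in get_mmu_context():

static inline int model_asid_valid(unsigned long mm_asid,
                                   unsigned long cpu_asid_cache)
{
        /* NO_CONTEXT never matches; otherwise generations must agree */
        return mm_asid != NO_CONTEXT &&
               ((mm_asid ^ cpu_asid_cache) & ~ASID_MASK) == 0;
}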
*/ - - __get_new_mmu_context(next); - __load_mmu_context(next); -} - - static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) + struct task_struct *tsk) { - unsigned long asid = asid_cache; - - /* Check if our ASID is of an older version and thus invalid */ - - if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK)) - __get_new_mmu_context(next); - - __load_mmu_context(next); + unsigned int cpu = smp_processor_id(); + int migrated = next->context.cpu != cpu; + /* Flush the icache if we migrated to a new core. */ + if (migrated) { + __invalidate_icache_all(); + next->context.cpu = cpu; + } + if (migrated || prev != next) + activate_context(next, cpu); } -#define deactivate_mm(tsk, mm) do { } while(0) +#define activate_mm(prev, next) switch_mm((prev), (next), NULL) +#define deactivate_mm(tsk, mm) do { } while (0) /* * Destroy context related info for an mm_struct that is about diff --git a/arch/xtensa/include/asm/mxregs.h b/arch/xtensa/include/asm/mxregs.h new file mode 100644 index 00000000000..73dcc5456f6 --- /dev/null +++ b/arch/xtensa/include/asm/mxregs.h @@ -0,0 +1,46 @@ +/* + * Xtensa MX interrupt distributor + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 - 2013 Tensilica Inc. + */ + +#ifndef _XTENSA_MXREGS_H +#define _XTENSA_MXREGS_H + +/* + * RER/WER at, as Read/write external register + * at: value + * as: address + * + * Address Value + * 00nn 0...0p..p Interrupt Routing, route IRQ n to processor p + * 01pp 0...0d..d 16 bits (d) 'ored' as single IPI to processor p + * 0180 0...0m..m Clear enable specified by mask (m) + * 0184 0...0m..m Set enable specified by mask (m) + * 0190 0...0x..x 8-bit IPI partition register + * VVVVVVVVPPPPUUUUUUUUUUUUUUUUU + * V (10-bit) Release/Version + * P ( 4-bit) Number of cores - 1 + * U (18-bit) ID + * 01a0 i.......i 32-bit ConfigID + * 0200 0...0m..m RunStall core 'n' + * 0220 c Cache coherency enabled + */ + +#define MIROUT(irq) (0x000 + (irq)) +#define MIPICAUSE(cpu) (0x100 + (cpu)) +#define MIPISET(cause) (0x140 + (cause)) +#define MIENG 0x180 +#define MIENGSET 0x184 +#define MIASG 0x188 /* Read Global Assert Register */ +#define MIASGSET 0x18c /* Set Global Addert Regiter */ +#define MIPIPART 0x190 +#define SYSCFGID 0x1a0 +#define MPSCORE 0x200 +#define CCON 0x220 + +#endif /* _XTENSA_MXREGS_H */ diff --git a/arch/xtensa/include/asm/nommu.h b/arch/xtensa/include/asm/nommu.h deleted file mode 100644 index dce2c438c5b..00000000000 --- a/arch/xtensa/include/asm/nommu.h +++ /dev/null @@ -1,3 +0,0 @@ -typedef struct { - unsigned long end_brk; -} mm_context_t; diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h index 599e7a2e729..3407cf7989b 100644 --- a/arch/xtensa/include/asm/nommu_context.h +++ b/arch/xtensa/include/asm/nommu_context.h @@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm) { return 0; } diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 7a5591a71f8..47f582333f6 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -29,19 +29,19 @@ * PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 
-#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_SHIFT 12 +#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) #ifdef CONFIG_MMU -#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR -#define MAX_MEM_PFN XCHAL_KSEG_SIZE +#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR +#define MAX_MEM_PFN XCHAL_KSEG_SIZE #else -#define PAGE_OFFSET 0 -#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) +#define PAGE_OFFSET 0 +#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) #endif -#define PGTABLE_START 0x80000000 +#define PGTABLE_START 0x80000000 /* * Cache aliasing: @@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*); #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) -#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) +#define pfn_valid(pfn) \ + ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) + #ifdef CONFIG_DISCONTIGMEM # error CONFIG_DISCONTIGMEM not supported #endif diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h index 00fcbd7c534..0b68c76ec1e 100644 --- a/arch/xtensa/include/asm/pci-bridge.h +++ b/arch/xtensa/include/asm/pci-bridge.h @@ -35,7 +35,7 @@ struct pci_space { struct pci_controller { int index; /* used for pci_controller_num */ struct pci_controller *next; - struct pci_bus *bus; + struct pci_bus *bus; void *arch_data; int first_busno; diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index 05244f07dd3..5d52dc43dfe 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -22,11 +22,6 @@ extern struct pci_controller* pcibios_alloc_controller(void); -static inline void pcibios_penalize_isa_irq(int irq) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Assume some values. 
(We should revise them, if necessary) */ #define PCIBIOS_MIN_IO 0x2000 @@ -53,7 +48,7 @@ struct pci_dev; /* Map a range of PCI memory or I/O space for a device into user space */ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); + enum pci_mmap_state mmap_state, int write_combine); /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ #define HAVE_PCI_MMAP 1 diff --git a/arch/xtensa/include/asm/perf_event.h b/arch/xtensa/include/asm/perf_event.h new file mode 100644 index 00000000000..5aa4590acaa --- /dev/null +++ b/arch/xtensa/include/asm/perf_event.h @@ -0,0 +1,4 @@ +#ifndef __ASM_XTENSA_PERF_EVENT_H +#define __ASM_XTENSA_PERF_EVENT_H + +#endif /* __ASM_XTENSA_PERF_EVENT_H */ diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index 40cf9bceda2..d38eb9237e6 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -38,35 +38,46 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long)pgd); } -/* Use a slab cache for the pte pages (see also sparc64 implementation) */ - -extern struct kmem_cache *pgtable_cache; - -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); + pte_t *ptep; + int i; + + ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); + if (!ptep) + return NULL; + for (i = 0; i < 1024; i++) + pte_clear(NULL, 0, ptep + i); + return ptep; } static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) { + pte_t *pte; struct page *page; - page = virt_to_page(pte_alloc_one_kernel(mm, addr)); - pgtable_page_ctor(page); + pte = pte_alloc_one_kernel(mm, addr); + if (!pte) + return NULL; + page = virt_to_page(pte); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - kmem_cache_free(pgtable_cache, pte); + free_page((unsigned long)pte); } static inline void pte_free(struct mm_struct *mm, pgtable_t pte) { pgtable_page_dtor(pte); - kmem_cache_free(pgtable_cache, page_address(pte)); + __free_page(pte); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index b03c043ce75..4b0ca35a93b 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -5,7 +5,7 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * Copyright (C) 2001 - 2007 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. */ #ifndef _XTENSA_PGTABLE_H @@ -64,41 +64,82 @@ * Virtual memory area. We keep a distance to other memory regions to be * on the safe side. We also use this area for cache aliasing. 
*/ - #define VMALLOC_START 0xC0000000 #define VMALLOC_END 0xC7FEFFFF #define TLBTEMP_BASE_1 0xC7FF0000 #define TLBTEMP_BASE_2 0xC7FF8000 /* - * Xtensa Linux config PTE layout (when present): - * 31-12: PPN - * 11-6: Software - * 5-4: RING - * 3-0: CA + * For the Xtensa architecture, the PTE layout is as follows: + * + * 31------12 11 10-9 8-6 5-4 3-2 1-0 + * +-----------------------------------------+ + * | | Software | HARDWARE | + * | PPN | ADW | RI |Attribute| + * +-----------------------------------------+ + * pte_none | MBZ | 01 | 11 | 00 | + * +-----------------------------------------+ + * present | PPN | 0 | 00 | ADW | RI | CA | wx | + * +- - - - - - - - - - - - - - - - - - - - -+ + * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 | + * +-----------------------------------------+ + * swap | index | type | 01 | 11 | 00 | + * +- - - - - - - - - - - - - - - - - - - - -+ + * file | file offset | 01 | 11 | 10 | + * +-----------------------------------------+ * - * Similar to the Alpha and MIPS ports, we need to keep track of the ref - * and mod bits in software. We have a software "you can read - * from this page" bit, and a hardware one which actually lets the - * process read from the page. On the same token we have a software - * writable bit and the real hardware one which actually lets the - * process write to the page. + * For T1050 hardware and earlier the layout differs for present and (PAGE_NONE) + * +-----------------------------------------+ + * present | PPN | 0 | 00 | ADW | RI | CA | w1 | + * +-----------------------------------------+ + * (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 | + * +-----------------------------------------+ * - * See further below for PTE layout for swapped-out pages. + * Legend: + * PPN Physical Page Number + * ADW software: accessed (young) / dirty / writable + * RI ring (0=privileged, 1=user, 2 and 3 are unused) + * CA cache attribute: 00 bypass, 01 writeback, 10 writethrough + * (11 is invalid and used to mark pages that are not present) + * w page is writable (hw) + * x page is executable (hw) + * index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB) + * (note that the index is always non-zero) + * type swap type (5 bits -> 32 types) + * file offset 26-bit offset into the file, in increments of PAGE_SIZE + * + * Notes: + * - (PROT_NONE) is a special case of 'present' but causes an exception for + * any access (read, write, and execute). + * - 'multihit-exception' has the highest priority of all MMU exceptions, + * so the ring must be set to 'RING_USER' even for 'non-present' pages. + * - on older hardware, the exectuable flag was not supported and + * used as a 'valid' flag, so it needs to be always set. + * - we need to keep track of certain flags in software (dirty and young) + * to do this, we use write exceptions and have a separate software w-flag. 
+ * - attribute value 1101 (and 1111 on T1050 and earlier) is reserved */ +#define _PAGE_ATTRIB_MASK 0xf + #define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */ #define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */ -#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */ -#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */ - -/* None of these cache modes include MP coherency: */ #define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */ #define _PAGE_CA_WB (1<<2) /* write-back */ #define _PAGE_CA_WT (2<<2) /* write-through */ #define _PAGE_CA_MASK (3<<2) -#define _PAGE_INVALID (3<<2) +#define _PAGE_CA_INVALID (3<<2) + +/* We use invalid attribute values to distinguish special pte entries */ +#if XCHAL_HW_VERSION_MAJOR < 2000 +#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */ +#define _PAGE_NONE 0x04 +#else +#define _PAGE_HW_VALID 0x00 +#define _PAGE_NONE 0x0f +#endif +#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */ #define _PAGE_USER (1<<4) /* user access (ring=1) */ @@ -108,19 +149,12 @@ #define _PAGE_DIRTY (1<<7) /* software: page dirty */ #define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */ -/* On older HW revisions, we always have to set bit 0 */ -#if XCHAL_HW_VERSION_MAJOR < 2000 -# define _PAGE_VALID (1<<0) -#else -# define _PAGE_VALID 0 -#endif - -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED) - #ifdef CONFIG_MMU -#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE) +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) +#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED) + +#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER) #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER) @@ -132,9 +166,9 @@ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC) #if (DCACHE_WAY_SIZE > PAGE_SIZE) -# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED) +# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS) #else -# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB) +# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB) #endif #else /* no mmu */ @@ -186,12 +220,11 @@ extern unsigned long empty_zero_page[1024]; #ifdef CONFIG_MMU extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)]; extern void paging_init(void); -extern void pgtable_cache_init(void); #else # define swapper_pg_dir NULL static inline void paging_init(void) { } -static inline void pgtable_cache_init(void) { } #endif +static inline void pgtable_cache_init(void) { } /* * The pmd contains the kernel virtual address of the pte page. @@ -202,12 +235,16 @@ static inline void pgtable_cache_init(void) { } /* * pte status. 
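 * For example, with the definitions above, a cleared pte has the value
 * _PAGE_CA_INVALID | _PAGE_USER (0x1c); this is exactly what pte_none()
 * tests for and what pte_clear() stores.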
*/ -#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID) -#define pte_present(pte) \ - (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \ - || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE)) +# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER)) +#if XCHAL_HW_VERSION_MAJOR < 2000 +# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) +#else +# define pte_present(pte) \ + (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \ + || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE)) +#endif #define pte_clear(mm,addr,ptep) \ - do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0) + do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0) #define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK) @@ -273,6 +310,10 @@ set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) update_pte(ptep, pteval); } +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + update_pte(ptep, pteval); +} static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) @@ -284,7 +325,7 @@ struct vm_area_struct; static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, - pte_t *ptep) + pte_t *ptep) { pte_t pte = *ptep; if (!pte_young(pte)) @@ -304,8 +345,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_t pte = *ptep; - update_pte(ptep, pte_wrprotect(pte)); + pte_t pte = *ptep; + update_pte(ptep, pte_wrprotect(pte)); } /* to find an entry in a kernel page-table-directory */ @@ -328,35 +369,23 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) /* - * Encode and decode a swap entry. - * - * Format of swap pte: - * bit 0 MBZ - * bit 1 page-file (must be zero) - * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID) - * bits 4 - 5 ring protection (must be 01: _PAGE_USER) - * bits 6 - 10 swap type (5 bits -> 32 types) - * bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB) - - * Format of file pte: - * bit 0 MBZ - * bit 1 page-file (must be one: _PAGE_FILE) - * bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID) - * bits 4 - 5 ring protection (must be 01: _PAGE_USER) - * bits 6 - 31 file offset / PAGE_SIZE + * Encode and decode a swap and file entry. 
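+ *
+ * For example (worked out from the macros below), __swp_entry(2, 3)
+ * evaluates to (2 << 6) | (3 << 11) | _PAGE_CA_INVALID | _PAGE_USER,
+ * i.e. 0x189c.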
*/ +#define SWP_TYPE_BITS 5 +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS) #define __swp_type(entry) (((entry).val >> 6) & 0x1f) #define __swp_offset(entry) ((entry).val >> 11) #define __swp_entry(type,offs) \ - ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID}) + ((swp_entry_t){((type) << 6) | ((offs) << 11) | \ + _PAGE_CA_INVALID | _PAGE_USER}) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#define PTE_FILE_MAX_BITS 28 -#define pte_to_pgoff(pte) (pte_val(pte) >> 4) +#define PTE_FILE_MAX_BITS 26 +#define pte_to_pgoff(pte) (pte_val(pte) >> 6) #define pgoff_to_pte(off) \ - ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE }) + ((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER }) #endif /* !defined (__ASSEMBLY__) */ @@ -393,14 +422,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) extern void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep); -/* - * remap a physical page `pfn' of size `size' with page protection `prot' - * into virtual address `from' - */ - -#define io_remap_pfn_range(vma,from,pfn,size,prot) \ - remap_pfn_range(vma, from, pfn, size, prot) - typedef pte_t *pte_addr_t; #endif /* !defined (__ASSEMBLY__) */ @@ -410,6 +431,10 @@ typedef pte_t *pte_addr_t; #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTEP_MKDIRTY #define __HAVE_ARCH_PTE_SAME +/* We provide our own get_unmapped_area to cope with + * SHM area cache aliasing for userland. + */ +#define HAVE_ARCH_UNMAPPED_AREA #include <asm-generic/pgtable.h> diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h index 7d936e58e9b..32e98f27ce9 100644 --- a/arch/xtensa/include/asm/platform.h +++ b/arch/xtensa/include/asm/platform.h @@ -30,11 +30,6 @@ extern void platform_init(bp_tag_t*); extern void platform_setup (char **); /* - * platform_init_irq is called from init_IRQ. - */ -extern void platform_init_irq (void); - -/* * platform_restart is called to restart the system. */ extern void platform_restart (void); @@ -75,4 +70,3 @@ extern int platform_pcibios_fixup (void); extern void platform_calibrate_ccount (void); #endif /* _XTENSA_PLATFORM_H */ - diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 2d630e7399c..abb59708a3b 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -5,7 +5,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2008 Tensilica Inc. */ #ifndef _XTENSA_PROCESSOR_H @@ -68,7 +68,7 @@ /* LOCKLEVEL defines the interrupt level that masks all * general-purpose interrupts. */ -#define LOCKLEVEL 1 +#define LOCKLEVEL XCHAL_EXCM_LEVEL /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE * registers @@ -89,7 +89,7 @@ #define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) typedef struct { - unsigned long seg; + unsigned long seg; } mm_segment_t; struct thread_struct { @@ -145,10 +145,10 @@ struct thread_struct { * set_thread_state in signal.c depends on it. 
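 * Read directly from the macro below: PS.WOE = 1, PS.CALLINC = 1,
 * PS.RING = USER_RING, PS.UM = 1 and PS.EXCM = 1; all remaining
 * fields (notably PS.INTLEVEL and PS.OWB) start out as 0.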
*/ #define USER_PS_VALUE ((1 << PS_WOE_BIT) | \ - (1 << PS_CALLINC_SHIFT) | \ - (USER_RING << PS_RING_SHIFT) | \ - (1 << PS_UM_BIT) | \ - (1 << PS_EXCM_BIT)) + (1 << PS_CALLINC_SHIFT) | \ + (USER_RING << PS_RING_SHIFT) | \ + (1 << PS_UM_BIT) | \ + (1 << PS_EXCM_BIT)) /* Clearing a0 terminates the backtrace. */ #define start_thread(regs, new_pc, new_sp) \ @@ -191,5 +191,25 @@ extern unsigned long get_wchan(struct task_struct *p); #define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);}) #define get_sr(sr) ({unsigned int v; RSR(v,sr); v; }) +#ifndef XCHAL_HAVE_EXTERN_REGS +#define XCHAL_HAVE_EXTERN_REGS 0 +#endif + +#if XCHAL_HAVE_EXTERN_REGS + +static inline void set_er(unsigned long value, unsigned long addr) +{ + asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory"); +} + +static inline unsigned long get_er(unsigned long addr) +{ + register unsigned long value; + asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory"); + return value; +} + +#endif /* XCHAL_HAVE_EXTERN_REGS */ + #endif /* __ASSEMBLY__ */ #endif /* _XTENSA_PROCESSOR_H */ diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index da21c17f23a..598e752dcbc 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h @@ -37,7 +37,8 @@ struct pt_regs { unsigned long windowstart; /* 52 */ unsigned long syscall; /* 56 */ unsigned long icountlevel; /* 60 */ - int reserved[1]; /* 64 */ + unsigned long scompare1; /* 64 */ + unsigned long threadptr; /* 68 */ /* Additional configurable registers that are used by the compiler. */ xtregs_opt_t xtregs_opt; @@ -48,21 +49,31 @@ struct pt_regs { /* current register frame. * Note: The ESF for kernel exceptions ends after 16 registers! */ - unsigned long areg[16]; /* 128 (64) */ + unsigned long areg[16]; }; #include <variant/core.h> # define arch_has_single_step() (1) # define task_pt_regs(tsk) ((struct pt_regs*) \ - (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) + (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) # define instruction_pointer(regs) ((regs)->pc) +# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \ + (regs)->areg[1])) # ifndef CONFIG_SMP # define profile_pc(regs) instruction_pointer(regs) +# else +# define profile_pc(regs) \ + ({ \ + in_lock_functions(instruction_pointer(regs)) ? \ + return_pointer(regs) : instruction_pointer(regs); \ + }) # endif +#define user_stack_pointer(regs) ((regs)->areg[1]) + #else /* __ASSEMBLY__ */ # include <asm/asm-offsets.h> diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h index 8a8aa61ccc8..4ba9f516b0e 100644 --- a/arch/xtensa/include/asm/regs.h +++ b/arch/xtensa/include/asm/regs.h @@ -52,6 +52,10 @@ #define EXCCAUSE_SPECULATION 7 #define EXCCAUSE_PRIVILEGED 8 #define EXCCAUSE_UNALIGNED 9 +#define EXCCAUSE_INSTR_DATA_ERROR 12 +#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13 +#define EXCCAUSE_INSTR_ADDR_ERROR 14 +#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15 #define EXCCAUSE_ITLB_MISS 16 #define EXCCAUSE_ITLB_MULTIHIT 17 #define EXCCAUSE_ITLB_PRIVILEGE 18 @@ -78,12 +82,14 @@ #define PS_CALLINC_SHIFT 16 #define PS_CALLINC_MASK 0x00030000 #define PS_OWB_SHIFT 8 +#define PS_OWB_WIDTH 4 #define PS_OWB_MASK 0x00000F00 #define PS_RING_SHIFT 6 #define PS_RING_MASK 0x000000C0 #define PS_UM_BIT 5 #define PS_EXCM_BIT 4 #define PS_INTLEVEL_SHIFT 0 +#define PS_INTLEVEL_WIDTH 4 #define PS_INTLEVEL_MASK 0x0000000F /* DBREAKCn register fields. 
*/ @@ -105,4 +111,3 @@ #define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */ #endif /* _XTENSA_SPECREG_H */ - diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h index 72fd44c85b7..de169b4eaee 100644 --- a/arch/xtensa/include/asm/signal.h +++ b/arch/xtensa/include/asm/signal.h @@ -15,19 +15,9 @@ #include <uapi/asm/signal.h> #ifndef __ASSEMBLY__ -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - void (*sa_restorer)(void); - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; +#define __ARCH_HAS_SA_RESTORER #include <asm/sigcontext.h> -#define ptrace_signal_deliver(regs, cookie) do { } while (0) #endif /* __ASSEMBLY__ */ #endif /* _XTENSA_SIGNAL_H */ diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h index 83c569e3bdb..4e43f564389 100644 --- a/arch/xtensa/include/asm/smp.h +++ b/arch/xtensa/include/asm/smp.h @@ -1,27 +1,43 @@ /* - * include/asm-xtensa/smp.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. */ #ifndef _XTENSA_SMP_H #define _XTENSA_SMP_H -extern struct xtensa_cpuinfo boot_cpu_data; +#ifdef CONFIG_SMP -#define cpu_data (&boot_cpu_data) -#define current_cpu_data boot_cpu_data +#define raw_smp_processor_id() (current_thread_info()->cpu) +#define cpu_logical_map(cpu) (cpu) -struct xtensa_cpuinfo { - unsigned long *pgd_cache; - unsigned long *pte_cache; - unsigned long pgtable_cache_sz; +struct start_info { + unsigned long stack; }; +extern struct start_info start_info; -#define cpu_logical_map(cpu) (cpu) +struct cpumask; +void arch_send_call_function_ipi_mask(const struct cpumask *mask); +void arch_send_call_function_single_ipi(int cpu); + +void smp_init_cpus(void); +void secondary_init_irq(void); +void ipi_init(void); +struct seq_file; +void show_ipi_list(struct seq_file *p, int prec); + +#ifdef CONFIG_HOTPLUG_CPU + +void __cpu_die(unsigned int cpu); +int __cpu_disable(void); +void cpu_die(void); +void cpu_restart(void); + +#endif /* CONFIG_HOTPLUG_CPU */ + +#endif /* CONFIG_SMP */ #endif /* _XTENSA_SMP_H */ diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 8ff23649581..1d95fa5dcd1 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h @@ -11,6 +11,195 @@ #ifndef _XTENSA_SPINLOCK_H #define _XTENSA_SPINLOCK_H -#include <linux/spinlock.h> +/* + * spinlock + * + * There is at most one owner of a spinlock. There are not different + * types of spinlock owners like there are for rwlocks (see below). + * + * When trying to obtain a spinlock, the function "spins" forever, or busy- + * waits, until the lock is obtained. When spinning, presumably some other + * owner will soon give up the spinlock making it available to others. Use + * the trylock functions to avoid spinning forever. 
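+ *
+ * Minimal usage sketch (illustrative only; these are normally reached
+ * through the generic spin_lock()/spin_unlock() wrappers rather than
+ * called directly):
+ *
+ *	arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;
+ *
+ *	arch_spin_lock(&l);
+ *	... critical section ...
+ *	arch_spin_unlock(&l);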
+ * + * possible values: + * + * 0 nobody owns the spinlock + * 1 somebody owns the spinlock + */ + +#define arch_spin_is_locked(x) ((x)->slock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) + +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + "1: movi %0, 1\n" + " s32c1i %0, %1, 0\n" + " bnez %0, 1b\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + " movi %0, 1\n" + " s32c1i %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); + + return tmp == 0 ? 1 : 0; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " s32ri %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); +} + +/* + * rwlock + * + * Read-write locks are really a more flexible spinlock. They allow + * multiple readers but only one writer. Write ownership is exclusive + * (i.e., all other readers and writers are blocked from ownership while + * there is a write owner). These rwlocks are unfair to writers. Writers + * can be starved for an indefinite time by readers. + * + * possible values: + * + * 0 nobody owns the rwlock + * >0 one or more readers own the rwlock + * (the positive value is the actual number of readers) + * 0x80000000 one writer owns the rwlock, no other writers, no readers + */ + +#define arch_write_can_lock(x) ((x)->lock == 0) + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + "1: movi %0, 1\n" + " slli %0, %0, 31\n" + " s32c1i %0, %1, 0\n" + " bnez %0, 1b\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + " movi %0, 1\n" + " slli %0, %0, 31\n" + " s32c1i %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); + + return tmp == 0 ? 1 : 0; +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " s32ri %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned long tmp; + unsigned long result; + + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " bltz %1, 1b\n" + " wsr %1, scompare1\n" + " addi %0, %1, 1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. 
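+ * For example (illustrative sketch only):
+ *
+ *	if (arch_read_trylock(&rw)) {
+ *		... read-side critical section ...
+ *		arch_read_unlock(&rw);
+ *	}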
*/ + +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned long result; + unsigned long tmp; + + __asm__ __volatile__( + " l32i %1, %2, 0\n" + " addi %0, %1, 1\n" + " bltz %0, 1f\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " sub %0, %0, %1\n" + "1:\n" + : "=&a" (result), "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); + + return result == 0; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned long tmp1, tmp2; + + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " addi %0, %1, -1\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp1), "=&a" (tmp2) + : "a" (&rw->lock) + : "memory"); +} + +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #endif /* _XTENSA_SPINLOCK_H */ diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h new file mode 100644 index 00000000000..7ec5ce10c9e --- /dev/null +++ b/arch/xtensa/include/asm/spinlock_types.h @@ -0,0 +1,20 @@ +#ifndef __ASM_SPINLOCK_TYPES_H +#define __ASM_SPINLOCK_TYPES_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +# error "please don't include this file directly" +#endif + +typedef struct { + volatile unsigned int slock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h new file mode 100644 index 00000000000..6a05fcb0a20 --- /dev/null +++ b/arch/xtensa/include/asm/stacktrace.h @@ -0,0 +1,36 @@ +/* + * arch/xtensa/include/asm/stacktrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. 
+ */ +#ifndef _XTENSA_STACKTRACE_H +#define _XTENSA_STACKTRACE_H + +#include <linux/sched.h> + +struct stackframe { + unsigned long pc; + unsigned long sp; +}; + +static __always_inline unsigned long *stack_pointer(struct task_struct *task) +{ + unsigned long *sp; + + if (!task || task == current) + __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); + else + sp = (unsigned long *)task->thread.sp; + + return sp; +} + +void walk_stackframe(unsigned long *sp, + int (*fn)(struct stackframe *frame, void *data), + void *data); + +#endif /* _XTENSA_STACKTRACE_H */ diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h index 405a8c49ff2..8d5d9dfadb0 100644 --- a/arch/xtensa/include/asm/string.h +++ b/arch/xtensa/include/asm/string.h @@ -74,7 +74,7 @@ static inline int strcmp(const char *__cs, const char *__ct) "beqz %2, 2f\n\t" "beq %2, %3, 1b\n" "2:\n\t" - "sub %2, %3, %2" + "sub %2, %2, %3" : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) : "0" (__cs), "1" (__ct)); @@ -99,7 +99,7 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n) "beqz %3, 2f\n\t" "beq %2, %3, 1b\n" "2:\n\t" - "sub %2, %3, %2" + "sub %2, %2, %3" : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy) : "0" (__cs), "1" (__ct), "r" (__cs+__n)); diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index 124aeee0d38..3673ff1f1bc 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -9,17 +9,9 @@ */ struct pt_regs; -struct sigaction; -asmlinkage long sys_execve(char*, char**, char**, struct pt_regs*); -asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*); asmlinkage long xtensa_ptrace(long, long, long, long); asmlinkage long xtensa_sigreturn(struct pt_regs*); asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); -asmlinkage long xtensa_sigaltstack(struct pt_regs *regs); -asmlinkage long sys_rt_sigaction(int, - const struct sigaction __user *, - struct sigaction __user *, - size_t); asmlinkage long xtensa_shmat(int, char __user *, int); asmlinkage long xtensa_fadvise64_64(int, int, unsigned long long, unsigned long long); @@ -27,9 +19,9 @@ asmlinkage long xtensa_fadvise64_64(int, int, /* Should probably move to linux/syscalls.h */ struct pollfd; asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timespec __user *tsp, void __user *sig); + fd_set __user *exp, struct timespec __user *tsp, + void __user *sig); asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, - struct timespec __user *tsp, const sigset_t __user *sigmask, - size_t sigsetsize); -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, - size_t sigsetsize); + struct timespec __user *tsp, + const sigset_t __user *sigmask, + size_t sigsetsize); diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h new file mode 100644 index 00000000000..c015c5c8e3f --- /dev/null +++ b/arch/xtensa/include/asm/sysmem.h @@ -0,0 +1,38 @@ +/* + * sysmem-related prototypes. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2014 Cadence Design Systems Inc. + */ + +#ifndef _XTENSA_SYSMEM_H +#define _XTENSA_SYSMEM_H + +#define SYSMEM_BANKS_MAX 31 + +struct meminfo { + unsigned long start; + unsigned long end; +}; + +/* + * Bank array is sorted by .start. 
+ * Banks don't overlap and there's at least one page gap + * between adjacent bank entries. + */ +struct sysmem_info { + int nr_banks; + struct meminfo bank[SYSMEM_BANKS_MAX]; +}; + +extern struct sysmem_info sysmem; + +int add_sysmem_bank(unsigned long start, unsigned long end); +int mem_reserve(unsigned long, unsigned long, int); +void bootmem_init(void); +void zones_init(void); + +#endif /* _XTENSA_SYSMEM_H */ diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 9481004ac11..470153e8547 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -76,8 +76,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h index 175b3d5e1b0..ca929e6a38b 100644 --- a/arch/xtensa/include/asm/timex.h +++ b/arch/xtensa/include/asm/timex.h @@ -1,63 +1,43 @@ /* - * include/asm-xtensa/timex.h - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. */ #ifndef _XTENSA_TIMEX_H #define _XTENSA_TIMEX_H -#ifdef __KERNEL__ - #include <asm/processor.h> #include <linux/stringify.h> #define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL #define INTLEVEL(x) _INTLEVEL(x) -#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1 +#if XCHAL_NUM_TIMERS > 0 && \ + INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 0 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT -#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1 +#elif XCHAL_NUM_TIMERS > 1 && \ + INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 1 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT -#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1 +#elif XCHAL_NUM_TIMERS > 2 && \ + INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 2 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT #else # error "Bad timer number for Linux configurations!" #endif -#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT) - -#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */ -#define CLOCK_TICK_FACTOR 20 /* Factor of both 10^6 and CLOCK_TICK_RATE */ - -#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT -extern unsigned long ccount_per_jiffy; -extern unsigned long nsec_per_ccount; -#define CCOUNT_PER_JIFFY ccount_per_jiffy -#define NSEC_PER_CCOUNT nsec_per_ccount -#else -#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ)) -#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK) -#endif - +extern unsigned long ccount_freq; typedef unsigned long long cycles_t; -/* - * Only used for SMP. - */ - -extern cycles_t cacheflush_time; - #define get_cycles() (0) +void local_timer_setup(unsigned cpu); /* * Register access. @@ -92,5 +72,4 @@ static inline void set_linux_timer (unsigned long ccompare) WSR_CCOMPARE(LINUX_TIMER, ccompare); } -#endif /* __KERNEL__ */ #endif /* _XTENSA_TIMEX_H */ diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h index 43dd348a5a4..06875feb27c 100644 --- a/arch/xtensa/include/asm/tlbflush.h +++ b/arch/xtensa/include/asm/tlbflush.h @@ -1,18 +1,14 @@ /* - * include/asm-xtensa/tlbflush.h - * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. */ #ifndef _XTENSA_TLBFLUSH_H #define _XTENSA_TLBFLUSH_H -#ifdef __KERNEL__ - #include <linux/stringify.h> #include <asm/processor.h> @@ -34,12 +30,34 @@ * - flush_tlb_range(mm, start, end) flushes a range of pages */ -extern void flush_tlb_all(void); -extern void flush_tlb_mm(struct mm_struct*); -extern void flush_tlb_page(struct vm_area_struct*,unsigned long); -extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long); +void local_flush_tlb_all(void); +void local_flush_tlb_mm(struct mm_struct *mm); +void local_flush_tlb_page(struct vm_area_struct *vma, + unsigned long page); +void local_flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); + +#ifdef CONFIG_SMP + +void flush_tlb_all(void); +void flush_tlb_mm(struct mm_struct *); +void flush_tlb_page(struct vm_area_struct *, unsigned long); +void flush_tlb_range(struct vm_area_struct *, unsigned long, + unsigned long); +void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +#else /* !CONFIG_SMP */ + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page) +#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \ + end) +#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \ + end) -#define flush_tlb_kernel_range(start,end) flush_tlb_all() +#endif /* CONFIG_SMP */ /* TLB operations. */ @@ -187,5 +205,4 @@ static inline unsigned long read_itlb_translation (int way) } #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* _XTENSA_TLBFLUSH_H */ diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h new file mode 100644 index 00000000000..677bfcf4ee5 --- /dev/null +++ b/arch/xtensa/include/asm/traps.h @@ -0,0 +1,59 @@ +/* + * arch/xtensa/include/asm/traps.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 Tensilica Inc. 
+ */ +#ifndef _XTENSA_TRAPS_H +#define _XTENSA_TRAPS_H + +#include <asm/ptrace.h> + +/* + * handler must be either of the following: + * void (*)(struct pt_regs *regs); + * void (*)(struct pt_regs *regs, unsigned long exccause); + */ +extern void * __init trap_set_handler(int cause, void *handler); +extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); +void secondary_trap_init(void); + +static inline void spill_registers(void) +{ +#if XCHAL_NUM_AREGS > 16 + __asm__ __volatile__ ( + " call12 1f\n" + " _j 2f\n" + " retw\n" + " .align 4\n" + "1:\n" + " _entry a1, 48\n" + " addi a12, a0, 3\n" +#if XCHAL_NUM_AREGS > 32 + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" + " _entry a1, 48\n" + " mov a12, a0\n" + " .endr\n" +#endif + " _entry a1, 48\n" +#if XCHAL_NUM_AREGS % 12 == 0 + " mov a8, a8\n" +#elif XCHAL_NUM_AREGS % 12 == 4 + " mov a12, a12\n" +#elif XCHAL_NUM_AREGS % 12 == 8 + " mov a4, a4\n" +#endif + " retw\n" + "2:\n" + : : : "a12", "a13", "memory"); +#else + __asm__ __volatile__ ( + " mov a12, a12\n" + : : : "memory"); +#endif +} + +#endif /* _XTENSA_TRAPS_H */ diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 6e4bb3b791a..fd686dc45d1 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -180,7 +180,8 @@ #define segment_eq(a,b) ((a).seg == (b).seg) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) +#define __user_ok(addr,size) \ + (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) @@ -234,10 +235,10 @@ do { \ int __cb; \ retval = 0; \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ - case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ - case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ - case 8: { \ + case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ + case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ + case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ + case 8: { \ __typeof__(*ptr) __v64 = x; \ retval = __copy_to_user(ptr,&__v64,8); \ break; \ @@ -291,7 +292,7 @@ do { \ * __check_align_* macros still work. */ #define __put_user_asm(x, addr, err, align, insn, cb) \ - __asm__ __volatile__( \ +__asm__ __volatile__( \ __check_align_##align \ "1: "insn" %2, %3, 0 \n" \ "2: \n" \ @@ -301,8 +302,8 @@ do { \ " .long 2b \n" \ "5: \n" \ " l32r %1, 4b \n" \ - " movi %0, %4 \n" \ - " jx %1 \n" \ + " movi %0, %4 \n" \ + " jx %1 \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .long 1b, 5b \n" \ @@ -334,13 +335,13 @@ extern long __get_user_bad(void); do { \ int __cb; \ retval = 0; \ - switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ - case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ - case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ - case 8: retval = __copy_from_user(&x,ptr,8); break; \ - default: (x) = __get_user_bad(); \ - } \ + switch (size) { \ + case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ + case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ + case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ + case 8: retval = __copy_from_user(&x,ptr,8); break; \ + default: (x) = __get_user_bad(); \ + } \ } while (0) @@ -349,7 +350,7 @@ do { \ * __check_align_* macros still work. 
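 * As an illustration, __get_user_asm(x, ptr, err, 4, "l32i", cb) below
 * expands to a single l32i plus an __ex_table fixup that, on a fault,
 * zeroes the destination (movi %2, 0) and sets err (movi %0, %4).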
*/ #define __get_user_asm(x, addr, err, align, insn, cb) \ - __asm__ __volatile__( \ +__asm__ __volatile__( \ __check_align_##align \ "1: "insn" %2, %3, 0 \n" \ "2: \n" \ @@ -360,8 +361,8 @@ do { \ "5: \n" \ " l32r %1, 4b \n" \ " movi %2, 0 \n" \ - " movi %0, %4 \n" \ - " jx %1 \n" \ + " movi %0, %4 \n" \ + " jx %1 \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .long 1b, 5b \n" \ @@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) -#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) -#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) +#define __copy_to_user(to,from,n) \ + __generic_copy_to_user_nocheck((to),(from),(n)) +#define __copy_from_user(to,from,n) \ + __generic_copy_from_user_nocheck((to),(from),(n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index f4e6eaa40d1..cb4c2ce8d44 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h @@ -1,22 +1,12 @@ #ifndef _XTENSA_UNISTD_H #define _XTENSA_UNISTD_H -#define __ARCH_WANT_SYS_EXECVE +#define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); - #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_SYS_GETPGRP /* diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h new file mode 100644 index 00000000000..f74ddfbb92e --- /dev/null +++ b/arch/xtensa/include/asm/vectors.h @@ -0,0 +1,130 @@ +/* + * arch/xtensa/include/asm/vectors.h + * + * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual + * Memory Addresses from "Virtual == Physical" to their previous V2 MMU + * mappings (KSEG at 0xD0000000 and KIO at 0xF0000000). + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 - 2012 Tensilica Inc. 
+ * + * Pete Delaney <piet@tensilica.com> + * Marc Gauthier <marc@tensilica.com> + */ + +#ifndef _XTENSA_VECTORS_H +#define _XTENSA_VECTORS_H + +#include <variant/core.h> + +#define XCHAL_KIO_CACHED_VADDR 0xe0000000 +#define XCHAL_KIO_BYPASS_VADDR 0xf0000000 +#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 +#define XCHAL_KIO_SIZE 0x10000000 + +#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) +#define XCHAL_KIO_PADDR xtensa_get_kio_paddr() +#else +#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR +#endif + +#if defined(CONFIG_MMU) + +/* Will Become VECBASE */ +#define VIRTUAL_MEMORY_ADDRESS 0xD0000000 + +/* Image Virtual Start Address */ +#define KERNELOFFSET 0xD0003000 + +#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ + #define LOAD_MEMORY_ADDRESS 0x00003000 +#else + /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ + #define LOAD_MEMORY_ADDRESS 0xD0003000 +#endif + +#else /* !defined(CONFIG_MMU) */ + /* MMU Not being used - Virtual == Physical */ + + /* VECBASE */ + #define VIRTUAL_MEMORY_ADDRESS 0x00002000 + + /* Location of the start of the kernel text, _start */ + #define KERNELOFFSET 0x00003000 + + /* Loaded just above possibly live vectors */ + #define LOAD_MEMORY_ADDRESS 0x00003000 + +#endif /* CONFIG_MMU */ + +#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) + +/* Used to set VECBASE register */ +#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS + +#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \ + VECBASE_RESET_VADDR) +#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS) + +#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \ + VECBASE_RESET_VADDR) +#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) + +#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE + +#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) +#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) +#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS) +#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS) +#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS) +#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS) +#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS) +#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS) +#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS) + +#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) + +#define NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) + +#define INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) + +/* + * These XCHAL_* #defines from variant/core.h + * are not valid to use with V3 MMU. Non-XCHAL + * constants are defined above and should be used. 
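+ *
+ * For example, USER_VECTOR_VADDR above is XC_VADDR(XCHAL_USER_VECOFS),
+ * i.e. VIRTUAL_MEMORY_ADDRESS + XCHAL_USER_VECOFS.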
+ */ +#undef XCHAL_VECBASE_RESET_VADDR +#undef XCHAL_RESET_VECTOR0_VADDR +#undef XCHAL_USER_VECTOR_VADDR +#undef XCHAL_KERNEL_VECTOR_VADDR +#undef XCHAL_DOUBLEEXC_VECTOR_VADDR +#undef XCHAL_WINDOW_VECTORS_VADDR +#undef XCHAL_INTLEVEL2_VECTOR_VADDR +#undef XCHAL_INTLEVEL3_VECTOR_VADDR +#undef XCHAL_INTLEVEL4_VECTOR_VADDR +#undef XCHAL_INTLEVEL5_VECTOR_VADDR +#undef XCHAL_INTLEVEL6_VECTOR_VADDR +#undef XCHAL_DEBUG_VECTOR_VADDR +#undef XCHAL_NMI_VECTOR_VADDR +#undef XCHAL_INTLEVEL7_VECTOR_VADDR + +#else + +#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR +#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR +#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR +#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR +#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR +#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR +#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR +#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR +#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR +#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR + +#endif + +#endif /* _XTENSA_VECTORS_H */ diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h index 2aa4cd9f0ce..b4cb1100c0f 100644 --- a/arch/xtensa/include/uapi/asm/ioctls.h +++ b/arch/xtensa/include/uapi/asm/ioctls.h @@ -101,6 +101,9 @@ #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ #define TIOCVHANGUP _IO('T', 0x37) +#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */ +#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ +#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ #define TIOCSERCONFIG _IO('T', 83) #define TIOCSERGWILD _IOR('T', 84, int) diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index 25bc6c1309c..00eed6786d7 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -93,4 +93,15 @@ /* compatibility flags */ #define MAP_FILE 0 +/* + * When MAP_HUGETLB is set, bits [26:31] encode the log2 of the huge page size. + * This gives us 6 bits, which is enough until someone invents 128-bit address + * spaces. + * + * Assume these are all powers of two. + * When 0, use the default page size. + */ +#define MAP_HUGE_SHIFT 26 +#define MAP_HUGE_MASK 0x3f + #endif /* _XTENSA_MMAN_H */ diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h index b88ce96f2af..586756ee267 100644 --- a/arch/xtensa/include/uapi/asm/signal.h +++ b/arch/xtensa/include/uapi/asm/signal.h @@ -97,27 +97,12 @@ typedef struct { #define SA_RESTORER 0x04000000 -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 #ifndef __ASSEMBLY__ -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -/* Type of a signal handler. 
*/ -typedef void (*__sighandler_t)(int); - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include <asm-generic/signal-defs.h> #ifndef __KERNEL__ diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index e36c6818492..39acec0cf0b 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -32,7 +32,7 @@ #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ +#define SO_REUSEPORT 15 #define SO_PASSCRED 16 #define SO_PEERCRED 17 #define SO_RCVLOWAT 18 @@ -52,6 +52,7 @@ #define SO_ATTACH_FILTER 26 #define SO_DETACH_FILTER 27 +#define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 @@ -80,4 +81,14 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#define SO_LOCK_FILTER 44 + +#define SO_SELECT_ERR_QUEUE 45 + +#define SO_BUSY_POLL 46 + +#define SO_MAX_PACING_RATE 47 + +#define SO_BPF_EXTENSIONS 48 + #endif /* _XTENSA_SOCKET_H */ diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index 9f36d0e3e0a..b9395529f02 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -260,7 +260,7 @@ __SYSCALL(115, sys_sendmmsg, 4) /* Process Operations */ #define __NR_clone 116 -__SYSCALL(116, xtensa_clone, 5) +__SYSCALL(116, sys_clone, 5) #define __NR_execve 117 __SYSCALL(117, sys_execve, 3) #define __NR_exit 118 @@ -483,7 +483,7 @@ __SYSCALL(222, sys_ni_syscall, 0) #define __NR_restart_syscall 223 __SYSCALL(223, sys_restart_syscall, 0) #define __NR_sigaltstack 224 -__SYSCALL(224, xtensa_sigaltstack, 2) +__SYSCALL(224, sys_sigaltstack, 2) #define __NR_rt_sigreturn 225 __SYSCALL(225, xtensa_rt_sigreturn, 1) #define __NR_rt_sigaction 226 @@ -728,8 +728,18 @@ __SYSCALL(330, sys_prlimit64, 4) #define __NR_kcmp 331 __SYSCALL(331, sys_kcmp, 5) +#define __NR_finit_module 332 +__SYSCALL(332, sys_finit_module, 3) -#define __NR_syscall_count 332 +#define __NR_accept4 333 +__SYSCALL(333, sys_accept4, 4) + +#define __NR_sched_setattr 334 +__SYSCALL(334, sys_sched_setattr, 2) +#define __NR_sched_getattr 335 +__SYSCALL(335, sys_sched_getattr, 3) + +#define __NR_syscall_count 336 /* * sysxtensa syscall handler 
