Diffstat (limited to 'arch/mips/include')
74 files changed, 1470 insertions, 1789 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index dd75d673447..519197ede08 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -137,7 +137,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	int result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -189,7 +189,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	int result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -249,7 +249,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	int result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -516,7 +516,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -568,7 +568,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -628,7 +628,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	long result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -788,9 +788,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
 #define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
 #define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic-long.h>
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 8e9ac313ca3..c0884f02d3a 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -88,12 +88,20 @@
 		: /* no output */		\
 		: "m" (*(int *)CKSEG1)		\
 		: "memory")
-
-#define fast_wmb()	__sync()
-#define fast_rmb()	__sync()
-#define fast_mb()	__sync()
-#ifdef CONFIG_SGI_IP28
-#define fast_iob()				\
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
+# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+
+# define fast_wmb()	__syncw()
+# define fast_rmb()	barrier()
+# define fast_mb()	__sync()
+# define fast_iob()	do { } while (0)
+#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
+# define fast_wmb()	__sync()
+# define fast_rmb()	__sync()
+# define fast_mb()	__sync()
+# ifdef CONFIG_SGI_IP28
+# define fast_iob()				\
 	__asm__ __volatile__(			\
 		".set push\n\t"			\
 		".set noreorder\n\t"		\
@@ -104,13 +112,14 @@
 		: /* no output */			\
 		: "m" (*(int *)CKSEG1ADDR(0x1fa00004))	\
 		: "memory")
-#else
-#define fast_iob()				\
+# else
+# define fast_iob()				\
 	do {					\
 		__sync();			\
 		__fast_iob();			\
 	} while (0)
-#endif
+# endif
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
 
 #ifdef CONFIG_CPU_HAS_WB
 
@@ -131,25 +140,42 @@
 #endif /* !CONFIG_CPU_HAS_WB */
 
 #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
-#define __WEAK_ORDERING_MB	"       sync	\n"
+# ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define smp_mb()	__sync()
+# define smp_rmb()	barrier()
+# define smp_wmb()	__syncw()
+# else
+# define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
+# define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
+# define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
+# endif
 #else
-#define __WEAK_ORDERING_MB	"		\n"
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
 #endif
+
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
 #define __WEAK_LLSC_MB		"       sync	\n"
 #else
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
 
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
-#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
-#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define smp_mb__before_llsc() smp_wmb()
+/* Cause previous writes to become visible on all CPUs as soon as possible */
+#define nudge_writes() __asm__ __volatile__(".set push\n\t"		\
+					    ".set arch=octeon\n\t"	\
+					    "syncw\n\t"			\
+					    ".set pop" : : : "memory")
+#else
+#define smp_mb__before_llsc() smp_llsc_mb()
+#define nudge_writes() mb()
+#endif
 
 #endif /* __ASM_BARRIER_H */
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 84a383806b2..9255cfbee45 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -42,7 +42,7 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
@@ -258,7 +258,7 @@ static inline int test_and_set_bit(unsigned long nr,
 	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long res;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -395,7 +395,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long res;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -485,7 +485,7 @@ static inline int test_and_change_bit(unsigned long nr,
 	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long res;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 815a438a268..ed9aaaaf074 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -72,14 +72,14 @@
  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __cmpxchg(ptr, old, new, barrier)				\
+#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier)		\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
 	__typeof__(*(ptr)) __old = (old);				\
 	__typeof__(*(ptr)) __new = (new);				\
 	__typeof__(*(ptr)) __res = 0;					\
 									\
-	barrier;							\
+	pre_barrier;							\
 									\
 	switch (sizeof(*(__ptr))) {					\
 	case 4:								\
@@ -96,13 +96,13 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 		break;							\
 	}								\
 									\
-	barrier;							\
+	post_barrier;							\
 									\
 	__res;								\
 })
 
-#define cmpxchg(ptr, old, new)		__cmpxchg(ptr, old, new, smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new)	__cmpxchg(ptr, old, new, )
+#define cmpxchg(ptr, old, new)		__cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
+#define cmpxchg_local(ptr, old, new)	__cmpxchg(ptr, old, new, , )
 
 #define cmpxchg64(ptr, o, n)						\
   ({									\
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index f58aed354bf..613f6912dfc 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -8,7 +8,8 @@
 #include <asm/page.h>
 #include <asm/ptrace.h>
 
-#define COMPAT_USER_HZ	100
+#define COMPAT_USER_HZ		100
+#define COMPAT_UTS_MACHINE	"mips\0\0\0"
 
 typedef u32		compat_size_t;
 typedef s32		compat_ssize_t;
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1f4df647c38..ac73cede3a0 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -95,6 +95,9 @@
 #ifndef cpu_has_smartmips
 #define cpu_has_smartmips	(cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
 #endif
+#ifndef kernel_uses_smartmips_rixi
+#define kernel_uses_smartmips_rixi 0
+#endif
 #ifndef cpu_has_vtag_icache
 #define cpu_has_vtag_icache	(cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
 #endif
@@ -191,6 +194,9 @@
 # ifndef cpu_has_64bit_addresses
 # define cpu_has_64bit_addresses	0
 # endif
+# ifndef cpu_vmbits
+# define cpu_vmbits 31
+# endif
 #endif
 
 #ifdef CONFIG_64BIT
@@ -209,6 +215,10 @@
 # ifndef cpu_has_64bit_addresses
 # define cpu_has_64bit_addresses	1
 # endif
+# ifndef cpu_vmbits
+# define cpu_vmbits cpu_data[0].vmbits
+# define __NEED_VMBITS_PROBE
+# endif
 #endif
 
 #if defined(CONFIG_CPU_MIPSR2_IRQ_VI) && !defined(cpu_has_vint)
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 126044308de..b39def3f6e0 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -58,6 +58,9 @@ struct cpuinfo_mips {
 	struct cache_desc	tcache;	/* Tertiary/split secondary cache */
 	int			srsets;	/* Shadow register sets */
 	int			core;	/* physical core number */
+#ifdef CONFIG_64BIT
+	int			vmbits;	/* Virtual memory size in bits */
+#endif
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 	/*
 	 * In the MIPS MT "SMTC" model, each TC is considered
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index cf373a95fe4..a5acda41694 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -224,7 +224,7 @@ enum cpu_type_enum {
 	 * MIPS64 class processors
 	 */
 	CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
-	CPU_CAVIUM_OCTEON,
+	CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
 
 	CPU_LAST
 };
diff --git a/arch/mips/include/asm/current.h b/arch/mips/include/asm/current.h
index 559db66b979..4c51401b553 100644
--- a/arch/mips/include/asm/current.h
+++ b/arch/mips/include/asm/current.h
@@ -1,23 +1 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 2002 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#ifndef _ASM_CURRENT_H
-#define _ASM_CURRENT_H
-
-#include <linux/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct * get_current(void)
-{
-	return current_thread_info()->task;
-}
-
-#define current get_current()
-
-#endif /* _ASM_CURRENT_H */
+#include <asm-generic/current.h>
diff --git a/arch/mips/include/asm/dec/kn01.h b/arch/mips/include/asm/dec/kn01.h
index 28fa717ac42..88d9ffd7425 100644
--- a/arch/mips/include/asm/dec/kn01.h
+++ b/arch/mips/include/asm/dec/kn01.h
@@ -80,7 +80,6 @@ struct pt_regs;
 
 extern u16 cached_kn01_csr;
-extern spinlock_t kn01_lock;
 
 extern void dec_kn01_be_init(void);
 extern int dec_kn01_be_handler(struct pt_regs *regs, int is_fixup);
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
index d8f9872b0e2..06746c5e809 100644
--- a/arch/mips/include/asm/device.h
+++ b/arch/mips/include/asm/device.h
@@ -4,4 +4,3 @@
  * This file is released under the GPLv2
  */
 #include <asm-generic/device.h>
-
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 7a6a35dbe52..e53d7bed5cd 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -334,14 +334,14 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
 
 #define ELF_HWCAP	(0)
 
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
-
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
 
-#define ELF_PLATFORM  (NULL)
+#define ELF_PLATFORM  __elf_platform
+extern const char *__elf_platform;
 
 /*
  * See comments in asm-alpha/elf.h, this is the same thing
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 3986cd8704f..ce35c9af0c2 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -4,7 +4,7 @@
  * more details.
 *
 * Copyright (C) 2009 DSLab, Lanzhou University, China
- * Author: Wu Zhangjin <wuzj@lemote.com>
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 */
 
 #ifndef _ASM_MIPS_FTRACE_H
diff --git a/arch/mips/include/asm/i8259.h b/arch/mips/include/asm/i8259.h
index 8572a2d9048..c7e278447c0 100644
--- a/arch/mips/include/asm/i8259.h
+++ b/arch/mips/include/asm/i8259.h
@@ -35,7 +35,7 @@
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern spinlock_t i8259A_lock;
+extern raw_spinlock_t i8259A_lock;
 
 extern int i8259A_irq_pending(unsigned int irq);
 extern void make_8259A_irq(unsigned int irq);
@@ -51,7 +51,7 @@ static inline int i8259_irq(void)
 {
 	int irq;
 
-	spin_lock(&i8259A_lock);
+	raw_spin_lock(&i8259A_lock);
 
 	/* Perform an interrupt acknowledge cycle on controller 1. */
 	outb(0x0C, PIC_MASTER_CMD);	/* prepare for poll */
@@ -78,7 +78,7 @@ static inline int i8259_irq(void)
 		irq = -1;
 	}
 
-	spin_unlock(&i8259A_lock);
+	raw_spin_unlock(&i8259A_lock);
 
 	return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
 }
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 436878e4e06..c98bf514ec7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -447,6 +447,24 @@ __BUILDIO(q, u64)
 #define readl_relaxed readl
 #define readq_relaxed readq
 
+#define readb_be(addr)							\
+	__raw_readb((__force unsigned *)(addr))
+#define readw_be(addr)							\
+	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
+#define readl_be(addr)							\
+	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
+#define readq_be(addr)							\
+	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
+
+#define writeb_be(val, addr)						\
+	__raw_writeb((val), (__force unsigned *)(addr))
+#define writew_be(val, addr)						\
+	__raw_writew(cpu_to_be16((val)), (__
