| Field | Value | Date |
|---|---|---|
| author | James Morris <jmorris@namei.org> | 2008-08-28 10:47:34 +1000 |
| committer | James Morris <jmorris@namei.org> | 2008-08-28 10:47:34 +1000 |
| commit | 86d688984deefa3ae5a802880c11f2b408b5d6cf (patch) | |
| tree | 7ea5e8189b0a774626d3ed7c3c87df2495a4c4a0 /arch/avr32/include/asm/system.h | |
| parent | 93c06cbbf9fea5d5be1778febb7fa9ab1a74e5f5 (diff) | |
| parent | 4c246edd2550304df5b766cc841584b2bb058843 (diff) | |
Merge branch 'master' into next
Diffstat (limited to 'arch/avr32/include/asm/system.h')
-rw-r--r-- | arch/avr32/include/asm/system.h | 178 |
1 files changed, 178 insertions, 0 deletions
diff --git a/arch/avr32/include/asm/system.h b/arch/avr32/include/asm/system.h
new file mode 100644
index 00000000000..9702c2213e1
--- /dev/null
+++ b/arch/avr32/include/asm/system.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_AVR32_SYSTEM_H
+#define __ASM_AVR32_SYSTEM_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#include <asm/ptrace.h>
+#include <asm/sysreg.h>
+
+#define xchg(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+#define nop() asm volatile("nop")
+
+#define mb()			asm volatile("" : : : "memory")
+#define rmb()			mb()
+#define wmb()			asm volatile("sync 0" : : : "memory")
+#define read_barrier_depends()	do { } while(0)
+#define set_mb(var, value)	do { var = value; mb(); } while(0)
+
+/*
+ * Help PathFinder and other Nexus-compliant debuggers keep track of
+ * the current PID by emitting an Ownership Trace Message each time we
+ * switch task.
+ */
+#ifdef CONFIG_OWNERSHIP_TRACE
+#include <asm/ocd.h>
+#define finish_arch_switch(prev)			\
+	do {						\
+		ocd_write(PID, prev->pid);		\
+		ocd_write(PID, current->pid);		\
+	} while(0)
+#endif
+
+/*
+ * switch_to(prev, next, last) should switch from task `prev' to task
+ * `next'. `prev' will never be the same as `next'.
+ *
+ * We just delegate everything to the __switch_to assembly function,
+ * which is implemented in arch/avr32/kernel/switch_to.S
+ *
+ * mb() tells GCC not to cache `current' across this call.
+ */
+struct cpu_context;
+struct task_struct;
+extern struct task_struct *__switch_to(struct task_struct *,
+				       struct cpu_context *,
+				       struct cpu_context *);
+#define switch_to(prev, next, last)					\
+	do {								\
+		last = __switch_to(prev, &prev->thread.cpu_context + 1,	\
+				   &next->thread.cpu_context);		\
+	} while (0)
+
+#ifdef CONFIG_SMP
+# error "The AVR32 port does not support SMP"
+#else
+# define smp_mb()		barrier()
+# define smp_rmb()		barrier()
+# define smp_wmb()		barrier()
+# define smp_read_barrier_depends()	do { } while(0)
+#endif
+
+#include <linux/irqflags.h>
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline unsigned long xchg_u32(u32 val, volatile u32 *m)
+{
+	u32 ret;
+
+	asm volatile("xchg %[ret], %[m], %[val]"
+			: [ret] "=&r"(ret), "=m"(*m)
+			: "m"(*m), [m] "r"(m), [val] "r"(val)
+			: "memory");
+	return ret;
+}
+
+static inline unsigned long __xchg(unsigned long x,
+				   volatile void *ptr,
+				   int size)
+{
+	switch(size) {
+	case 4:
+		return xchg_u32(x, ptr);
+	default:
+		__xchg_called_with_bad_pointer();
+		return x;
+	}
+}
+
+static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+					  unsigned long new)
+{
+	__u32 ret;
+
+	asm volatile(
+		"1:	ssrf	5\n"
+		"	ld.w	%[ret], %[m]\n"
+		"	cp.w	%[ret], %[old]\n"
+		"	brne	2f\n"
+		"	stcond	%[m], %[new]\n"
+		"	brne	1b\n"
+		"2:\n"
+		: [ret] "=&r"(ret), [m] "=m"(*m)
+		: "m"(m), [old] "ir"(old), [new] "r"(new)
+		: "memory", "cc");
+	return ret;
+}
+
+extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
+	volatile int *m, unsigned long old, unsigned long new);
+#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
+
+/* This function doesn't exist, so you'll get a linker error
+   if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+	case 8:
+		return __cmpxchg_u64(ptr, old, new);
+	}
+
+	__cmpxchg_called_with_bad_pointer();
+	return old;
+}
+
+#define cmpxchg(ptr, old, new)					\
+	((typeof(*(ptr)))__cmpxchg((ptr), (unsigned long)(old),	\
+				   (unsigned long)(new),	\
+				   sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	switch (size) {
+	case 4:
+		return __cmpxchg_u32(ptr, old, new);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new, size);
+	}
+
+	return old;
+}
+
+#define cmpxchg_local(ptr, old, new)					\
+	((typeof(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(old),	\
+					 (unsigned long)(new),		\
+					 sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+struct pt_regs;
+void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
+void _exception(long signr, struct pt_regs *regs, int code,
+		unsigned long addr);
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_AVR32_SYSTEM_H */
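
For context (this is not part of the commit), the primitives added above are normally consumed through a compare-and-swap retry loop: snapshot the old value, compute the new one, and let `cmpxchg()` publish it only if the location still holds the snapshot. Below is a minimal sketch assuming kernel context and only the `xchg()`/`cmpxchg()` macros from this header; the function and variable names (`counter_inc`, `state_set`) are purely illustrative.

```c
#include <asm/system.h>		/* xchg(), cmpxchg() from the header above */

/*
 * Hypothetical example: lock-free increment of a shared 32-bit counter.
 * cmpxchg() returns the value it actually found at the location; the
 * store happens only when that value still equals 'old', so the loop
 * retries whenever another writer got in between.
 */
static inline unsigned int counter_inc(volatile unsigned int *counter)
{
	unsigned int old, new;

	do {
		old = *counter;		/* snapshot the current value */
		new = old + 1;		/* compute the desired value  */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}

/*
 * Hypothetical example: unconditionally publish a new state word and
 * fetch the previous one in a single atomic step using xchg().
 */
static inline unsigned int state_set(volatile unsigned int *state,
				     unsigned int value)
{
	return xchg(state, value);
}
```

Note that only 4-byte operands take the inline path: `__xchg()` and `__cmpxchg()` dispatch on `sizeof(*(ptr))` and otherwise call `__xchg_called_with_bad_pointer()` or `__cmpxchg_u64_unsupported_on_32bit_kernels()`, which, as the header's own comment explains, turns an unsupported operand width into a link-time error rather than silently non-atomic code.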