| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d | /include/asm-sh64 |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-sh64')
89 files changed, 6757 insertions, 0 deletions
diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h
new file mode 100644
index 00000000000..e1995e86b66
--- /dev/null
+++ b/include/asm-sh64/a.out.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_SH64_A_OUT_H
+#define __ASM_SH64_A_OUT_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/a.out.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ *
+ */
+
+struct exec
+{
+  unsigned long a_info;         /* Use macros N_MAGIC, etc for access */
+  unsigned a_text;              /* length of text, in bytes */
+  unsigned a_data;              /* length of data, in bytes */
+  unsigned a_bss;               /* length of uninitialized data area for file, in bytes */
+  unsigned a_syms;              /* length of symbol table data in file, in bytes */
+  unsigned a_entry;             /* start address */
+  unsigned a_trsize;            /* length of relocation info for text, in bytes */
+  unsigned a_drsize;            /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a)     ((a).a_trsize)
+#define N_DRSIZE(a)     ((a).a_drsize)
+#define N_SYMSIZE(a)    ((a).a_syms)
+
+#ifdef __KERNEL__
+
+#define STACK_TOP       TASK_SIZE
+
+#endif
+
+#endif /* __ASM_SH64_A_OUT_H */
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
new file mode 100644
index 00000000000..8c3872d3e65
--- /dev/null
+++ b/include/asm-sh64/atomic.h
@@ -0,0 +1,126 @@
+#ifndef __ASM_SH64_ATOMIC_H
+#define __ASM_SH64_ATOMIC_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/atomic.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ *
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i)  ( (atomic_t) { (i) } )
+
+#define atomic_read(v)          ((v)->counter)
+#define atomic_set(v,i)         ((v)->counter = (i))
+
+#include <asm/system.h>
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+
+static __inline__ void atomic_add(int i, atomic_t * v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v += i;
+        local_irq_restore(flags);
+}
+
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v -= i;
+        local_irq_restore(flags);
+}
+
+static __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+        unsigned long temp, flags;
+
+        local_irq_save(flags);
+        temp = *(long *)v;
+        temp += i;
+        *(long *)v = temp;
+        local_irq_restore(flags);
+
+        return temp;
+}
+
+#define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
+
+static __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+        unsigned long temp, flags;
+
+        local_irq_save(flags);
+        temp = *(long *)v;
+        temp -= i;
+        *(long *)v = temp;
+        local_irq_restore(flags);
+
+        return temp;
+}
+
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+
+#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_dec(v) atomic_sub(1,(v))
+
+static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v &= ~mask;
+        local_irq_restore(flags);
+}
+
+static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v |= mask;
+        local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on SH */
+#define smp_mb__before_atomic_dec()     barrier()
+#define smp_mb__after_atomic_dec()      barrier()
+#define smp_mb__before_atomic_inc()     barrier()
+#define smp_mb__after_atomic_inc()      barrier()
+
+#endif /* __ASM_SH64_ATOMIC_H */
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
new file mode 100644
index 00000000000..e1ff63e0922
--- /dev/null
+++ b/include/asm-sh64/bitops.h
@@ -0,0 +1,516 @@
+#ifndef __ASM_SH64_BITOPS_H
+#define __ASM_SH64_BITOPS_H
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * include/asm-sh64/bitops.h
+ *
+ * Copyright (C) 2000, 2001 Paolo Alberelli
+ * Copyright (C) 2003 Paul Mundt
+ */
+
+#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <asm/system.h>
+/* For __swab32 */
+#include <asm/byteorder.h>
+
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+        int mask;
+        volatile unsigned int *a = addr;
+        unsigned long flags;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        local_irq_save(flags);
+        *a |= mask;
+        local_irq_restore(flags);
+}
+
+static inline void __set_bit(int nr, void *addr)
+{
+        int mask;
+        unsigned int *a = addr;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        *a |= mask;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit()      barrier()
+#define smp_mb__after_clear_bit()       barrier()
+static inline void clear_bit(int nr, volatile unsigned long *a)
+{
+        int mask;
+        unsigned long flags;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        local_irq_save(flags);
+        *a &= ~mask;
+        local_irq_restore(flags);
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *a)
+{
+        int mask;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        *a &= ~mask;
+}
+
+static __inline__ void change_bit(int nr, volatile void * addr)
+{
+        int mask;
+        volatile unsigned int *a = addr;
+        unsigned long flags;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        local_irq_save(flags);
+        *a ^= mask;
+        local_irq_restore(flags);
+}
+
+static __inline__ void __change_bit(int nr, volatile void * addr)
+{
+        int mask;
+        volatile unsigned int *a = addr;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        *a ^= mask;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+        int mask, retval;
+        volatile unsigned int *a = addr;
+        unsigned long flags;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        local_irq_save(flags);
+        retval = (mask & *a) != 0;
+        *a |= mask;
+        local_irq_restore(flags);
+
+        return retval;
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+        int mask, retval;
+        volatile unsigned int *a = addr;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        retval = (mask & *a) != 0;
+        *a |= mask;
+
+        return retval;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+        int mask, retval;
+        volatile unsigned int *a = addr;
+        unsigned long flags;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        local_irq_save(flags);
+        retval = (mask & *a) != 0;
+        *a &= ~mask;
+        local_irq_restore(flags);
+
+        return retval;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+        int mask, retval;
+        volatile unsigned int *a = addr;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        retval = (mask & *a) != 0;
+        *a &= ~mask;
+
+        return retval;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+        int mask, retval;
+        volatile unsigned int *a = addr;
+        unsigned long flags;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        local_irq_save(flags);
+        retval = (mask & *a) != 0;
+        *a ^= mask;
+        local_irq_restore(flags);
+
+        return retval;
+}
+
+static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
+{
+        int mask, retval;
+        volatile unsigned int *a = addr;
+
+        a += nr >> 5;
+        mask = 1 << (nr & 0x1f);
+        retval = (mask & *a) != 0;
+        *a ^= mask;
+
+        return retval;
+}
+
+static __inline__ int test_bit(int nr, const volatile void *addr)
+{
+        return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
+}
+
+static __inline__ unsigned long ffz(unsigned long word)
+{
+        unsigned long result, __d2, __d3;
+
+        __asm__("gettr  tr0, %2\n\t"
+                "pta    $+32, tr0\n\t"
+                "andi   %1, 1, %3\n\t"
+                "beq    %3, r63, tr0\n\t"
+                "pta    $+4, tr0\n"
+                "0:\n\t"
+                "shlri.l        %1, 1, %1\n\t"
+                "addi   %0, 1, %0\n\t"
+                "andi   %1, 1, %3\n\t"
+                "beqi   %3, 1, tr0\n"
+                "1:\n\t"
+                "ptabs  %2, tr0\n\t"
+                : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
+                : "0" (0L), "1" (word));
+
+        return result;
+}
+
+/**
+ * __ffs - find first bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+        int r = 0;
+
+        if (!word)
+                return 0;
+        if (!(word & 0xffff)) {
+                word >>= 16;
+                r += 16;
+        }
+        if (!(word & 0xff)) {
+                word >>= 8;
+                r += 8;
+        }
+        if (!(word & 0xf)) {
+                word >>= 4;
+                r += 4;
+        }
+        if (!(word & 3)) {
+                word >>= 2;
+                r += 2;
+        }
+        if (!(word & 1)) {
+                word >>= 1;
+                r += 1;
+        }
+        return r;
+}
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static inline unsigned long find_next_bit(const unsigned long *addr,
+        unsigned long size, unsigned long offset)
+{
+        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+        unsigned int result = offset & ~31UL;
+        unsigned int tmp;
+
+        if (offset >= size)
+                return size;
+        size -= result;
+        offset &= 31UL;
+        if (offset) {
+                tmp = *p++;
+                tmp &= ~0UL << offset;
+                if (size < 32)
+                        goto found_first;
+                if (tmp)
+                        goto found_middle;
+                size -= 32;
+                result += 32;
+        }
+        while (size >= 32) {
+                if ((tmp = *p++) != 0)
+                        goto found_middle;
+                result += 32;
+                size -= 32;
+        }
+        if (!size)
+                return result;
+        tmp = *p;
+
+found_first:
+        tmp &= ~0UL >> (32 - size);
+        if (tmp == 0UL)        /* Are any bits set? */
+                return result + size; /* Nope. */
+found_middle:
+        return result + __ffs(tmp);
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ *
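For readers skimming these new headers, here is a minimal usage sketch showing how the atomic counter and bitop primitives defined above fit together. It is not part of the commit: the names example_get, example_put, users, and init_flags are invented for illustration, and the snippet assumes a kernel build environment in which <asm/atomic.h> and <asm/bitops.h> resolve to the sh64 headers added here.

    /* Illustrative sketch only -- not part of this commit. */
    #include <asm/atomic.h>
    #include <asm/bitops.h>

    static atomic_t users = ATOMIC_INIT(0);   /* reference count            */
    static unsigned long init_flags;          /* hypothetical flag bitmap   */

    static void example_get(void)
    {
            /* atomic_inc_return() yields the new count; the first
             * caller (0 -> 1) performs the one-time setup. */
            if (atomic_inc_return(&users) == 1)
                    set_bit(0, &init_flags);  /* mark "initialised" */
    }

    static void example_put(void)
    {
            /* atomic_dec_and_test() is true only when the count
             * reaches zero, i.e. for the last caller. */
            if (atomic_dec_and_test(&users))
                    clear_bit(0, &init_flags);
    }

Because this port implements the atomics and bitops by disabling interrupts around a plain memory update, the pattern above is safe on the uniprocessor sh64 configurations these headers target.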