author		David Woodhouse <dwmw2@infradead.org>	2007-01-18 10:34:51 +1100
committer	David Woodhouse <dwmw2@infradead.org>	2007-01-18 10:34:51 +1100
commit		9cdf083f981b8d37b3212400a359368661385099 (patch)
tree		aa15a6a08ad87e650dea40fb59b3180bef0d345b /include/asm-sh
parent		e499e01d234a31d59679b7b1e1cf628d917ba49a (diff)
parent		a8b3485287731978899ced11f24628c927890e78 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'include/asm-sh')
52 files changed, 1191 insertions, 2239 deletions
diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h
new file mode 100644
index 00000000000..74f7943cff6
--- /dev/null
+++ b/include/asm-sh/atomic-irq.h
@@ -0,0 +1,71 @@
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v += i;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v -= i;
+	local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = *(long *)v;
+	temp += i;
+	*(long *)v = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long temp, flags;
+
+	local_irq_save(flags);
+	temp = *(long *)v;
+	temp -= i;
+	*(long *)v = temp;
+	local_irq_restore(flags);
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v &= ~mask;
+	local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	*(long *)v |= mask;
+	local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h
new file mode 100644
index 00000000000..4b00b78e3f4
--- /dev/null
+++ b/include/asm-sh/atomic-llsc.h
@@ -0,0 +1,107 @@
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_add	\n"
+"	add	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_sub	\n"
+"	sub	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_add_return	\n"
+"	add	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
+"	bf	1b			\n"
+"	synco				\n"
+	: "=&z" (temp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_sub_return	\n"
+"	sub	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
+"	bf	1b			\n"
+"	synco				\n"
+	: "=&z" (temp)
+	: "r" (i), "r" (&v->counter)
+	: "t");
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
+"	and	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp)
+	: "r" (~mask), "r" (&v->counter)
+	: "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__ (
+"1:	movli.l @%2, %0		! atomic_set_mask	\n"
+"	or	%1, %0			\n"
+"	movco.l	%0, @%2			\n"
+"	bf	1b			\n"
+	: "=&z" (tmp)
+	: "r" (mask), "r" (&v->counter)
+	: "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
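Both new headers implement the same six operations: atomic-irq.h masks interrupts around a plain read-modify-write, while atomic-llsc.h uses SH-4A's movli.l/movco.l pair, where movco.l only succeeds if the reservation taken by movli.l is still intact and bf 1b retries the whole sequence otherwise. A rough host-side sketch of that retry loop, using the GCC/Clang __atomic builtins with a compare-and-swap standing in for the store-conditional (illustration only, not the kernel's implementation):

typedef struct { volatile int counter; } atomic_t;

/* Sketch: the LL/SC retry pattern expressed as a CAS loop. */
static inline void atomic_add_sketch(int i, atomic_t *v)
{
	int old, sum;

	do {
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	/* ~ movli.l */
		sum = old + i;						/* add */
	} while (!__atomic_compare_exchange_n(&v->counter, &old, sum,
					      1, __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));	/* failed CAS ~ bf 1b */
}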
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index 8bdc1ba56f7..e12570b9339 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
 #ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add	\n"
-"	add	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
-"	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
-	: "r" (i), "r" (&v->counter)
-	: "t");
+#include <asm/atomic-llsc.h>
 #else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v += i;
-	local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub	\n"
-"	sub	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
-"	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v -= i;
-	local_irq_restore(flags);
+#include <asm/atomic-irq.h>
 #endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_add_return	\n"
-"	add	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
-"	bf	1b			\n"
-"	synco				\n"
-	: "=&z" (temp), "=r" (&v->counter)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp += i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-#endif
-
-	return temp;
-}
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_sub_return	\n"
-"	sub	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
-"	bf	1b			\n"
-"	synco				\n"
-	: "=&z" (temp), "=r" (&v->counter)
-	: "r" (i), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	temp = *(long *)v;
-	temp -= i;
-	*(long *)v = temp;
-	local_irq_restore(flags);
-#endif
-
-	return temp;
-}
-
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
@@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_clear_mask	\n"
-"	and	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
-"	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
-	: "r" (~mask), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v &= ~mask;
-	local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%3, %0		! atomic_set_mask	\n"
-"	or	%2, %0			\n"
-"	movco.l	%0, @%3			\n"
-"	bf	1b			\n"
-	: "=&z" (tmp), "=r" (&v->counter)
-	: "r" (mask), "r" (&v->counter)
-	: "t");
-#else
-	unsigned long flags;
-
-	local_irq_save(flags);
-	*(long *)v |= mask;
-	local_irq_restore(flags);
-#endif
-}
-
 /* Atomic operations are already serializing on SH */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
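With the CPU-specific bodies moved into the two sub-headers, atomic.h reduces to the #ifdef CONFIG_CPU_SH4A dispatch plus generic helpers such as atomic_add_unless(), kept as context above. A hedged usage sketch of the atomic_inc_not_zero() idiom those helpers provide (struct my_obj and my_obj_get are illustrative names, not kernel API):

struct my_obj {
	atomic_t refcnt;	/* reaches zero when the object dies */
};

/* Take a reference only while the object is still live. */
static int my_obj_get(struct my_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcnt);	/* 0 means already dead */
}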
diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h
index 1b4fc52a59e..2f89dd06d0c 100644
--- a/include/asm-sh/bug.h
+++ b/include/asm-sh/bug.h
@@ -1,19 +1,54 @@
 #ifndef __ASM_SH_BUG_H
 #define __ASM_SH_BUG_H
 
-
 #ifdef CONFIG_BUG
-/*
- * Tell the user there is some problem.
- */
-#define BUG() do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-	*(volatile int *)0 = 0; \
+
+struct bug_frame {
+	unsigned short	opcode;
+	unsigned short	line;
+	const char	*file;
+	const char	*func;
+};
+
+struct pt_regs;
+
+extern void handle_BUG(struct pt_regs *);
+
+#define TRAPA_BUG_OPCODE	0xc33e	/* trapa #0x3e */
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define BUG()				\
+do {					\
+	__asm__ __volatile__ (		\
+		".align	2\n\t"		\
+		".short	%O0\n\t"	\
+		".short	%O1\n\t"	\
+		".long	%O2\n\t"	\
+		".long	%O3\n\t"	\
+		:			\
+		: "n" (TRAPA_BUG_OPCODE),	\
+		  "i" (__LINE__), "X" (__FILE__),	\
+		  "X" (__FUNCTION__));	\
+} while (0)
+
+#else
+
+#define BUG()				\
+do {					\
+	__asm__ __volatile__ (		\
+		".align	2\n\t"		\
+		".short	%O0\n\t"	\
+		:			\
+		: "n" (TRAPA_BUG_OPCODE));	\
 } while (0)
 
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
 #define HAVE_ARCH_BUG
-#endif
+
+#endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
 
-#endif
+#endif /* __ASM_SH_BUG_H */
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h
index beeea40f549..a294997a841 100644
--- a/include/asm-sh/bugs.h
+++ b/include/asm-sh/bugs.h
@@ -16,25 +16,37 @@
 
 static void __init check_bugs(void)
 {
-	extern char *get_cpu_subtype(void);
 	extern unsigned long loops_per_jiffy;
-	char *p= &init_utsname()->machine[2]; /* "sh" */
+	char *p = &init_utsname()->machine[2]; /* "sh" */
 
 	cpu_data->loops_per_jiffy = loops_per_jiffy;
 
 	switch (cpu_data->type) {
-	case CPU_SH7604:
+	case CPU_SH7604 ... CPU_SH7619:
 		*p++ = '2';
 		break;
+	case CPU_SH7206:
+		*p++ = '2';
+		*p++ = 'a';
+		break;
 	case CPU_SH7705 ... CPU_SH7300:
 		*p++ = '3';
 		break;
 	case CPU_SH7750 ... CPU_SH4_501:
 		*p++ = '4';
 		break;
-	case CPU_SH7770 ... CPU_SH7781:
+	case CPU_SH7770 ... CPU_SH7785:
+		*p++ = '4';
+		*p++ = 'a';
+		break;
+	case CPU_SH73180 ... CPU_SH7722:
 		*p++ = '4';
 		*p++ = 'a';
+		*p++ = 'l';
+		*p++ = '-';
+		*p++ = 'd';
+		*p++ = 's';
+		*p++ = 'p';
 		break;
 	default:
 		*p++ = '?';
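The rewritten BUG() plants a trapa #0x3e opcode in the instruction stream and, under CONFIG_DEBUG_BUGVERBOSE, follows it in-line with the file/line/function metadata, so the trap handler can recover everything from the faulting PC. A hypothetical sketch of that recovery (illustrative only, not the kernel's actual handle_BUG(); it assumes CONFIG_DEBUG_BUGVERBOSE so the full bug_frame is present):

/* Sketch: read back the bug_frame that BUG() embedded at the trap site. */
static void decode_bug_frame(unsigned long trap_pc)
{
	struct bug_frame *bf = (struct bug_frame *)trap_pc;

	if (bf->opcode == TRAPA_BUG_OPCODE)
		printk("kernel BUG at %s:%d [%s]!\n",
		       bf->file, bf->line, bf->func);
}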
diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h
index 08168afe674..4bc8357e889 100644
--- a/include/asm-sh/checksum.h
+++ b/include/asm-sh/checksum.h
@@ -23,7 +23,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -33,35 +33,37 @@ asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsign
  * better 64-bit) boundary
  */
 
-asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsigned char *dst,
-						  int len, int sum, int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+					    int len, __wsum sum,
+					    int *src_err_ptr, int *dst_err_ptr);
 
 /*
  *	Note: when you get a NULL pointer exception here this means someone
- *	passed in an incorrect kernel address to one of these functions. 
- *	
- *	If you use these functions directly please don't forget the 
+ *	passed in an incorrect kernel address to one of these functions.
+ *
+ *	If you use these functions directly please don't forget the
  *	access_ok().
 */
-static __inline__
-unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
-					int len, int sum)
+static inline
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
-static __inline__
-unsigned int csum_partial_copy_from_user (const unsigned char *src, unsigned char *dst,
-						int len, int sum, int *err_ptr)
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+				   int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
+	return csum_partial_copy_generic((__force const void *)src, dst,
+					 len, sum, err_ptr, NULL);
 }
 
 /*
  *	Fold a partial checksum
  */
-static __inline__ unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("swap.w %0, %1\n\t"
@@ -74,7 +76,7 @@ static __inline__ unsigned int csum_fold(unsigned int sum)
 		: "=r" (sum), "=&r" (__dummy)
 		: "0" (sum)
 		: "t");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
@@ -84,7 +86,7 @@ static __inline__ unsigned int csum_fold(unsigned int sum)
  *	i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
  *	for linux by * Arnt Gulbrandsen.
  */
-static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, __dummy0, __dummy1;
 
@@ -112,16 +114,15 @@ static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int
 	return	csum_fold(sum);
 }
 
-static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 #ifdef __LITTLE_ENDIAN__
-	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+	unsigned long len_proto = (proto + len) << 8;
 #else
-	unsigned long len_proto = (proto<<16)+len;
+	unsigned long len_proto = proto + len;
 #endif
 	__asm__("clrt\n\t"
 		"addc	%0, %1\n\t"
@@ -132,6 +133,7 @@ static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
 		: "=r" (sum), "=r" (len_proto)
 		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
 		: "t");
+
 	return sum;
 }
 
@@ -139,32 +141,28 @@ static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
-static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						       unsigned long daddr,
-						       unsigned short len,
-						       unsigned short proto,
-						       unsigned int sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
 }
 
 /*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
-
-static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
-	return csum_fold (csum_partial(buff, len, 0));
+	return csum_fold(csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-#ifdef CONFIG_IPV6
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u32 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				      const struct in6_addr *daddr,
+				      __u32 len, unsigned short proto,
+				      __wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("clrt\n\t"
@@ -189,29 +187,29 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 		"movt	%1\n\t"
 		"add	%1, %0\n"
 		: "=r" (sum), "=&r" (__dummy)
-		: "r" (saddr), "r" (daddr), 
+		: "r" (saddr), "r" (daddr),
 		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
 		: "t");
 
 	return csum_fold(sum);
 }
-#endif
 
-/* 
+/*
 *	Copy and checksum to user
 */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
-						      unsigned char __user *dst,
-						      int len, int sum,
-						      int *err_ptr)
+static inline __wsum csum_and_copy_to_user(const void *src,
+					   void __user *dst,
+					   int len, __wsum sum,
+					   int *err_ptr)
 {
 	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+		return csum_partial_copy_generic((__force const void *)src,
+						 dst, len, sum, NULL, err_ptr);
 
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 
 #endif /* __ASM_SH_CHECKSUM_H */
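The checksum.h changes are almost entirely annotation: the __wsum/__sum16 types let sparse type-check the one's-complement arithmetic, and csum_tcpudp_nofold() computes its len_proto term in a simpler form. For reference, what the swap.w/addc sequence in csum_fold() computes, restated in portable C (an illustration of the semantics, not the SH implementation):

#include <stdint.h>

static inline uint16_t csum_fold_ref(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the final carry */
	return (uint16_t)~sum;			/* complement for the wire */
}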
diff --git a/include/asm-sh/clock.h b/include/asm-sh/clock.h
index fdfb75b30f0..1df92807f8c 100644
--- a/include/asm-sh/clock.h
+++ b/include/asm-sh/clock.h
@@ -4,6 +4,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/clk.h>
 
 struct clk;
 
@@ -18,7 +19,7 @@ struct clk_ops {
 struct clk {
 	struct list_head	node;
 	const char		*name;
-
+	int			id;
 	struct module		*owner;
 
 	struct clk		*parent;
@@ -40,22 +41,13 @@ void arch_init_clk_ops(struct clk_ops **, int type);
 int clk_init(void);
 
 int __clk_enable(struct clk *);
-int clk_enable(struct clk *);
-
 void __clk_disable(struct clk *);
-void clk_disable(struct clk *);
 
-int clk_set_rate(struct clk *, unsigned long rate);
-unsigned long clk_get_rate(struct clk *);
 void clk_recalc_rate(struct clk *);
 
-struct clk *clk_get(const char *id);
-void clk_put(struct clk *);
-
 int clk_register(struct clk *);
 void clk_unregister(struct clk *);
 
 int show_clocks(struct seq_file *m);
 
 #endif /* __ASM_SH_CLOCK_H */
-
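clock.h now pulls in <linux/clk.h> and drops its private prototypes for the generic consumer interface (clk_get(), clk_enable(), clk_get_rate(), and friends), keeping only the SH-internal helpers. A hedged consumer-side sketch of that generic API ("peripheral_clk" is a made-up clock name, and error handling is abbreviated):

/* Sketch: typical clk consumer sequence via <linux/clk.h>. */
static int clk_usage_sketch(struct device *dev)
{
	struct clk *clk = clk_get(dev, "peripheral_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	printk(KERN_INFO "running at %lu Hz\n", clk_get_rate(clk));

	clk_disable(clk);
	clk_put(clk);
	return 0;
}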
diff --git a/include/asm-sh/cpu-sh2/cache.h b/include/asm-sh/cpu-sh2/cache.h
index cd96402e856..20b9796842d 100644
--- a/include/asm-sh/cpu-sh2/cache.h
+++ b/include/asm-sh/cpu-sh2/cache.h
@@ -12,6 +12,7 @@
 
 #define L1_CACHE_SHIFT	4
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
 #define CCR		0xfffffe92	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache enable */
@@ -27,5 +28,26 @@
 #define CCR_CACHE_ORA	CCR_CACHE_TW
 #define CCR_CACHE_WT	0x00	/* SH-2 is _always_ write-through */
 
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CCR1		0xffffffec
+#define CCR		CCR1
+
+#define CCR_CACHE_CE	0x01	/* Cache enable */
+#define CCR_CACHE_WT	0x06	/* CCR[bit1=1,bit2=1] */
+				/* 0x00000000-0x7fffffff: Write-through */
+				/* 0x80000000-0x9fffffff: Write-back    */
+				/* 0xc0000000-0xdfffffff: Write-through */
+#define CCR_CACHE_CB	0x00	/* CCR[bit1=0,bit2=0] */
+				/* 0x00000000-0x7fffffff: Write-back    */
+				/* 0x80000000-0x9fffffff: Write-through */