Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--  arch/sh/include/asm/Kbuild            7
-rw-r--r--  arch/sh/include/asm/atomic.h          6
-rw-r--r--  arch/sh/include/asm/barrier.h        21
-rw-r--r--  arch/sh/include/asm/bitops.h          7
-rw-r--r--  arch/sh/include/asm/clkdev.h          2
-rw-r--r--  arch/sh/include/asm/fixmap.h         39
-rw-r--r--  arch/sh/include/asm/fpu.h             2
-rw-r--r--  arch/sh/include/asm/ftrace.h         10
-rw-r--r--  arch/sh/include/asm/io.h              4
-rw-r--r--  arch/sh/include/asm/io_trapped.h      2
-rw-r--r--  arch/sh/include/asm/machvec.h         2
-rw-r--r--  arch/sh/include/asm/mmu_context.h     2
-rw-r--r--  arch/sh/include/asm/pci.h             5
-rw-r--r--  arch/sh/include/asm/pgalloc.h         5
-rw-r--r--  arch/sh/include/asm/processor_32.h   10
-rw-r--r--  arch/sh/include/asm/processor_64.h   10
-rw-r--r--  arch/sh/include/asm/syscalls_32.h    12
-rw-r--r--  arch/sh/include/asm/thread_info.h     2
-rw-r--r--  arch/sh/include/asm/tlb.h             8
-rw-r--r--  arch/sh/include/asm/traps_32.h       16
-rw-r--r--  arch/sh/include/asm/unistd.h          1
21 files changed, 60 insertions, 113 deletions
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 280bea9e5e2..c19e47dacb3 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -8,18 +8,21 @@ generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
 generic-y += fcntl.h
+generic-y += hash.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mman.h
+generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += poll.h
-generic-y += mman.h
-generic-y += msgbuf.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f4c1c20bcdf..f57b8a6743b 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>

 #define ATOMIC_INIT(i)	{ (i) }

@@ -62,9 +63,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }

-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 #endif /* __ASM_SH_ATOMIC_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 72c103dae30..43715308b06 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -26,29 +26,14 @@
 #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define wmb()		mb()
 #define ctrl_barrier()	__icbi(PAGE_OFFSET)
-#define read_barrier_depends()	do { } while(0)
 #else
-#define mb()		__asm__ __volatile__ ("": : :"memory")
-#define rmb()		mb()
-#define wmb()		__asm__ __volatile__ ("": : :"memory")
 #define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
-#define read_barrier_depends()	do { } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
 #endif

 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)

+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_SH_BARRIER_H */
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index ea8706d94f0..fc8e652cf17 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -9,6 +9,7 @@

 /* For __swab32 */
 #include <asm/byteorder.h>
+#include <asm/barrier.h>

 #ifdef CONFIG_GUSA_RB
 #include <asm/bitops-grb.h>
@@ -22,12 +23,6 @@
 #include <asm-generic/bitops/non-atomic.h>
 #endif

-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
-
 #ifdef CONFIG_SUPERH32
 static inline unsigned long ffz(unsigned long word)
 {
diff --git a/arch/sh/include/asm/clkdev.h b/arch/sh/include/asm/clkdev.h
index 6ba91868201..c41901465fb 100644
--- a/arch/sh/include/asm/clkdev.h
+++ b/arch/sh/include/asm/clkdev.h
@@ -25,7 +25,9 @@ static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
 		return kzalloc(size, GFP_KERNEL);
 }

+#ifndef CONFIG_COMMON_CLK
 #define __clk_put(clk)
 #define __clk_get(clk) ({ 1; })
+#endif

 #endif /* __CLKDEV_H__ */
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index cbe0186b679..4daf91c3b72 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -79,13 +79,6 @@ extern void __set_fixmap(enum fixed_addresses idx,
 			 unsigned long phys, pgprot_t flags);
 extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);

-#define set_fixmap(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
-		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 /*
  * used by vmalloc.c.
  *
@@ -101,36 +94,8 @@ extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
 #define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

-#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
-	/*
-	 * this branch gets completely eliminated after inlining,
-	 * except when someone tries to use fixaddr indices in an
-	 * illegal way. (such as mixing up address types or using
-	 * out-of-range indices).
-	 *
-	 * If it doesn't get removed, the linker will complain
-	 * loudly with a reasonably clear error message..
-	 */
-	if (idx >= __end_of_fixed_addresses)
-		__this_fixmap_does_not_exist();
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE

-        return __fix_to_virt(idx);
-}
+#include <asm-generic/fixmap.h>

-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
-	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
-	return __virt_to_fix(vaddr);
-}
 #endif
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 06c4281aab6..09fc2bc8a79 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }

 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 13e9966464c..e79fb6ebaa4 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -40,15 +40,7 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)

 /* arch/sh/kernel/return_address.c */
 extern void *return_address(unsigned int);

-#define HAVE_ARCH_CALLER_ADDR
-
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#define CALLER_ADDR1 ((unsigned long)return_address(1))
-#define CALLER_ADDR2 ((unsigned long)return_address(2))
-#define CALLER_ADDR3 ((unsigned long)return_address(3))
-#define CALLER_ADDR4 ((unsigned long)return_address(4))
-#define CALLER_ADDR5 ((unsigned long)return_address(5))
-#define CALLER_ADDR6 ((unsigned long)return_address(6))
+#define ftrace_return_address(n) return_address(n)

 #endif /* __ASSEMBLY__ */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 629db2ad791..728c4c571f4 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -122,7 +122,7 @@ __BUILD_MEMORY_STRING(__raw_, l, u32)

 __BUILD_MEMORY_STRING(__raw_, q, u64)

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP

 /*
  * Slowdown I/O port space accesses for antique hardware.
@@ -218,7 +218,7 @@ __BUILD_IOPORT_STRING(w, u16)
 __BUILD_IOPORT_STRING(l, u32)
 __BUILD_IOPORT_STRING(q, u64)

-#else /* !CONFIG_HAS_IOPORT */
+#else /* !CONFIG_HAS_IOPORT_MAP */

 #include <asm/io_noioport.h>

diff --git a/arch/sh/include/asm/io_trapped.h b/arch/sh/include/asm/io_trapped.h
index f1251d4f0ba..4ab94ef5107 100644
--- a/arch/sh/include/asm/io_trapped.h
+++ b/arch/sh/include/asm/io_trapped.h
@@ -36,7 +36,7 @@ __ioremap_trapped(unsigned long offset, unsigned long size)
 #define __ioremap_trapped(offset, size) NULL
 #endif

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 extern struct list_head trapped_io;

 static inline void __iomem *
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index eb9c20d971d..d3324e4f372 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -21,7 +21,7 @@ struct sh_machine_vector {
 	int (*mv_irq_demux)(int irq);
 	void (*mv_init_irq)(void);

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 	void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
 	void (*mv_ioport_unmap)(void __iomem *);
 #endif
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 21c5088788d..b9d9489a501 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -81,7 +81,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)

 		/*
 		 * Fix version; Note that we avoid version #0
-		 * to distingush NO_CONTEXT.
+		 * to distinguish NO_CONTEXT.
 		 */
 		if (!asid)
 			asid = MMU_CONTEXT_FIRST_VERSION;
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index bff96c2e7d2..5b451155299 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -70,11 +70,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	enum pci_mmap_state mmap_state, int write_combine);
 extern void pcibios_set_master(struct pci_dev *dev);

-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
-	/* We don't do dynamic PCI IRQ allocation */
-}
-
 /* Dynamic DMA mapping stuff.
  * SuperH has everything mapped statically like x86.
  */
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8c00785c60d..a33673b3687 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -47,7 +47,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		quicklist_free(QUICK_PT, NULL, pg);
+		return NULL;
+	}
 	return page;
 }

diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index e699a12cdcc..18e0377f72b 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -111,6 +111,16 @@ struct thread_struct {

 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };

 #define INIT_THREAD  {						\
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 1cc7d319714..eedd4f625d0 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -126,6 +126,16 @@ struct thread_struct {

 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };

 #define INIT_MMAP \
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
index 4f97df87d7d..4f643aa718e 100644
--- a/arch/sh/include/asm/syscalls_32.h
+++ b/arch/sh/include/asm/syscalls_32.h
@@ -9,15 +9,9 @@

 struct pt_regs;

-asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
-			     unsigned long r6, unsigned long r7,
-			     struct pt_regs __regs);
-asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
-				unsigned long r6, unsigned long r7,
-				struct pt_regs __regs);
-asmlinkage int sys_sh_pipe(unsigned long r4, unsigned long r5,
-			   unsigned long r6, unsigned long r7,
-			   struct pt_regs __regs);
+asmlinkage int sys_sigreturn(void);
+asmlinkage int sys_rt_sigreturn(void);
+asmlinkage int sys_sh_pipe(void);
 asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
 				     size_t count, long dummy, loff_t pos);
 asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 45a93669289..ad27ffa65e2 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -41,8 +41,6 @@ struct thread_info {

 #endif

-#define PREEMPT_ACTIVE		0x10000000
-
 #if defined(CONFIG_4KSTACKS)
 #define THREAD_SHIFT	12
 #else
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 362192ed12f..62f80d2a9df 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -86,6 +86,14 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 	}
 }

+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+}
+
+static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+}
+
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 }
diff --git a/arch/sh/include/asm/traps_32.h b/arch/sh/include/asm/traps_32.h
index cfd55ff9dff..17e129fe459 100644
--- a/arch/sh/include/asm/traps_32.h
+++ b/arch/sh/include/asm/traps_32.h
@@ -42,18 +42,10 @@ static inline void trigger_address_error(void)
 asmlinkage void do_address_error(struct pt_regs *regs,
 				 unsigned long writeaccess,
 				 unsigned long address);
-asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
-				unsigned long r6, unsigned long r7,
-				struct pt_regs __regs);
-asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
-				unsigned long r6, unsigned long r7,
-				struct pt_regs __regs);
-asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
-				unsigned long r6, unsigned long r7,
-				struct pt_regs __regs);
-asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
-				   unsigned long r6, unsigned long r7,
-				   struct pt_regs __regs);
+asmlinkage void do_divide_error(unsigned long r4);
+asmlinkage void do_reserved_inst(void);
+asmlinkage void do_illegal_slot_inst(void);
+asmlinkage void do_exception_error(void);

 #define BUILD_TRAP_HANDLER(name)					\
 asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5,	\
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index e77816c4b9b..126fe8340b2 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -11,7 +11,6 @@
 # define __ARCH_WANT_SYS_GETHOSTNAME
 # define __ARCH_WANT_SYS_IPC
 # define __ARCH_WANT_SYS_PAUSE
-# define __ARCH_WANT_SYS_SGETMASK
 # define __ARCH_WANT_SYS_SIGNAL
 # define __ARCH_WANT_SYS_TIME
 # define __ARCH_WANT_SYS_UTIME
